diff --git a/.buildkite/README.md b/.buildkite/README.md new file mode 100644 index 0000000000000..25a5ed487d51f --- /dev/null +++ b/.buildkite/README.md @@ -0,0 +1,41 @@ +# Elasticsearch CI Pipelines + +This directory contains pipeline definitions and scripts for running Elasticsearch CI on Buildkite. + +## Directory Structure + +- [pipelines](pipelines/) - pipeline definitions (YAML) +- [scripts](scripts/) - scripts invoked by pipeline steps +- [hooks](hooks/) - [Buildkite hooks](https://buildkite.com/docs/agent/v3/hooks), where global env vars and secrets are set + +## Pipeline Definitions + +Pipelines are defined using YAML files residing in [pipelines](pipelines/). These are mostly static definitions that are used as-is, but there are a few dynamically-generated exceptions (see below). + +### Dynamically Generated Pipelines + +Pull request pipelines are generated dynamically based on labels, files changed, and other properties of pull requests. + +Non-pull-request pipelines that include BWC version matrices must also be regenerated whenever the [list of BWC versions](../.ci/bwcVersions) is updated. + +#### Pull Request Pipelines + +Pull request pipelines are generated dynamically at CI time based on numerous properties of the pull request. See [scripts/pull-request](scripts/pull-request) for details. + +#### BWC Version Matrices + +For pipelines that include BWC version matrices, you will see one or more template files (e.g. [periodic.template.yml](pipelines/periodic.template.yml)) and a corresponding generated file (e.g. [periodic.yml](pipelines/periodic.yml)). The generated file is the one that is actually used by Buildkite. + +These files are updated by running: + +```bash +./gradlew updateCIBwcVersions +``` + +This also runs automatically during release procedures. + +You should always make changes to the template files, then run the above command to update the generated files. + +## Node / TypeScript + +Node (technically `bun`), TypeScript, and related files are currently used to generate pipelines for pull request CI. See [scripts/pull-request](scripts/pull-request) for details.
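To illustrate the template/generated relationship described in the README above, here is a minimal sketch of what a templated BWC step and its generated counterpart could look like. The step shape, the `$BWC_VERSION` placeholder, the `bwc_template: true` marker, and the concrete version number are illustrative assumptions, not the literal contents of `periodic.template.yml` or `periodic.yml`:

```yaml
# Hypothetical template step -- expanded when `./gradlew updateCIBwcVersions`
# runs, emitting one copy per version listed in .ci/bwcVersions:
- label: $BWC_VERSION / bwc
  command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v$BWC_VERSION#bwcTest
  timeout_in_minutes: 300
  bwc_template: true

# Hypothetical generated counterpart, as it might appear in the generated file:
- label: 8.10.0 / bwc
  command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.10.0#bwcTest
  timeout_in_minutes: 300
```

Hand-edits to the generated file are overwritten the next time the task runs, which is why changes belong in the templates.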
diff --git a/.buildkite/package.json b/.buildkite/package.json index c13d5f10fdf60..af5e6b628ab49 100644 --- a/.buildkite/package.json +++ b/.buildkite/package.json @@ -1,6 +1,5 @@ { "name": "buildkite-pipelines", - "module": "index.ts", "type": "module", "devDependencies": { "@types/node": "^20.6.0", diff --git a/.buildkite/pipelines/dra-workflow.yml b/.buildkite/pipelines/dra-workflow.yml index 336bb74041be3..e7bf19816356f 100644 --- a/.buildkite/pipelines/dra-workflow.yml +++ b/.buildkite/pipelines/dra-workflow.yml @@ -7,3 +7,13 @@ steps: image: family/elasticsearch-ubuntu-2204 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + - wait + # The hadoop build depends on the ES artifact + # So let's trigger the hadoop build any time we build a new staging artifact + - trigger: elasticsearch-hadoop-dra-workflow + async: true + build: + branch: "${BUILDKITE_BRANCH}" + env: + DRA_WORKFLOW: staging + if: build.env('DRA_WORKFLOW') == 'staging' diff --git a/.buildkite/pipelines/periodic-platform-support.yml b/.buildkite/pipelines/periodic-platform-support.yml index b52f8506885c9..faf904f2f8b04 100644 --- a/.buildkite/pipelines/periodic-platform-support.yml +++ b/.buildkite/pipelines/periodic-platform-support.yml @@ -80,3 +80,19 @@ steps: diskName: /dev/sda1 env: GRADLE_TASK: "{{matrix.GRADLE_TASK}}" + - group: platform-support-unix-aws + steps: + - label: "{{matrix.image}} / platform-support-aws" + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true functionalTests + timeout_in_minutes: 420 + matrix: + setup: + image: + - amazonlinux-2023 + agents: + provider: aws + imagePrefix: elasticsearch-{{matrix.image}} + instanceType: m6a.8xlarge + diskSizeGb: 350 + diskType: gp3 + diskName: /dev/sda1 diff --git a/.buildkite/pipelines/periodic.template.yml b/.buildkite/pipelines/periodic.template.yml index fab36deb6124a..39b79c85e4da8 100644 --- a/.buildkite/pipelines/periodic.template.yml +++ b/.buildkite/pipelines/periodic.template.yml @@ -73,6 +73,7 @@ steps: - openjdk19 - openjdk20 - openjdk21 + - openjdk22 GRADLE_TASK: - checkPart1 - checkPart2 @@ -180,7 +181,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n2-standard-8 buildDirectory: /dev/shm/bk - if: build.branch == "main" || build.branch =~ /^[0-9]+\.[0-9]+\$/ + if: build.branch == "main" || build.branch == "7.17" - label: Check branch consistency command: .ci/scripts/run-gradle.sh branchConsistency timeout_in_minutes: 15 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 3ce048533d131..acb2c1bb2d769 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -1194,6 +1194,7 @@ steps: - openjdk19 - openjdk20 - openjdk21 + - openjdk22 GRADLE_TASK: - checkPart1 - checkPart2 @@ -1301,7 +1302,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n2-standard-8 buildDirectory: /dev/shm/bk - if: build.branch == "main" || build.branch =~ /^[0-9]+\.[0-9]+\$/ + if: build.branch == "main" || build.branch == "7.17" - label: Check branch consistency command: .ci/scripts/run-gradle.sh branchConsistency timeout_in_minutes: 15 diff --git a/.buildkite/scripts/periodic.trigger.sh b/.buildkite/scripts/periodic.trigger.sh index 3571d112c5b6d..cc10a5ae41861 100755 --- a/.buildkite/scripts/periodic.trigger.sh +++ b/.buildkite/scripts/periodic.trigger.sh @@ -6,11 +6,26 @@ echo "steps:" source .buildkite/scripts/branches.sh +IS_FIRST=true +SKIP_DELAY="${SKIP_DELAY:-false}" + for BRANCH in "${BRANCHES[@]}"; do INTAKE_PIPELINE_SLUG="elasticsearch-intake" 
BUILD_JSON=$(curl -sH "Authorization: Bearer ${BUILDKITE_API_TOKEN}" "https://api.buildkite.com/v2/organizations/elastic/pipelines/${INTAKE_PIPELINE_SLUG}/builds?branch=${BRANCH}&state=passed&per_page=1" | jq '.[0] | {commit: .commit, url: .web_url}') LAST_GOOD_COMMIT=$(echo "${BUILD_JSON}" | jq -r '.commit') + # Put a delay between each branch's set of pipelines by prepending each non-first branch with a sleep + # This is to smooth out the spike in agent requests + if [[ "$IS_FIRST" != "true" && "$SKIP_DELAY" != "true" ]]; then + cat <<EOF + - command: sleep 540 + soft_fail: true + - wait: ~ + continue_on_failure: true +EOF + fi + + IS_FIRST=false diff --git a/.buildkite/scripts/pull-request/README.md b/.buildkite/scripts/pull-request/README.md --- a/.buildkite/scripts/pull-request/README.md +++ b/.buildkite/scripts/pull-request/README.md @@ … @@ The generator handles the following: +- Various configurations for filtering/activating steps based on labels, changed files, etc. See below. - Replacing `$SNAPSHOT_BWC_VERSIONS` in pipelines with an array of versions from `.ci/snapshotBwcVersions` - Duplicating any step with `bwc_template: true` for each BWC version in `.ci/bwcVersions` @@ -21,14 +16,21 @@ The generator handles the following: Pipelines are in [`.buildkite/pipelines`](../../pipelines/pull-request). They are automatically picked up and given a name based on their filename. - ## Setup - [Install bun](https://bun.sh/) - `npm install -g bun` will work if you already have `npm` - `cd .buildkite; bun install` to install dependencies -## Run tests +## Testing + +The pipeline generator is tested mostly via snapshot tests: they generate pipeline objects from the pipeline configurations in `mocks/pipelines` and compare them to the previously-generated snapshots in `__snapshots__` to confirm that they are correct. + +The mock pipeline configurations should, therefore, try to cover all of the various features of the generator (allow-labels, skip-labels, etc.). + +Snapshots are generated/managed automatically whenever you add and run a new test that makes a snapshot assertion. They are very similar to Jest snapshots. + +### Run tests ```bash cd .buildkite @@ -36,3 +38,53 @@ bun test ``` If you need to regenerate the snapshots, run `bun test --update-snapshots`. + +## Pipeline Configuration + +The `config:` property at the top of pipelines inside `.buildkite/pipelines/pull-request` is a custom property used by our pipeline generator. It is not used by Buildkite. + +All of the pipelines in this directory are evaluated whenever CI for a pull request is started, and the steps are filtered and combined into one pipeline based on the properties in `config:` and the state of the pull request. + +The various configurations available mirror what we were using in our Jenkins pipelines. A combined example is sketched after the `included-regions` property below. + +### Config Properties + +#### `allow-labels` + +- Type: `string|string[]` +- Example: `["test-full-bwc"]` + +Only trigger a step if the PR has one of these labels. + +#### `skip-labels` + +- Type: `string|string[]` +- Example: `>test-mute` + +Don't trigger the step if the PR has one of these labels. + +#### `excluded-regions` + +- Type: `string|string[]` - must be JavaScript regexes +- Example: `["^docs/.*", "^x-pack/docs/.*"]` + +Exclude the pipeline if all of the changed files in the PR match at least one regex. E.g. for the example above, don't run the step if all of the changed files are docs changes. + +#### `included-regions` + +- Type: `string|string[]` - must be JavaScript regexes +- Example: `["^docs/.*", "^x-pack/docs/.*"]` + +Only include the pipeline if all of the changed files in the PR match at least one regex. E.g. for the example above, only run the step if all of the changed files are docs changes.
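To make the filtering properties above concrete, here is a minimal sketch of a pull-request pipeline combining a `config:` block with ordinary Buildkite steps. The filename, labels, and step contents are hypothetical, assembled only from the properties this README documents:

```yaml
# Hypothetical file: .buildkite/pipelines/pull-request/example-check.yml
# The config: block is read by the pipeline generator; Buildkite does not use it.
config:
  skip-labels: [">test-mute"]     # never run when the PR carries this label
  excluded-regions: ["^docs/.*"]  # skip when every changed file is under docs/
steps:
  - label: example-check
    command: .ci/scripts/run-gradle.sh checkPart1
    timeout_in_minutes: 300
```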
+ +`included-regions` is particularly useful for having a step that only runs, for example, when all of the other steps get filtered out because of the `excluded-regions` property. + +#### `trigger-phrase` + +- Type: `string` - must be a JavaScript regex +- Example: `"^run\\W+elasticsearch-ci/test-full-bwc.*"` +- Default: `.*run\\W+elasticsearch-ci/<pipeline-name>.*` (`<pipeline-name>` is generated from the filename of the yml file). + +Trigger this step, and ignore all other steps, if the build was triggered by a comment and that comment matches this regex. + +Note that the entire build itself is triggered via [`.buildkite/pull-requests.json`](../pull-requests.json). So, a comment has to first match the trigger configured there. diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index a5e959e795c07..64ad5c5c851e3 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -26,3 +26,16 @@ x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/sto # APM Data index templates, etc. x-pack/plugin/apm-data/src/main/resources @elastic/apm-server x-pack/plugin/apm-data/src/yamlRestTest/resources @elastic/apm-server + +# Delivery +gradle @elastic/es-delivery +build-conventions @elastic/es-delivery +build-tools @elastic/es-delivery +build-tools-internal @elastic/es-delivery +*.gradle @elastic/es-delivery +.buildkite @elastic/es-delivery +.ci @elastic/es-delivery +.idea @elastic/es-delivery +distribution/src @elastic/es-delivery +distribution/packages/src @elastic/es-delivery +distribution/docker/src @elastic/es-delivery diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/AggregatorBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/AggregatorBenchmark.java index 7b1efb82cd1f0..63686023498c9 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/AggregatorBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/AggregatorBenchmark.java @@ -27,11 +27,9 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BytesRefBlock; -import org.elasticsearch.compute.data.DoubleArrayVector; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.IntBlock; -import org.elasticsearch.compute.data.LongArrayVector; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.AggregationOperator; @@ -66,7 +64,10 @@ public class AggregatorBenchmark { private static final int OP_COUNT = 1024; private static final int GROUPS = 5; - private static final BigArrays BIG_ARRAYS = BigArrays.NON_RECYCLING_INSTANCE; // TODO real big arrays? + private static final BlockFactory blockFactory = BlockFactory.getInstance( + new NoopCircuitBreaker("noop"), + BigArrays.NON_RECYCLING_INSTANCE // TODO real big arrays?
+ ); private static final String LONGS = "longs"; private static final String INTS = "ints"; @@ -116,8 +117,7 @@ public class AggregatorBenchmark { @Param({ VECTOR_LONGS, HALF_NULL_LONGS, VECTOR_DOUBLES, HALF_NULL_DOUBLES }) public String blockType; - private static Operator operator(String grouping, String op, String dataType) { - DriverContext driverContext = driverContext(); + private static Operator operator(DriverContext driverContext, String grouping, String op, String dataType) { if (grouping.equals("none")) { return new AggregationOperator( List.of(supplier(op, dataType, 0).aggregatorFactory(AggregatorMode.SINGLE).apply(driverContext)), @@ -154,25 +154,25 @@ private static Operator operator(String grouping, String op, String dataType) { private static AggregatorFunctionSupplier supplier(String op, String dataType, int dataChannel) { return switch (op) { - case COUNT -> CountAggregatorFunction.supplier(BIG_ARRAYS, List.of(dataChannel)); + case COUNT -> CountAggregatorFunction.supplier(List.of(dataChannel)); case COUNT_DISTINCT -> switch (dataType) { - case LONGS -> new CountDistinctLongAggregatorFunctionSupplier(BIG_ARRAYS, List.of(dataChannel), 3000); - case DOUBLES -> new CountDistinctDoubleAggregatorFunctionSupplier(BIG_ARRAYS, List.of(dataChannel), 3000); + case LONGS -> new CountDistinctLongAggregatorFunctionSupplier(List.of(dataChannel), 3000); + case DOUBLES -> new CountDistinctDoubleAggregatorFunctionSupplier(List.of(dataChannel), 3000); default -> throw new IllegalArgumentException("unsupported data type [" + dataType + "]"); }; case MAX -> switch (dataType) { - case LONGS -> new MaxLongAggregatorFunctionSupplier(BIG_ARRAYS, List.of(dataChannel)); - case DOUBLES -> new MaxDoubleAggregatorFunctionSupplier(BIG_ARRAYS, List.of(dataChannel)); + case LONGS -> new MaxLongAggregatorFunctionSupplier(List.of(dataChannel)); + case DOUBLES -> new MaxDoubleAggregatorFunctionSupplier(List.of(dataChannel)); default -> throw new IllegalArgumentException("unsupported data type [" + dataType + "]"); }; case MIN -> switch (dataType) { - case LONGS -> new MinLongAggregatorFunctionSupplier(BIG_ARRAYS, List.of(dataChannel)); - case DOUBLES -> new MinDoubleAggregatorFunctionSupplier(BIG_ARRAYS, List.of(dataChannel)); + case LONGS -> new MinLongAggregatorFunctionSupplier(List.of(dataChannel)); + case DOUBLES -> new MinDoubleAggregatorFunctionSupplier(List.of(dataChannel)); default -> throw new IllegalArgumentException("unsupported data type [" + dataType + "]"); }; case SUM -> switch (dataType) { - case LONGS -> new SumLongAggregatorFunctionSupplier(BIG_ARRAYS, List.of(dataChannel)); - case DOUBLES -> new SumDoubleAggregatorFunctionSupplier(BIG_ARRAYS, List.of(dataChannel)); + case LONGS -> new SumLongAggregatorFunctionSupplier(List.of(dataChannel)); + case DOUBLES -> new SumDoubleAggregatorFunctionSupplier(List.of(dataChannel)); default -> throw new IllegalArgumentException("unsupported data type [" + dataType + "]"); }; default -> throw new IllegalArgumentException("unsupported op [" + op + "]"); @@ -432,8 +432,8 @@ private static void checkUngrouped(String prefix, String op, String dataType, Pa } } - private static Page page(String grouping, String blockType) { - Block dataBlock = dataBlock(blockType); + private static Page page(BlockFactory blockFactory, String grouping, String blockType) { + Block dataBlock = dataBlock(blockFactory, blockType); if (grouping.equals("none")) { return new Page(dataBlock); } @@ -441,15 +441,15 @@ private static Page page(String grouping, String blockType) { 
return new Page(Stream.concat(blocks.stream(), Stream.of(dataBlock)).toArray(Block[]::new)); } - private static Block dataBlock(String blockType) { + private static Block dataBlock(BlockFactory blockFactory, String blockType) { return switch (blockType) { - case VECTOR_LONGS -> new LongArrayVector(LongStream.range(0, BLOCK_LENGTH).toArray(), BLOCK_LENGTH).asBlock(); - case VECTOR_DOUBLES -> new DoubleArrayVector( + case VECTOR_LONGS -> blockFactory.newLongArrayVector(LongStream.range(0, BLOCK_LENGTH).toArray(), BLOCK_LENGTH).asBlock(); + case VECTOR_DOUBLES -> blockFactory.newDoubleArrayVector( LongStream.range(0, BLOCK_LENGTH).mapToDouble(l -> Long.valueOf(l).doubleValue()).toArray(), BLOCK_LENGTH ).asBlock(); case MULTIVALUED_LONGS -> { - var builder = LongBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newLongBlockBuilder(BLOCK_LENGTH); builder.beginPositionEntry(); for (int i = 0; i < BLOCK_LENGTH; i++) { builder.appendLong(i); @@ -462,7 +462,7 @@ private static Block dataBlock(String blockType) { yield builder.build(); } case HALF_NULL_LONGS -> { - var builder = LongBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newLongBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { builder.appendLong(i); builder.appendNull(); @@ -470,7 +470,7 @@ private static Block dataBlock(String blockType) { yield builder.build(); } case HALF_NULL_DOUBLES -> { - var builder = DoubleBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newDoubleBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { builder.appendDouble(i); builder.appendNull(); @@ -502,7 +502,7 @@ private static Block groupingBlock(String grouping, String blockType) { }; return switch (grouping) { case LONGS -> { - var builder = LongBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newLongBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { for (int v = 0; v < valuesPerGroup; v++) { builder.appendLong(i % GROUPS); @@ -511,7 +511,7 @@ private static Block groupingBlock(String grouping, String blockType) { yield builder.build(); } case INTS -> { - var builder = IntBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newIntBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { for (int v = 0; v < valuesPerGroup; v++) { builder.appendInt(i % GROUPS); @@ -520,7 +520,7 @@ private static Block groupingBlock(String grouping, String blockType) { yield builder.build(); } case DOUBLES -> { - var builder = DoubleBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newDoubleBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { for (int v = 0; v < valuesPerGroup; v++) { builder.appendDouble(i % GROUPS); @@ -529,7 +529,7 @@ private static Block groupingBlock(String grouping, String blockType) { yield builder.build(); } case BOOLEANS -> { - BooleanBlock.Builder builder = BooleanBlock.newBlockBuilder(BLOCK_LENGTH); + BooleanBlock.Builder builder = blockFactory.newBooleanBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { for (int v = 0; v < valuesPerGroup; v++) { builder.appendBoolean(i % 2 == 1); @@ -538,7 +538,7 @@ private static Block groupingBlock(String grouping, String blockType) { yield builder.build(); } case BYTES_REFS -> { - BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(BLOCK_LENGTH); + BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { for (int v = 0; v < valuesPerGroup; v++) { 
builder.appendBytesRef(bytesGroup(i % GROUPS)); @@ -574,8 +574,9 @@ private static void run(String grouping, String op, String blockType, int opCoun default -> throw new IllegalArgumentException(); }; - Operator operator = operator(grouping, op, dataType); - Page page = page(grouping, blockType); + DriverContext driverContext = driverContext(); + Operator operator = operator(driverContext, grouping, op, dataType); + Page page = page(driverContext.blockFactory(), grouping, blockType); for (int i = 0; i < opCount; i++) { operator.addInput(page); } @@ -584,9 +585,6 @@ private static void run(String grouping, String op, String blockType, int opCoun } static DriverContext driverContext() { - return new DriverContext( - BigArrays.NON_RECYCLING_INSTANCE, - BlockFactory.getInstance(new NoopCircuitBreaker("noop"), BigArrays.NON_RECYCLING_INSTANCE) - ); + return new DriverContext(BigArrays.NON_RECYCLING_INSTANCE, blockFactory); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockBenchmark.java new file mode 100644 index 0000000000000..e0281dbb856d4 --- /dev/null +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockBenchmark.java @@ -0,0 +1,849 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.benchmark.compute.operator; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.common.util.BytesRefArray; +import org.elasticsearch.common.util.DoubleArray; +import org.elasticsearch.common.util.IntArray; +import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BooleanBigArrayBlock; +import org.elasticsearch.compute.data.BooleanBigArrayVector; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBigArrayBlock; +import org.elasticsearch.compute.data.DoubleBigArrayVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.IntBigArrayBlock; +import org.elasticsearch.compute.data.IntBigArrayVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBigArrayBlock; +import org.elasticsearch.compute.data.LongBigArrayVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import 
org.openjdk.jmh.annotations.OperationsPerInvocation; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.TearDown; +import org.openjdk.jmh.annotations.Warmup; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.Collections; +import java.util.List; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.stream.IntStream; + +@Warmup(iterations = 5) +@Measurement(iterations = 7) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.NANOSECONDS) +@State(Scope.Thread) +@Fork(1) +public class BlockBenchmark { + + /** + * All data type/block kind combinations to be loaded before the benchmark. + * It is important to be exhaustive here so that all implementers of {@link IntBlock#getInt(int)} are actually loaded when we benchmark + * {@link IntBlock}s etc. + */ + // We could also consider DocBlocks/DocVectors but they do not implement any of the typed block interfaces like IntBlock etc. + public static final String[] RELEVANT_TYPE_BLOCK_COMBINATIONS = { + "boolean/array", + "boolean/array-multivalue-null", + "boolean/big-array", + "boolean/big-array-multivalue-null", + "boolean/vector", + "boolean/vector-big-array", + "boolean/vector-const", + "BytesRef/array", + "BytesRef/array-multivalue-null", + "BytesRef/vector", + "BytesRef/vector-const", + "double/array", + "double/array-multivalue-null", + "double/big-array", + "double/big-array-multivalue-null", + "double/vector", + "double/vector-big-array", + "double/vector-const", + "int/array", + "int/array-multivalue-null", + "int/big-array", + "int/big-array-multivalue-null", + "int/vector", + "int/vector-big-array", + "int/vector-const", + "long/array", + "long/array-multivalue-null", + "long/big-array", + "long/big-array-multivalue-null", + "long/vector", + "long/vector-big-array", + "long/vector-const" }; + public static final int NUM_BLOCKS_PER_ITERATION = 1024; + public static final int BLOCK_TOTAL_POSITIONS = 8096; + + private static final double MV_PERCENTAGE = 0.3; + private static final double NULL_PERCENTAGE = 0.1; + private static final int MAX_MV_ELEMENTS = 100; + private static final int MAX_BYTES_REF_LENGTH = 255; + + private static final Random random = new Random(); + + private static final BlockFactory blockFactory = BlockFactory.getInstance( + new NoopCircuitBreaker("noop"), + BigArrays.NON_RECYCLING_INSTANCE + ); + + static { + // Smoke test all the expected values and force loading subclasses more like prod + int totalPositions = 10; + long[] actualCheckSums = new long[NUM_BLOCKS_PER_ITERATION]; + + for (String paramString : RELEVANT_TYPE_BLOCK_COMBINATIONS) { + String[] params = paramString.split("/"); + String dataType = params[0]; + String blockKind = params[1]; + + BenchmarkBlocks data = buildBlocks(dataType, blockKind, totalPositions); + int[][] traversalOrders = createTraversalOrders(data.blocks, false); + run(dataType, data, traversalOrders, actualCheckSums); + assertCheckSums(data, actualCheckSums); + } + } + + private record BenchmarkBlocks(Block[] blocks, long[] checkSums) {}; + + private static BenchmarkBlocks buildBlocks(String dataType, String blockKind, int totalPositions) { + Block[] blocks = new Block[NUM_BLOCKS_PER_ITERATION]; + long[] checkSums = new long[NUM_BLOCKS_PER_ITERATION]; + + switch (dataType) { + case "boolean" -> { + for (int blockIndex = 0; 
blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + if (blockKind.equalsIgnoreCase("vector-const")) { + BooleanVector vector = blockFactory.newConstantBooleanVector(random.nextBoolean(), totalPositions); + blocks[blockIndex] = vector.asBlock(); + continue; + } + + boolean[] values = new boolean[totalPositions]; + for (int i = 0; i < totalPositions; i++) { + values[i] = random.nextBoolean(); + } + + switch (blockKind) { + case "array" -> { + blocks[blockIndex] = blockFactory.newBooleanArrayBlock( + values, + totalPositions, + null, + null, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING + ); + } + case "array-multivalue-null" -> { + int[] firstValueIndexes = randomFirstValueIndexes(totalPositions); + int positionCount = firstValueIndexes.length - 1; + BitSet nulls = randomNulls(positionCount); + + blocks[blockIndex] = blockFactory.newBooleanArrayBlock( + values, + positionCount, + firstValueIndexes, + nulls, + Block.MvOrdering.UNORDERED + ); + } + case "big-array" -> { + BitArray valuesBigArray = new BitArray(totalPositions, BigArrays.NON_RECYCLING_INSTANCE); + for (int i = 0; i < values.length; i++) { + if (values[i]) { + valuesBigArray.set(i); + } + } + + blocks[blockIndex] = new BooleanBigArrayBlock( + valuesBigArray, + totalPositions, + null, + null, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + blockFactory + ); + } + case "big-array-multivalue-null" -> { + int[] firstValueIndexes = randomFirstValueIndexes(totalPositions); + int positionCount = firstValueIndexes.length - 1; + BitSet nulls = randomNulls(positionCount); + BitArray valuesBigArray = new BitArray(totalPositions, BigArrays.NON_RECYCLING_INSTANCE); + for (int i = 0; i < values.length; i++) { + if (values[i]) { + valuesBigArray.set(i); + } + } + + blocks[blockIndex] = new BooleanBigArrayBlock( + valuesBigArray, + positionCount, + firstValueIndexes, + nulls, + Block.MvOrdering.UNORDERED, + blockFactory + ); + } + case "vector" -> { + BooleanVector vector = blockFactory.newBooleanArrayVector(values, totalPositions); + blocks[blockIndex] = vector.asBlock(); + } + case "vector-big-array" -> { + BitArray valuesBigArray = new BitArray(totalPositions, BigArrays.NON_RECYCLING_INSTANCE); + for (int i = 0; i < values.length; i++) { + if (values[i]) { + valuesBigArray.set(i); + } + } + BooleanVector vector = new BooleanBigArrayVector(valuesBigArray, totalPositions, blockFactory); + blocks[blockIndex] = vector.asBlock(); + } + default -> { + throw new IllegalStateException("illegal block kind [" + blockKind + "]"); + } + } + } + + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + BooleanBlock block = (BooleanBlock) blocks[blockIndex]; + checkSums[blockIndex] = computeBooleanCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); + } + } + case "BytesRef" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + if (blockKind.equalsIgnoreCase("vector-const")) { + byte[] bytes = new byte[random.nextInt(MAX_BYTES_REF_LENGTH)]; + random.nextBytes(bytes); + + BytesRefVector vector = blockFactory.newConstantBytesRefVector(new BytesRef(bytes), totalPositions); + blocks[blockIndex] = vector.asBlock(); + continue; + } + + BytesRefArray values = new BytesRefArray(totalPositions, BigArrays.NON_RECYCLING_INSTANCE); + byte[] bytes; + for (int i = 0; i < totalPositions; i++) { + bytes = new byte[random.nextInt(MAX_BYTES_REF_LENGTH)]; + random.nextBytes(bytes); + values.append(new BytesRef(bytes)); + } + + switch (blockKind) { + case "array" -> { + 
blocks[blockIndex] = blockFactory.newBytesRefArrayBlock( + values, + totalPositions, + null, + null, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING + ); + } + case "array-multivalue-null" -> { + int[] firstValueIndexes = randomFirstValueIndexes(totalPositions); + int positionCount = firstValueIndexes.length - 1; + BitSet nulls = randomNulls(positionCount); + + blocks[blockIndex] = blockFactory.newBytesRefArrayBlock( + values, + positionCount, + firstValueIndexes, + nulls, + Block.MvOrdering.UNORDERED + ); + } + case "vector" -> { + BytesRefVector vector = blockFactory.newBytesRefArrayVector(values, totalPositions); + blocks[blockIndex] = vector.asBlock(); + } + default -> { + throw new IllegalStateException("illegal block kind [" + blockKind + "]"); + } + } + } + + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + BytesRefBlock block = (BytesRefBlock) blocks[blockIndex]; + checkSums[blockIndex] = computeBytesRefCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); + } + } + case "double" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + if (blockKind.equalsIgnoreCase("vector-const")) { + DoubleVector vector = blockFactory.newConstantDoubleVector(random.nextDouble() * 1000000.0, totalPositions); + blocks[blockIndex] = vector.asBlock(); + continue; + } + + double[] values = new double[totalPositions]; + for (int i = 0; i < totalPositions; i++) { + values[i] = random.nextDouble() * 1000000.0; + } + + switch (blockKind) { + case "array" -> { + blocks[blockIndex] = blockFactory.newDoubleArrayBlock( + values, + totalPositions, + null, + null, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING + ); + } + case "array-multivalue-null" -> { + int[] firstValueIndexes = randomFirstValueIndexes(totalPositions); + int positionCount = firstValueIndexes.length - 1; + BitSet nulls = randomNulls(positionCount); + + blocks[blockIndex] = blockFactory.newDoubleArrayBlock( + values, + positionCount, + firstValueIndexes, + nulls, + Block.MvOrdering.UNORDERED + ); + } + case "big-array" -> { + DoubleArray valuesBigArray = blockFactory.bigArrays().newDoubleArray(totalPositions, false); + for (int i = 0; i < values.length; i++) { + valuesBigArray.set(i, values[i]); + } + + blocks[blockIndex] = new DoubleBigArrayBlock( + valuesBigArray, + totalPositions, + null, + null, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + blockFactory + ); + } + case "big-array-multivalue-null" -> { + int[] firstValueIndexes = randomFirstValueIndexes(totalPositions); + int positionCount = firstValueIndexes.length - 1; + BitSet nulls = randomNulls(positionCount); + DoubleArray valuesBigArray = blockFactory.bigArrays().newDoubleArray(totalPositions, false); + for (int i = 0; i < values.length; i++) { + valuesBigArray.set(i, values[i]); + } + + blocks[blockIndex] = new DoubleBigArrayBlock( + valuesBigArray, + positionCount, + firstValueIndexes, + nulls, + Block.MvOrdering.UNORDERED, + blockFactory + ); + } + case "vector" -> { + DoubleVector vector = blockFactory.newDoubleArrayVector(values, totalPositions); + blocks[blockIndex] = vector.asBlock(); + } + case "vector-big-array" -> { + DoubleArray valuesBigArray = blockFactory.bigArrays().newDoubleArray(totalPositions, false); + for (int i = 0; i < values.length; i++) { + valuesBigArray.set(i, values[i]); + } + DoubleVector vector = new DoubleBigArrayVector(valuesBigArray, totalPositions, blockFactory); + blocks[blockIndex] = vector.asBlock(); + } + default -> { + throw new 
IllegalStateException("illegal block kind [" + blockKind + "]"); + } + } + } + + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + DoubleBlock block = (DoubleBlock) blocks[blockIndex]; + checkSums[blockIndex] = computeDoubleCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); + } + } + case "int" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + if (blockKind.equalsIgnoreCase("vector-const")) { + IntVector vector = blockFactory.newConstantIntVector(random.nextInt(), totalPositions); + blocks[blockIndex] = vector.asBlock(); + continue; + } + + int[] values = new int[totalPositions]; + for (int i = 0; i < totalPositions; i++) { + values[i] = random.nextInt(); + } + + switch (blockKind) { + case "array" -> { + blocks[blockIndex] = blockFactory.newIntArrayBlock( + values, + totalPositions, + null, + null, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING + ); + } + case "array-multivalue-null" -> { + int[] firstValueIndexes = randomFirstValueIndexes(totalPositions); + int positionCount = firstValueIndexes.length - 1; + BitSet nulls = randomNulls(positionCount); + + blocks[blockIndex] = blockFactory.newIntArrayBlock( + values, + positionCount, + firstValueIndexes, + nulls, + Block.MvOrdering.UNORDERED + ); + } + case "big-array" -> { + IntArray valuesBigArray = blockFactory.bigArrays().newIntArray(totalPositions, false); + for (int i = 0; i < values.length; i++) { + valuesBigArray.set(i, values[i]); + } + + blocks[blockIndex] = new IntBigArrayBlock( + valuesBigArray, + totalPositions, + null, + null, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + blockFactory + ); + } + case "big-array-multivalue-null" -> { + int[] firstValueIndexes = randomFirstValueIndexes(totalPositions); + int positionCount = firstValueIndexes.length - 1; + BitSet nulls = randomNulls(positionCount); + IntArray valuesBigArray = blockFactory.bigArrays().newIntArray(totalPositions, false); + for (int i = 0; i < values.length; i++) { + valuesBigArray.set(i, values[i]); + } + + blocks[blockIndex] = new IntBigArrayBlock( + valuesBigArray, + positionCount, + firstValueIndexes, + nulls, + Block.MvOrdering.UNORDERED, + blockFactory + ); + } + case "vector" -> { + IntVector vector = blockFactory.newIntArrayVector(values, totalPositions); + blocks[blockIndex] = vector.asBlock(); + } + case "vector-big-array" -> { + IntArray valuesBigArray = blockFactory.bigArrays().newIntArray(totalPositions, false); + for (int i = 0; i < values.length; i++) { + valuesBigArray.set(i, values[i]); + } + IntVector vector = new IntBigArrayVector(valuesBigArray, totalPositions, blockFactory); + blocks[blockIndex] = vector.asBlock(); + } + default -> { + throw new IllegalStateException("illegal block kind [" + blockKind + "]"); + } + } + } + + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + IntBlock block = (IntBlock) blocks[blockIndex]; + checkSums[blockIndex] = computeIntCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); + } + } + case "long" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + if (blockKind.equalsIgnoreCase("vector-const")) { + LongVector vector = blockFactory.newConstantLongVector(random.nextLong(), totalPositions); + blocks[blockIndex] = vector.asBlock(); + continue; + } + + long[] values = new long[totalPositions]; + for (int i = 0; i < totalPositions; i++) { + values[i] = random.nextLong(); + } + + switch (blockKind) { + case "array" -> { + 
blocks[blockIndex] = blockFactory.newLongArrayBlock( + values, + totalPositions, + null, + null, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING + ); + } + case "array-multivalue-null" -> { + int[] firstValueIndexes = randomFirstValueIndexes(totalPositions); + int positionCount = firstValueIndexes.length - 1; + BitSet nulls = randomNulls(positionCount); + + blocks[blockIndex] = blockFactory.newLongArrayBlock( + values, + positionCount, + firstValueIndexes, + nulls, + Block.MvOrdering.UNORDERED + ); + } + case "big-array" -> { + LongArray valuesBigArray = blockFactory.bigArrays().newLongArray(totalPositions, false); + for (int i = 0; i < values.length; i++) { + valuesBigArray.set(i, values[i]); + } + + blocks[blockIndex] = new LongBigArrayBlock( + valuesBigArray, + totalPositions, + null, + null, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + blockFactory + ); + } + case "big-array-multivalue-null" -> { + int[] firstValueIndexes = randomFirstValueIndexes(totalPositions); + int positionCount = firstValueIndexes.length - 1; + BitSet nulls = randomNulls(positionCount); + LongArray valuesBigArray = blockFactory.bigArrays().newLongArray(totalPositions, false); + for (int i = 0; i < values.length; i++) { + valuesBigArray.set(i, values[i]); + } + + blocks[blockIndex] = new LongBigArrayBlock( + valuesBigArray, + positionCount, + firstValueIndexes, + nulls, + Block.MvOrdering.UNORDERED, + blockFactory + ); + } + case "vector" -> { + LongVector vector = blockFactory.newLongArrayVector(values, totalPositions); + blocks[blockIndex] = vector.asBlock(); + } + case "vector-big-array" -> { + LongArray valuesBigArray = blockFactory.bigArrays().newLongArray(totalPositions, false); + for (int i = 0; i < values.length; i++) { + valuesBigArray.set(i, values[i]); + } + LongVector vector = new LongBigArrayVector(valuesBigArray, totalPositions, blockFactory); + blocks[blockIndex] = vector.asBlock(); + } + default -> { + throw new IllegalStateException("illegal block kind [" + blockKind + "]"); + } + } + } + + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + LongBlock block = (LongBlock) blocks[blockIndex]; + checkSums[blockIndex] = computeLongCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); + } + } + default -> { + throw new IllegalStateException("illegal data type [" + dataType + "]"); + } + } + + return new BenchmarkBlocks(blocks, checkSums); + } + + private static int[][] createTraversalOrders(Block[] blocks, boolean randomized) { + int[][] orders = new int[blocks.length][]; + + for (int i = 0; i < blocks.length; i++) { + IntStream positionsStream = IntStream.range(0, blocks[i].getPositionCount()); + + if (randomized) { + List<Integer> positions = new java.util.ArrayList<>(positionsStream.boxed().toList()); + Collections.shuffle(positions, random); + orders[i] = positions.stream().mapToInt(x -> x).toArray(); + } else { + orders[i] = positionsStream.toArray(); + } + } + + return orders; + } + + private static int[] randomFirstValueIndexes(int totalPositions) { + ArrayList<Integer> firstValueIndexes = new ArrayList<>(); + firstValueIndexes.add(0); + + int currentPosition = 0; + int nextPosition; + while (currentPosition < totalPositions) { + if (random.nextDouble() < MV_PERCENTAGE) { + nextPosition = Math.min(currentPosition + 1 + random.nextInt(MAX_MV_ELEMENTS), totalPositions); + } else { + nextPosition = currentPosition + 1; + } + firstValueIndexes.add(nextPosition); + currentPosition = nextPosition; + } + + return firstValueIndexes.stream().mapToInt(x ->
x).toArray(); + } + + private static BitSet randomNulls(int positionCount) { + BitSet nulls = new BitSet(positionCount); + for (int i = 0; i < positionCount; i++) { + if (random.nextDouble() < NULL_PERCENTAGE) { + nulls.set(i); + } + } + + return nulls; + } + + private static void run(String dataType, BenchmarkBlocks data, int[][] traversalOrders, long[] resultCheckSums) { + switch (dataType) { + case "boolean" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + BooleanBlock block = (BooleanBlock) data.blocks[blockIndex]; + + resultCheckSums[blockIndex] = computeBooleanCheckSum(block, traversalOrders[blockIndex]); + } + } + case "BytesRef" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + BytesRefBlock block = (BytesRefBlock) data.blocks[blockIndex]; + + resultCheckSums[blockIndex] = computeBytesRefCheckSum(block, traversalOrders[blockIndex]); + } + } + case "double" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + DoubleBlock block = (DoubleBlock) data.blocks[blockIndex]; + + resultCheckSums[blockIndex] = computeDoubleCheckSum(block, traversalOrders[blockIndex]); + } + } + case "int" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + IntBlock block = (IntBlock) data.blocks[blockIndex]; + + resultCheckSums[blockIndex] = computeIntCheckSum(block, traversalOrders[blockIndex]); + } + } + case "long" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + LongBlock block = (LongBlock) data.blocks[blockIndex]; + + resultCheckSums[blockIndex] = computeLongCheckSum(block, traversalOrders[blockIndex]); + } + } + default -> { + throw new IllegalStateException(); + } + } + } + + private static void assertCheckSums(BenchmarkBlocks data, long[] actualCheckSums) { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + if (actualCheckSums[blockIndex] != data.checkSums[blockIndex]) { + throw new AssertionError("checksums do not match for block [" + blockIndex + "]"); + } + } + } + + private static long computeBooleanCheckSum(BooleanBlock block, int[] traversalOrder) { + long sum = 0; + + for (int position : traversalOrder) { + if (block.isNull(position)) { + continue; + } + int start = block.getFirstValueIndex(position); + int end = start + block.getValueCount(position); + for (int i = start; i < end; i++) { + sum += block.getBoolean(i) ? 1 : 0; + } + } + + return sum; + } + + private static long computeBytesRefCheckSum(BytesRefBlock block, int[] traversalOrder) { + long sum = 0; + BytesRef currentValue = new BytesRef(); + + for (int position : traversalOrder) { + if (block.isNull(position)) { + continue; + } + int start = block.getFirstValueIndex(position); + int end = start + block.getValueCount(position); + for (int i = start; i < end; i++) { + block.getBytesRef(i, currentValue); + sum += currentValue.length > 0 ? 
currentValue.bytes[0] : 0; + } + } + + return sum; + } + + private static long computeDoubleCheckSum(DoubleBlock block, int[] traversalOrder) { + double sum = 0; + + for (int position : traversalOrder) { + if (block.isNull(position)) { + continue; + } + int start = block.getFirstValueIndex(position); + int end = start + block.getValueCount(position); + for (int i = start; i < end; i++) { + sum += block.getDouble(i); + } + } + + return (long) sum; + } + + private static long computeIntCheckSum(IntBlock block, int[] traversalOrder) { + int sum = 0; + + for (int position : traversalOrder) { + if (block.isNull(position)) { + continue; + } + int start = block.getFirstValueIndex(position); + int end = start + block.getValueCount(position); + for (int i = start; i < end; i++) { + sum += block.getInt(i); + } + } + + return sum; + } + + private static long computeLongCheckSum(LongBlock block, int[] traversalOrder) { + long sum = 0; + + for (int position : traversalOrder) { + if (block.isNull(position)) { + continue; + } + int start = block.getFirstValueIndex(position); + int end = start + block.getValueCount(position); + for (int i = start; i < end; i++) { + sum += block.getLong(i); + } + } + + return sum; + } + + private static boolean isRandom(String accessType) { + return accessType.equalsIgnoreCase("random"); + } + + /** + * Must be a subset of {@link BlockBenchmark#RELEVANT_TYPE_BLOCK_COMBINATIONS} + */ + @Param( + { + "boolean/array", + "boolean/array-multivalue-null", + "boolean/big-array", + "boolean/big-array-multivalue-null", + "boolean/vector", + "boolean/vector-big-array", + "boolean/vector-const", + "BytesRef/array", + "BytesRef/array-multivalue-null", + "BytesRef/vector", + "BytesRef/vector-const", + "double/array", + "double/array-multivalue-null", + "double/big-array", + "double/big-array-multivalue-null", + "double/vector", + "double/vector-big-array", + "double/vector-const", + "int/array", + "int/array-multivalue-null", + "int/big-array", + "int/big-array-multivalue-null", + "int/vector", + "int/vector-big-array", + "int/vector-const", + "long/array", + "long/array-multivalue-null", + "long/big-array", + "long/big-array-multivalue-null", + "long/vector", + "long/vector-big-array", + "long/vector-const" } + ) + public String dataTypeAndBlockKind; + + @Param({ "sequential", "random" }) + public String accessType; + + private BenchmarkBlocks data; + + private int[][] traversalOrders; + + private final long[] actualCheckSums = new long[NUM_BLOCKS_PER_ITERATION]; + + @Setup + public void setup() { + String[] params = dataTypeAndBlockKind.split("/"); + String dataType = params[0]; + String blockKind = params[1]; + + data = buildBlocks(dataType, blockKind, BLOCK_TOTAL_POSITIONS); + traversalOrders = createTraversalOrders(data.blocks, isRandom(accessType)); + } + + @Benchmark + @OperationsPerInvocation(NUM_BLOCKS_PER_ITERATION * BLOCK_TOTAL_POSITIONS) + public void run() { + String[] params = dataTypeAndBlockKind.split("/"); + String dataType = params[0]; + + run(dataType, data, traversalOrders, actualCheckSums); + } + + @TearDown(Level.Iteration) + public void assertCheckSums() { + assertCheckSums(data, actualCheckSums); + } +} diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java index 3a1142ad87d2f..1765897ba35e7 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java +++ 
b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java @@ -14,7 +14,6 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BooleanVector; -import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.LongVector; import org.elasticsearch.compute.data.Page; @@ -59,6 +58,12 @@ @State(Scope.Thread) @Fork(1) public class EvalBenchmark { + private static final BigArrays BIG_ARRAYS = BigArrays.NON_RECYCLING_INSTANCE; // TODO real big arrays? + private static final BlockFactory blockFactory = BlockFactory.getInstance( + new NoopCircuitBreaker("noop"), + BigArrays.NON_RECYCLING_INSTANCE + ); + private static final int BLOCK_LENGTH = 8 * 1024; static final DriverContext driverContext = new DriverContext( @@ -207,15 +212,15 @@ private static void checkExpected(String operation, Page actual) { private static Page page(String operation) { return switch (operation) { case "abs", "add", "date_trunc", "equal_to_const" -> { - var builder = LongBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newLongBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { builder.appendLong(i * 100_000); } yield new Page(builder.build()); } case "long_equal_to_long" -> { - var lhs = LongBlock.newBlockBuilder(BLOCK_LENGTH); - var rhs = LongBlock.newBlockBuilder(BLOCK_LENGTH); + var lhs = blockFactory.newLongBlockBuilder(BLOCK_LENGTH); + var rhs = blockFactory.newLongBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { lhs.appendLong(i * 100_000); rhs.appendLong(i * 100_000); @@ -223,8 +228,8 @@ private static Page page(String operation) { yield new Page(lhs.build(), rhs.build()); } case "long_equal_to_int" -> { - var lhs = LongBlock.newBlockBuilder(BLOCK_LENGTH); - var rhs = IntBlock.newBlockBuilder(BLOCK_LENGTH); + var lhs = blockFactory.newLongBlockBuilder(BLOCK_LENGTH); + var rhs = blockFactory.newIntBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { lhs.appendLong(i * 100_000); rhs.appendInt(i * 100_000); @@ -232,7 +237,7 @@ private static Page page(String operation) { yield new Page(lhs.build(), rhs.build()); } case "mv_min", "mv_min_ascending" -> { - var builder = LongBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newLongBlockBuilder(BLOCK_LENGTH); if (operation.endsWith("ascending")) { builder.mvOrdering(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING); } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java index 09cdc8b269ad3..7580808be36ad 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java @@ -10,6 +10,8 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; @@ -43,6 +45,11 @@ @State(Scope.Thread) @Fork(1) public class MultivalueDedupeBenchmark { + private static final BlockFactory blockFactory = BlockFactory.getInstance( 
+ new NoopCircuitBreaker("noop"), + BigArrays.NON_RECYCLING_INSTANCE + ); + @Param({ "BOOLEAN", "BYTES_REF", "DOUBLE", "INT", "LONG" }) private ElementType elementType; @@ -58,7 +65,7 @@ public class MultivalueDedupeBenchmark { public void setup() { this.block = switch (elementType) { case BOOLEAN -> { - BooleanBlock.Builder builder = BooleanBlock.newBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); + BooleanBlock.Builder builder = blockFactory.newBooleanBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); for (int p = 0; p < AggregatorBenchmark.BLOCK_LENGTH; p++) { List<Boolean> values = new ArrayList<>(); for (int i = 0; i < size; i++) { @@ -77,7 +84,7 @@ public void setup() { yield builder.build(); } case BYTES_REF -> { - BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); + BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); for (int p = 0; p < AggregatorBenchmark.BLOCK_LENGTH; p++) { List<BytesRef> values = new ArrayList<>(); for (int i = 0; i < size; i++) { @@ -96,7 +103,7 @@ public void setup() { yield builder.build(); } case DOUBLE -> { - DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); + DoubleBlock.Builder builder = blockFactory.newDoubleBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); for (int p = 0; p < AggregatorBenchmark.BLOCK_LENGTH; p++) { List<Double> values = new ArrayList<>(); for (int i = 0; i < size; i++) { @@ -115,7 +122,7 @@ public void setup() { yield builder.build(); } case INT -> { - IntBlock.Builder builder = IntBlock.newBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); + IntBlock.Builder builder = blockFactory.newIntBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); for (int p = 0; p < AggregatorBenchmark.BLOCK_LENGTH; p++) { List<Integer> values = new ArrayList<>(); for (int i = 0; i < size; i++) { @@ -134,7 +141,7 @@ public void setup() { yield builder.build(); } case LONG -> { - LongBlock.Builder builder = LongBlock.newBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); + LongBlock.Builder builder = blockFactory.newLongBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); for (int p = 0; p < AggregatorBenchmark.BLOCK_LENGTH; p++) { List<Long> values = new ArrayList<>(); for (long i = 0; i < size; i++) { @@ -159,18 +166,18 @@ public void setup() { @Benchmark @OperationsPerInvocation(AggregatorBenchmark.BLOCK_LENGTH) public void adaptive() { - MultivalueDedupe.dedupeToBlockAdaptive(block, BlockFactory.getNonBreakingInstance()).close(); + MultivalueDedupe.dedupeToBlockAdaptive(block, blockFactory).close(); } @Benchmark @OperationsPerInvocation(AggregatorBenchmark.BLOCK_LENGTH) public void copyAndSort() { - MultivalueDedupe.dedupeToBlockUsingCopyAndSort(block, BlockFactory.getNonBreakingInstance()).close(); + MultivalueDedupe.dedupeToBlockUsingCopyAndSort(block, blockFactory).close(); } @Benchmark @OperationsPerInvocation(AggregatorBenchmark.BLOCK_LENGTH) public void copyMissing() { - MultivalueDedupe.dedupeToBlockUsingCopyMissing(block, BlockFactory.getNonBreakingInstance()).close(); + MultivalueDedupe.dedupeToBlockUsingCopyMissing(block, blockFactory).close(); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/TopNBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/TopNBenchmark.java index d723ea3e1a6b3..3d5a36ea288b4 100644 ---
a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/TopNBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/TopNBenchmark.java @@ -10,16 +10,15 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BytesRefBlock; -import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.ElementType; -import org.elasticsearch.compute.data.IntBlock; -import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.topn.TopNEncoder; @@ -51,6 +50,12 @@ @State(Scope.Thread) @Fork(1) public class TopNBenchmark { + private static final BigArrays BIG_ARRAYS = BigArrays.NON_RECYCLING_INSTANCE; // TODO real big arrays? + private static final BlockFactory blockFactory = BlockFactory.getInstance( + new NoopCircuitBreaker("noop"), + BigArrays.NON_RECYCLING_INSTANCE + ); + private static final int BLOCK_LENGTH = 8 * 1024; private static final String LONGS = "longs"; @@ -110,7 +115,7 @@ private static Operator operator(String data, int topCount) { ClusterSettings.createBuiltInClusterSettings() ); return new TopNOperator( - BlockFactory.getNonBreakingInstance(), + blockFactory, breakerService.getBreaker(CircuitBreaker.REQUEST), topCount, elementTypes, @@ -137,35 +142,35 @@ private static Page page(String data) { private static Block block(String data) { return switch (data) { case LONGS -> { - var builder = LongBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newLongBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { builder.appendLong(i); } yield builder.build(); } case INTS -> { - var builder = IntBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newIntBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { builder.appendInt(i); } yield builder.build(); } case DOUBLES -> { - var builder = DoubleBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newDoubleBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { builder.appendDouble(i); } yield builder.build(); } case BOOLEANS -> { - BooleanBlock.Builder builder = BooleanBlock.newBlockBuilder(BLOCK_LENGTH); + BooleanBlock.Builder builder = blockFactory.newBooleanBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { builder.appendBoolean(i % 2 == 1); } yield builder.build(); } case BYTES_REFS -> { - BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(BLOCK_LENGTH); + BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { builder.appendBytesRef(new BytesRef(Integer.toString(i))); } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java index afe8377d3e58c..dc517f257537a 100644 --- 
a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java @@ -22,13 +22,16 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; import org.elasticsearch.compute.data.DocVector; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; @@ -40,6 +43,7 @@ import org.elasticsearch.core.IOUtils; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.BlockLoader; +import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; @@ -78,6 +82,11 @@ public class ValuesSourceReaderBenchmark { private static final int BLOCK_LENGTH = 16 * 1024; private static final int INDEX_SIZE = 10 * BLOCK_LENGTH; private static final int COMMIT_INTERVAL = 500; + private static final BigArrays BIG_ARRAYS = BigArrays.NON_RECYCLING_INSTANCE; + private static final BlockFactory blockFactory = BlockFactory.getInstance( + new NoopCircuitBreaker("noop"), + BigArrays.NON_RECYCLING_INSTANCE + ); static { // Smoke test all the expected values and force loading subclasses more like prod @@ -89,8 +98,12 @@ public class ValuesSourceReaderBenchmark { for (String name : ValuesSourceReaderBenchmark.class.getField("name").getAnnotationsByType(Param.class)[0].value()) { benchmark.layout = layout; benchmark.name = name; - benchmark.setupPages(); - benchmark.benchmark(); + try { + benchmark.setupPages(); + benchmark.benchmark(); + } catch (Exception e) { + throw new AssertionError("error initializing [" + layout + "/" + name + "]", e); + } } } } finally { @@ -104,11 +117,11 @@ public class ValuesSourceReaderBenchmark { private static List<ValuesSourceReaderOperator.FieldInfo> fields(String name) { return switch (name) { case "3_stored_keywords" -> List.of( - new ValuesSourceReaderOperator.FieldInfo("keyword_1", List.of(blockLoader("stored_keyword_1"))), - new ValuesSourceReaderOperator.FieldInfo("keyword_2", List.of(blockLoader("stored_keyword_2"))), - new ValuesSourceReaderOperator.FieldInfo("keyword_3", List.of(blockLoader("stored_keyword_3"))) + new ValuesSourceReaderOperator.FieldInfo("keyword_1", ElementType.BYTES_REF, shardIdx -> blockLoader("stored_keyword_1")), + new ValuesSourceReaderOperator.FieldInfo("keyword_2", ElementType.BYTES_REF, shardIdx -> blockLoader("stored_keyword_2")), + new ValuesSourceReaderOperator.FieldInfo("keyword_3", ElementType.BYTES_REF, shardIdx -> blockLoader("stored_keyword_3")) ); - default -> List.of(new ValuesSourceReaderOperator.FieldInfo(name, List.of(blockLoader(name)))); + default -> List.of(new ValuesSourceReaderOperator.FieldInfo(name, elementType(name), shardIdx -> blockLoader(name))); }; } @@ -118,29 +131,38 @@ enum Where { STORED; } - private static
BlockLoader blockLoader(String name) { - Where where = Where.DOC_VALUES; - if (name.startsWith("stored_")) { - name = name.substring("stored_".length()); - where = Where.STORED; - } else if (name.startsWith("source_")) { - name = name.substring("source_".length()); - where = Where.SOURCE; - } + private static ElementType elementType(String name) { + name = WhereAndBaseName.fromName(name).name; switch (name) { case "long": - return numericBlockLoader(name, where, NumberFieldMapper.NumberType.LONG); + return ElementType.LONG; case "int": - return numericBlockLoader(name, where, NumberFieldMapper.NumberType.INTEGER); + return ElementType.INT; case "double": - return numericBlockLoader(name, where, NumberFieldMapper.NumberType.DOUBLE); - case "keyword": - name = "keyword_1"; + return ElementType.DOUBLE; } if (name.startsWith("keyword")) { + return ElementType.BYTES_REF; + } + throw new UnsupportedOperationException("no element type for [" + name + "]"); + } + + private static BlockLoader blockLoader(String name) { + WhereAndBaseName w = WhereAndBaseName.fromName(name); + switch (w.name) { + case "long": + return numericBlockLoader(w, NumberFieldMapper.NumberType.LONG); + case "int": + return numericBlockLoader(w, NumberFieldMapper.NumberType.INTEGER); + case "double": + return numericBlockLoader(w, NumberFieldMapper.NumberType.DOUBLE); + case "keyword": + w = new WhereAndBaseName(w.where, "keyword_1"); + } + if (w.name.startsWith("keyword")) { boolean syntheticSource = false; FieldType ft = new FieldType(KeywordFieldMapper.Defaults.FIELD_TYPE); - switch (where) { + switch (w.where) { case DOC_VALUES: break; case SOURCE: @@ -154,7 +176,7 @@ private static BlockLoader blockLoader(String name) { } ft.freeze(); return new KeywordFieldMapper.KeywordFieldType( - name, + w.name, ft, Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER, @@ -181,15 +203,31 @@ public Set<String> sourcePaths(String name) { public String parentField(String field) { throw new UnsupportedOperationException(); } + + @Override + public FieldNamesFieldMapper.FieldNamesFieldType fieldNames() { + return FieldNamesFieldMapper.FieldNamesFieldType.get(true); + } }); } throw new IllegalArgumentException("can't read [" + name + "]"); } - private static BlockLoader numericBlockLoader(String name, Where where, NumberFieldMapper.NumberType numberType) { + private record WhereAndBaseName(Where where, String name) { + static WhereAndBaseName fromName(String name) { + if (name.startsWith("stored_")) { + return new WhereAndBaseName(Where.STORED, name.substring("stored_".length())); + } else if (name.startsWith("source_")) { + return new WhereAndBaseName(Where.SOURCE, name.substring("source_".length())); + } + return new WhereAndBaseName(Where.DOC_VALUES, name); + } + } + + private static BlockLoader numericBlockLoader(WhereAndBaseName w, NumberFieldMapper.NumberType numberType) { boolean stored = false; boolean docValues = true; - switch (where) { + switch (w.where) { case DOC_VALUES: break; case SOURCE: @@ -200,7 +238,7 @@ private static BlockLoader numericBlockLoader(String name, Where where, NumberFi throw new UnsupportedOperationException(); } return new NumberFieldMapper.NumberFieldType( - name, + w.name, numberType, true, stored, @@ -241,7 +279,7 @@ private static BlockLoader numericBlockLoader(String name, Where where, NumberFi @OperationsPerInvocation(INDEX_SIZE) public void benchmark() { ValuesSourceReaderOperator op = new ValuesSourceReaderOperator( - BlockFactory.getNonBreakingInstance(), + blockFactory, fields(name), List.of(new
ValuesSourceReaderOperator.ShardContext(reader, () -> { throw new UnsupportedOperationException("can't load _source here"); @@ -374,7 +412,7 @@ private void setupPages() { pages = new ArrayList<>(); switch (layout) { case "in_order" -> { - IntVector.Builder docs = IntVector.newVectorBuilder(BLOCK_LENGTH); + IntVector.Builder docs = blockFactory.newIntVectorBuilder(BLOCK_LENGTH); for (LeafReaderContext ctx : reader.leaves()) { int begin = 0; while (begin < ctx.reader().maxDoc()) { @@ -385,14 +423,14 @@ private void setupPages() { pages.add( new Page( new DocVector( - IntBlock.newConstantBlockWith(0, end - begin).asVector(), - IntBlock.newConstantBlockWith(ctx.ord, end - begin).asVector(), + blockFactory.newConstantIntBlockWith(0, end - begin).asVector(), + blockFactory.newConstantIntBlockWith(ctx.ord, end - begin).asVector(), docs.build(), true ).asBlock() ) ); - docs = IntVector.newVectorBuilder(BLOCK_LENGTH); + docs = blockFactory.newIntVectorBuilder(BLOCK_LENGTH); begin = end; } } @@ -403,8 +441,8 @@ record ItrAndOrd(PrimitiveIterator.OfInt itr, int ord) {} for (LeafReaderContext ctx : reader.leaves()) { docItrs.add(new ItrAndOrd(IntStream.range(0, ctx.reader().maxDoc()).iterator(), ctx.ord)); } - IntVector.Builder docs = IntVector.newVectorBuilder(BLOCK_LENGTH); - IntVector.Builder leafs = IntVector.newVectorBuilder(BLOCK_LENGTH); + IntVector.Builder docs = blockFactory.newIntVectorBuilder(BLOCK_LENGTH); + IntVector.Builder leafs = blockFactory.newIntVectorBuilder(BLOCK_LENGTH); int size = 0; while (docItrs.isEmpty() == false) { Iterator<ItrAndOrd> itrItr = docItrs.iterator(); @@ -420,12 +458,11 @@ record ItrAndOrd(PrimitiveIterator.OfInt itr, int ord) {} if (size >= BLOCK_LENGTH) { pages.add( new Page( - new DocVector(IntBlock.newConstantBlockWith(0, size).asVector(), leafs.build(), docs.build(), null) - .asBlock() + new DocVector(blockFactory.newConstantIntVector(0, size), leafs.build(), docs.build(), null).asBlock() ) ); - docs = IntVector.newVectorBuilder(BLOCK_LENGTH); - leafs = IntVector.newVectorBuilder(BLOCK_LENGTH); + docs = blockFactory.newIntVectorBuilder(BLOCK_LENGTH); + leafs = blockFactory.newIntVectorBuilder(BLOCK_LENGTH); size = 0; } } @@ -434,7 +471,7 @@ record ItrAndOrd(PrimitiveIterator.OfInt itr, int ord) {} pages.add( new Page( new DocVector( - IntBlock.newConstantBlockWith(0, size).asVector(), + blockFactory.newConstantIntBlockWith(0, size).asVector(), leafs.build().asBlock().asVector(), docs.build(), null @@ -460,9 +497,9 @@ record ItrAndOrd(PrimitiveIterator.OfInt itr, int ord) {} pages.add( new Page( new DocVector( - IntBlock.newConstantBlockWith(0, 1).asVector(), - IntBlock.newConstantBlockWith(next.ord, 1).asVector(), - IntBlock.newConstantBlockWith(next.itr.nextInt(), 1).asVector(), + blockFactory.newConstantIntVector(0, 1), + blockFactory.newConstantIntVector(next.ord, 1), + blockFactory.newConstantIntVector(next.itr.nextInt(), 1), true ).asBlock() ) diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/DynamicMapperBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/DynamicMapperBenchmark.java index eae233e276038..2d042977cc4e7 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/DynamicMapperBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/DynamicMapperBenchmark.java @@ -108,7 +108,8 @@ private SourceToParse generateRandomDocument() { if (random.nextBoolean()) { continue; } - String objFieldPrefix = Stream.generate(() -> "obj_field_" +
idx).limit(objFieldDepth).collect(Collectors.joining(".")); + int objFieldDepthActual = random.nextInt(1, objFieldDepth); + String objFieldPrefix = Stream.generate(() -> "obj_field_" + idx).limit(objFieldDepthActual).collect(Collectors.joining(".")); for (int j = 0; j < textFields; j++) { if (random.nextBoolean()) { StringBuilder fieldValueBuilder = generateTextField(fieldValueCountMax); diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java index aabd1ac3f7a1b..9858b124f0e73 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java @@ -45,7 +45,7 @@ public static MapperService create(String mappings) { .put("index.number_of_replicas", 0) .put("index.number_of_shards", 1) .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put("index.mapping.total_fields.limit", 10000) + .put("index.mapping.total_fields.limit", 100000) .build(); IndexMetadata meta = IndexMetadata.builder("index").settings(settings).build(); IndexSettings indexSettings = new IndexSettings(meta, settings); diff --git a/build-tools-internal/build.gradle b/build-tools-internal/build.gradle index d0c52945801d3..a4e0e2389dbec 100644 --- a/build-tools-internal/build.gradle +++ b/build-tools-internal/build.gradle @@ -255,7 +255,7 @@ dependencies { // ensuring brought asm version brought in by spock is up-to-date testImplementation buildLibs.asm integTestImplementation buildLibs.asm - integTestImplementation('org.ow2.asm:asm:9.5') + integTestImplementation('org.ow2.asm:asm:9.6') api("org.yaml:snakeyaml") { version { strictly(versions.snakeyaml) } } @@ -295,6 +295,8 @@ dependencies { compileOnly buildLibs.checkstyle compileOnly buildLibs.reflections + implementation 'com.github.javaparser:javaparser-core:3.18.0' + runtimeOnly "org.elasticsearch.gradle:reaper:$version" testImplementation buildLibs.checkstyle testImplementation buildLibs.wiremock diff --git a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle index f691d4bd996a7..aaae18401685a 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle @@ -79,6 +79,7 @@ if (BuildParams.inFipsJvm) { // with no x-pack. 
Tests having security explicitly enabled/disabled will override this setting setting 'xpack.security.enabled', 'false' setting 'xpack.security.fips_mode.enabled', 'true' + setting 'xpack.security.fips_mode.required_providers', '["BCFIPS", "BCJSSE"]' setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.authc.password_hashing.algorithm', 'pbkdf2_stretch' keystorePassword 'keystore-password' diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java index bad3ebb11a0dd..f0604ab33ceec 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java @@ -134,7 +134,7 @@ public void execute(BuildFinishedFlowAction.Parameters parameters) throws FileNo } uploadFile.getParentFile().mkdirs(); createBuildArchiveTar(parameters.getFilteredFiles().get(), parameters.getProjectDir().get(), uploadFile); - if (uploadFile.exists() && System.getenv("BUILDKITE").equals("true")) { + if (uploadFile.exists() && "true".equals(System.getenv("BUILDKITE"))) { String uploadFilePath = "build/" + uploadFile.getName(); try { System.out.println("Uploading buildkite artifact: " + uploadFilePath + "..."); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java index f1804064b7e07..31b62c4ac700f 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java @@ -108,10 +108,7 @@ public void execute(Task t) { "--add-opens=java.base/java.nio.file=ALL-UNNAMED", "--add-opens=java.base/java.time=ALL-UNNAMED", "--add-opens=java.management/java.lang.management=ALL-UNNAMED", - "-XX:+HeapDumpOnOutOfMemoryError", - // REMOVE once bumped to a JDK greater than 21.0.1, https://github.com/elastic/elasticsearch/issues/103004 - "-XX:CompileCommand=exclude,org.apache.lucene.util.MSBRadixSorter::computeCommonPrefixLengthAndBuildHistogram", - "-XX:CompileCommand=exclude,org.apache.lucene.util.RadixSelector::computeCommonPrefixLengthAndBuildHistogram" + "-XX:+HeapDumpOnOutOfMemoryError" ); test.getJvmArgumentProviders().add(new SimpleCommandLineArgumentProvider("-XX:HeapDumpPath=" + heapdumpDir)); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java index 8042bdd64dabb..23afcab7bec7c 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java @@ -123,10 +123,6 @@ private static ListMultimap<Class<?>, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:ccs-rolling-upgrade"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:correctness"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:mixed-node"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:security"); -
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:heap-attack"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:multi-node"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:single-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:fleet:qa:rest"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:identity-provider:qa:idp-rest-tests"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ilm:qa:multi-cluster"); @@ -166,7 +162,6 @@ private static ListMultimap<Class<?>, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:vector-tile:qa:multi-cluster"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:watcher:qa:rest"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:watcher:qa:with-security"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:mixed-cluster"); return map; } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/packer/CacheTestFixtureResourcesPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/packer/CacheTestFixtureResourcesPlugin.java index f8ab8eef1004c..c8ce9d5ca2c71 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/packer/CacheTestFixtureResourcesPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/packer/CacheTestFixtureResourcesPlugin.java @@ -12,6 +12,7 @@ import org.elasticsearch.gradle.internal.ResolveAllDependencies; import org.gradle.api.Plugin; import org.gradle.api.Project; +import org.gradle.api.artifacts.Dependency; import org.gradle.api.artifacts.dsl.DependencyHandler; import org.gradle.api.plugins.JavaPlugin; import org.gradle.api.plugins.JavaPluginExtension; @@ -26,9 +27,12 @@ public void apply(Project project) { var cacheTestFixturesConfiguration = project.getConfigurations().create(CACHE_TEST_FIXTURES); cacheTestFixturesConfiguration.defaultDependencies(deps -> { DependencyHandler dependencyHandler = project.getDependencies(); - deps.add(dependencyHandler.create("org.reflections:reflections:" + VersionProperties.getVersions().get("reflections"))); - deps.add(dependencyHandler.create("org.javassist:javassist:" + VersionProperties.getVersions().get("javassist"))); + Dependency reflections = dependencyHandler.create( + "org.reflections:reflections:" + VersionProperties.getVersions().get("reflections") + ); + deps.add(reflections); }); + project.getPlugins().withType(JavaPlugin.class, javaPlugin -> { var cacheTestFixtures = project.getTasks().register(CACHE_TEST_FIXTURES, CacheCacheableTestFixtures.class, (t) -> { var testSourceSet = project.getExtensions() diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java index d2ba86bb99cf2..24df3c4dab464 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java @@ -59,6 +59,7 @@ import static org.gradle.api.JavaVersion.VERSION_20; import static org.gradle.api.JavaVersion.VERSION_21; +import static org.gradle.api.JavaVersion.VERSION_22; @CacheableTask public abstract class ThirdPartyAuditTask extends DefaultTask { @@ -335,8 +336,8 @@ private String runForbiddenAPIsCli() throws IOException {
spec.setExecutable(javaHome.get() + "/bin/java"); } spec.classpath(getForbiddenAPIsClasspath(), classpath); - // Enable explicitly for each release as appropriate. Just JDK 20/21 for now, and just the vector module. - if (isJavaVersion(VERSION_20) || isJavaVersion(VERSION_21)) { + // Enable explicitly for each release as appropriate. Just JDK 20/21/22 for now, and just the vector module. + if (isJavaVersion(VERSION_20) || isJavaVersion(VERSION_21) || isJavaVersion(VERSION_22)) { spec.jvmArgs("--add-modules", "jdk.incubator.vector"); } spec.jvmArgs("-Xmx1g"); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java index f9f831439f2ca..6c978edd48c29 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java @@ -47,6 +47,9 @@ public void apply(Project project) { final Version version = VersionProperties.getElasticsearchVersion(); + project.getTasks() + .register("updateVersions", UpdateVersionsTask.class, t -> project.getTasks().named("spotlessApply").get().mustRunAfter(t)); + final FileTree yamlFiles = projectDirectory.dir("docs/changelog") .getAsFileTree() .matching(new PatternSet().include("**/*.yml", "**/*.yaml")); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/UpdateVersionsTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/UpdateVersionsTask.java new file mode 100644 index 0000000000000..f8073f384b871 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/UpdateVersionsTask.java @@ -0,0 +1,215 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.internal.release; + +import com.github.javaparser.StaticJavaParser; +import com.github.javaparser.ast.CompilationUnit; +import com.github.javaparser.ast.NodeList; +import com.github.javaparser.ast.body.ClassOrInterfaceDeclaration; +import com.github.javaparser.ast.body.FieldDeclaration; +import com.github.javaparser.ast.body.VariableDeclarator; +import com.github.javaparser.ast.expr.NameExpr; +import com.github.javaparser.printer.lexicalpreservation.LexicalPreservingPrinter; +import com.google.common.annotations.VisibleForTesting; + +import org.elasticsearch.gradle.Version; +import org.gradle.api.DefaultTask; +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; +import org.gradle.api.tasks.TaskAction; +import org.gradle.api.tasks.options.Option; +import org.gradle.initialization.layout.BuildLayout; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Objects; +import java.util.Optional; +import java.util.TreeMap; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +import javax.annotation.Nullable; +import javax.inject.Inject; + +public class UpdateVersionsTask extends DefaultTask { + private static final Logger LOGGER = Logging.getLogger(UpdateVersionsTask.class); + + static final String SERVER_MODULE_PATH = "server/src/main/java/"; + static final String VERSION_FILE_PATH = SERVER_MODULE_PATH + "org/elasticsearch/Version.java"; + + static final Pattern VERSION_FIELD = Pattern.compile("V_(\\d+)_(\\d+)_(\\d+)(?:_(\\w+))?"); + + final Path rootDir; + + @Nullable + private Version addVersion; + private boolean setCurrent; + @Nullable + private Version removeVersion; + + @Inject + public UpdateVersionsTask(BuildLayout layout) { + rootDir = layout.getRootDirectory().toPath(); + } + + @Option(option = "add-version", description = "Specifies the version to add") + public void addVersion(String version) { + this.addVersion = Version.fromString(version); + } + + @Option(option = "set-current", description = "Set the 'current' constant to the new version") + public void setCurrent(boolean setCurrent) { + this.setCurrent = setCurrent; + } + + @Option(option = "remove-version", description = "Specifies the version to remove") + public void removeVersion(String version) { + this.removeVersion = Version.fromString(version); + } + + static String toVersionField(Version version) { + return String.format("V_%d_%d_%d", version.getMajor(), version.getMinor(), version.getRevision()); + } + + static Optional<Version> parseVersionField(CharSequence field) { + Matcher m = VERSION_FIELD.matcher(field); + if (m.find() == false) return Optional.empty(); + + return Optional.of( + new Version(Integer.parseInt(m.group(1)), Integer.parseInt(m.group(2)), Integer.parseInt(m.group(3)), m.group(4)) + ); + } + + @TaskAction + public void executeTask() throws IOException { + if (addVersion == null && removeVersion == null) { + throw new IllegalArgumentException("No versions to add or remove specified"); + } + if (setCurrent && addVersion == null) { + throw new IllegalArgumentException("No new version added to set as the current version"); + } + if (Objects.equals(addVersion, removeVersion)) { + throw new IllegalArgumentException("Same version specified to add and remove"); + } + + Path versionJava = rootDir.resolve(VERSION_FILE_PATH); + CompilationUnit file =
LexicalPreservingPrinter.setup(StaticJavaParser.parse(versionJava)); + + Optional<CompilationUnit> modifiedFile = Optional.empty(); + if (addVersion != null) { + LOGGER.lifecycle("Adding new version [{}] to [{}]", addVersion, versionJava); + var added = addVersionConstant(modifiedFile.orElse(file), addVersion, setCurrent); + if (added.isPresent()) { + modifiedFile = added; + } + } + if (removeVersion != null) { + LOGGER.lifecycle("Removing version [{}] from [{}]", removeVersion, versionJava); + var removed = removeVersionConstant(modifiedFile.orElse(file), removeVersion); + if (removed.isPresent()) { + modifiedFile = removed; + } + } + + if (modifiedFile.isPresent()) { + writeOutNewContents(versionJava, modifiedFile.get()); + } + } + + @VisibleForTesting + static Optional<CompilationUnit> addVersionConstant(CompilationUnit versionJava, Version version, boolean updateCurrent) { + String newFieldName = toVersionField(version); + + ClassOrInterfaceDeclaration versionClass = versionJava.getClassByName("Version").get(); + if (versionClass.getFieldByName(newFieldName).isPresent()) { + LOGGER.lifecycle("New version constant [{}] already present, skipping", newFieldName); + return Optional.empty(); + } + + NavigableMap<Version, FieldDeclaration> versions = versionClass.getFields() + .stream() + .map(f -> Map.entry(f, parseVersionField(f.getVariable(0).getNameAsString()))) + .filter(e -> e.getValue().isPresent()) + .collect(Collectors.toMap(e -> e.getValue().get(), Map.Entry::getKey, (v1, v2) -> { + throw new IllegalArgumentException("Duplicate version constants " + v1); + }, TreeMap::new)); + + // find the version this should be inserted after + var previousVersion = versions.lowerEntry(version); + if (previousVersion == null) { + throw new IllegalStateException(String.format("Could not find previous version to [%s]", version)); + } + FieldDeclaration newVersion = createNewVersionConstant( + previousVersion.getValue(), + newFieldName, + String.format("%d_%02d_%02d_99", version.getMajor(), version.getMinor(), version.getRevision()) + ); + versionClass.getMembers().addAfter(newVersion, previousVersion.getValue()); + + if (updateCurrent) { + versionClass.getFieldByName("CURRENT") + .orElseThrow(() -> new IllegalArgumentException("Could not find CURRENT constant")) + .getVariable(0) + .setInitializer(new NameExpr(newFieldName)); + } + + return Optional.of(versionJava); + } + + private static FieldDeclaration createNewVersionConstant(FieldDeclaration lastVersion, String newName, String newExpr) { + return new FieldDeclaration( + new NodeList<>(lastVersion.getModifiers()), + new VariableDeclarator( + lastVersion.getCommonType(), + newName, + StaticJavaParser.parseExpression(String.format("new Version(%s)", newExpr)) + ) + ); + } + + @VisibleForTesting + static Optional<CompilationUnit> removeVersionConstant(CompilationUnit versionJava, Version version) { + String removeFieldName = toVersionField(version); + + ClassOrInterfaceDeclaration versionClass = versionJava.getClassByName("Version").get(); + var declaration = versionClass.getFieldByName(removeFieldName); + if (declaration.isEmpty()) { + LOGGER.lifecycle("Version constant [{}] not found, skipping", removeFieldName); + return Optional.empty(); + } + + // check if this is referenced by CURRENT + String currentReference = versionClass.getFieldByName("CURRENT") + .orElseThrow(() -> new IllegalArgumentException("Could not find CURRENT constant")) + .getVariable(0) + .getInitializer() + .get() + .asNameExpr() + .getNameAsString(); + if (currentReference.equals(removeFieldName)) { + throw new
IllegalArgumentException(String.format("Cannot remove version [%s], it is referenced by CURRENT", version)); + } + + declaration.get().remove(); + + return Optional.of(versionJava); + } + + static void writeOutNewContents(Path file, CompilationUnit unit) throws IOException { + if (unit.containsData(LexicalPreservingPrinter.NODE_TEXT_DATA) == false) { + throw new IllegalArgumentException("CompilationUnit has no lexical information for output"); + } + Files.writeString(file, LexicalPreservingPrinter.print(unit), StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); + } +} diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/UpdateVersionsTaskTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/UpdateVersionsTaskTests.java new file mode 100644 index 0000000000000..97441990d47c2 --- /dev/null +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/UpdateVersionsTaskTests.java @@ -0,0 +1,244 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import com.github.javaparser.StaticJavaParser; +import com.github.javaparser.ast.CompilationUnit; +import com.github.javaparser.ast.Node; +import com.github.javaparser.ast.body.FieldDeclaration; +import com.github.javaparser.printer.lexicalpreservation.LexicalPreservingPrinter; + +import org.elasticsearch.gradle.Version; +import org.junit.Test; + +import java.io.StringWriter; +import java.nio.file.Path; +import java.util.List; +import java.util.Optional; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; + +public class UpdateVersionsTaskTests { + + @Test + public void addVersion_versionExists() { + final String versionJava = """ + public class Version { + public static final Version V_8_10_0 = new Version(8_10_00_99); + public static final Version V_8_10_1 = new Version(8_10_01_99); + public static final Version V_8_11_0 = new Version(8_11_00_99); + public static final Version CURRENT = V_8_11_0; + }"""; + + CompilationUnit unit = StaticJavaParser.parse(versionJava); + + var newUnit = UpdateVersionsTask.addVersionConstant(unit, Version.fromString("8.10.1"), false); + assertThat(newUnit.isPresent(), is(false)); + } + + @Test + public void addVersion_oldVersion() { + final String versionJava = """ + public class Version { + public static final Version V_8_10_0 = new Version(8_10_00_99); + public static final Version V_8_10_1 = new Version(8_10_01_99); + public static final Version V_8_11_0 = new Version(8_11_00_99); + public static final Version CURRENT = V_8_11_0; + }"""; + final String updatedVersionJava = """ + public class Version { + + public static final Version V_8_10_0 = new Version(8_10_00_99); + + public static final Version V_8_10_1 = new Version(8_10_01_99); + + public static final Version V_8_10_2 = new Version(8_10_02_99); + + public static final Version V_8_11_0 = new Version(8_11_00_99); + + 
public static final Version CURRENT = V_8_11_0; + } + """; + + CompilationUnit unit = StaticJavaParser.parse(versionJava); + + UpdateVersionsTask.addVersionConstant(unit, Version.fromString("8.10.2"), false); + + assertThat(unit, hasToString(updatedVersionJava)); + } + + @Test + public void addVersion_newVersion_current() { + final String versionJava = """ + public class Version { + public static final Version V_8_10_0 = new Version(8_10_00_99); + public static final Version V_8_10_1 = new Version(8_10_01_99); + public static final Version V_8_11_0 = new Version(8_11_00_99); + public static final Version CURRENT = V_8_11_0; + }"""; + final String updatedVersionJava = """ + public class Version { + + public static final Version V_8_10_0 = new Version(8_10_00_99); + + public static final Version V_8_10_1 = new Version(8_10_01_99); + + public static final Version V_8_11_0 = new Version(8_11_00_99); + + public static final Version V_8_11_1 = new Version(8_11_01_99); + + public static final Version CURRENT = V_8_11_1; + } + """; + + CompilationUnit unit = StaticJavaParser.parse(versionJava); + + UpdateVersionsTask.addVersionConstant(unit, Version.fromString("8.11.1"), true); + + assertThat(unit, hasToString(updatedVersionJava)); + } + + @Test + public void removeVersion_versionDoesntExist() { + final String versionJava = """ + public class Version { + public static final Version V_8_10_0 = new Version(8_10_00_99); + public static final Version V_8_10_1 = new Version(8_10_01_99); + public static final Version V_8_11_0 = new Version(8_11_00_99); + public static final Version CURRENT = V_8_11_0; + }"""; + + CompilationUnit unit = StaticJavaParser.parse(versionJava); + + var newUnit = UpdateVersionsTask.removeVersionConstant(unit, Version.fromString("8.10.2")); + assertThat(newUnit.isPresent(), is(false)); + } + + @Test + public void removeVersion_versionIsCurrent() { + final String versionJava = """ + public class Version { + public static final Version V_8_10_0 = new Version(8_10_00_99); + public static final Version V_8_10_1 = new Version(8_10_01_99); + public static final Version V_8_11_0 = new Version(8_11_00_99); + public static final Version CURRENT = V_8_11_0; + }"""; + + CompilationUnit unit = StaticJavaParser.parse(versionJava); + + var ex = assertThrows( + IllegalArgumentException.class, + () -> UpdateVersionsTask.removeVersionConstant(unit, Version.fromString("8.11.0")) + ); + assertThat(ex.getMessage(), equalTo("Cannot remove version [8.11.0], it is referenced by CURRENT")); + } + + @Test + public void removeVersion() { + final String versionJava = """ + public class Version { + public static final Version V_8_10_0 = new Version(8_10_00_99); + public static final Version V_8_10_1 = new Version(8_10_01_99); + public static final Version V_8_11_0 = new Version(8_11_00_99); + public static final Version CURRENT = V_8_11_0; + }"""; + final String updatedVersionJava = """ + public class Version { + + public static final Version V_8_10_0 = new Version(8_10_00_99); + + public static final Version V_8_11_0 = new Version(8_11_00_99); + + public static final Version CURRENT = V_8_11_0; + } + """; + + CompilationUnit unit = StaticJavaParser.parse(versionJava); + + UpdateVersionsTask.removeVersionConstant(unit, Version.fromString("8.10.1")); + + assertThat(unit, hasToString(updatedVersionJava)); + } + + @Test + public void updateVersionFile_addsCorrectly() throws Exception { + Version newVersion = new Version(50, 10, 20); + String versionField = UpdateVersionsTask.toVersionField(newVersion); + + 
Path versionFile = Path.of("..", UpdateVersionsTask.VERSION_FILE_PATH); + CompilationUnit unit = LexicalPreservingPrinter.setup(StaticJavaParser.parse(versionFile)); + assertFalse("Test version already exists in the file", findFirstField(unit, versionField).isPresent()); + + List<FieldDeclaration> existingFields = unit.findAll(FieldDeclaration.class); + + var result = UpdateVersionsTask.addVersionConstant(unit, newVersion, true); + assertThat(result.isPresent(), is(true)); + + // write out & parse back in again + StringWriter writer = new StringWriter(); + LexicalPreservingPrinter.print(unit, writer); + unit = StaticJavaParser.parse(writer.toString()); + + // a field has been added + assertThat(unit.findAll(FieldDeclaration.class), hasSize(existingFields.size() + 1)); + // the field has the right name + var field = findFirstField(unit, versionField); + assertThat(field.isPresent(), is(true)); + // the field has the right constant + assertThat( + field.get().getVariable(0).getInitializer().get(), + hasToString( + String.format("new Version(%d_%02d_%02d_99)", newVersion.getMajor(), newVersion.getMinor(), newVersion.getRevision()) + ) + ); + // and CURRENT has been updated + var current = findFirstField(unit, "CURRENT"); + assertThat(current.get().getVariable(0).getInitializer().get(), hasToString(versionField)); + } + + @Test + public void updateVersionFile_removesCorrectly() throws Exception { + Path versionFile = Path.of("..", UpdateVersionsTask.VERSION_FILE_PATH); + CompilationUnit unit = LexicalPreservingPrinter.setup(StaticJavaParser.parse(versionFile)); + + List<FieldDeclaration> existingFields = unit.findAll(FieldDeclaration.class); + + var staticVersionFields = unit.findAll( + FieldDeclaration.class, + f -> f.isStatic() && f.getVariable(0).getTypeAsString().equals("Version") + ); + // remove the last-but-two static version field (skip CURRENT and the latest version) + String constant = staticVersionFields.get(staticVersionFields.size() - 3).getVariable(0).getNameAsString(); + + Version versionToRemove = UpdateVersionsTask.parseVersionField(constant).orElseThrow(AssertionError::new); + var result = UpdateVersionsTask.removeVersionConstant(unit, versionToRemove); + assertThat(result.isPresent(), is(true)); + + // write out & parse back in again + StringWriter writer = new StringWriter(); + LexicalPreservingPrinter.print(unit, writer); + unit = StaticJavaParser.parse(writer.toString()); + + // a field has been removed + assertThat(unit.findAll(FieldDeclaration.class), hasSize(existingFields.size() - 1)); + // the removed field does not exist + var field = findFirstField(unit, constant); + assertThat(field.isPresent(), is(false)); + } + + private static Optional<FieldDeclaration> findFirstField(Node node, String name) { + return node.findFirst(FieldDeclaration.class, f -> f.getVariable(0).getName().getIdentifier().equals(name)); + } +} diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/match/RemoveMatchTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/match/RemoveMatchTests.java index 8ae0cb4a83831..3b30cfcb7361e 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/match/RemoveMatchTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/match/RemoveMatchTests.java @@ -9,11 +9,8 @@ package org.elasticsearch.gradle.internal.test.rest.transform.match; import com.fasterxml.jackson.databind.JsonNode; -import
com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; -import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; import org.elasticsearch.gradle.internal.test.rest.transform.TransformTests; import org.hamcrest.CoreMatchers; @@ -29,10 +26,6 @@ public class RemoveMatchTests extends TransformTests { - private static final YAMLFactory YAML_FACTORY = new YAMLFactory(); - private static final ObjectMapper MAPPER = new ObjectMapper(YAML_FACTORY); - private static final ObjectReader READER = MAPPER.readerFor(ObjectNode.class); - @Test public void testRemoveAll() throws Exception { String testName = "/rest/transform/match/match_original.yml"; diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index c34bdc95046b3..a76f507079f2f 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -1,5 +1,5 @@ elasticsearch = 8.13.0 -lucene = 9.9.0-snapshot-bb4fec631e6 +lucene = 9.9.1 bundled_jdk_vendor = openjdk bundled_jdk = 21.0.1+12@415e3f918a1f4062a0074a2794853d0d @@ -27,7 +27,7 @@ bouncycastle=1.76 opensaml = 4.3.0 # client dependencies -httpclient = 4.5.13 +httpclient = 4.5.14 httpcore = 4.4.13 httpasyncclient = 4.1.5 commonslogging = 1.2 @@ -36,7 +36,7 @@ protobuf = 3.21.9 # test dependencies randomizedrunner = 2.8.0 -junit = 4.12 +junit = 4.13.2 junit5 = 5.7.1 hamcrest = 2.1 mocksocket = 1.2 @@ -48,8 +48,7 @@ ductTape = 1.0.8 commonsCompress = 1.24.0 # packer caching build logic -reflections = 0.9.12 -javassist = 3.28.0-GA +reflections = 0.10.2 # benchmark dependencies jmh = 1.26 diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java b/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java index 00e5834b0f826..880d7a069e9a7 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java @@ -48,7 +48,7 @@ public static SourceSetContainer getJavaSourceSets(Project project) { } public static void maybeConfigure(TaskContainer tasks, String name, Action<? super Task> config) { - tasks.matching(t -> t.getName().equals(name)).configureEach(t -> config.execute(t)); + tasks.matching(t -> t.getName().equals(name)).configureEach(config); } public static <T extends Task> void maybeConfigure( @@ -57,7 +57,7 @@ Class<T> type, Action<? super T> config ) { - tasks.withType(type).matching((Spec<T>) t -> t.getName().equals(name)).configureEach(task -> { config.execute(task); }); + tasks.withType(type).matching((Spec<T>) t -> t.getName().equals(name)).configureEach(config); } public static TaskProvider<Task> findByName(TaskContainer tasks, String name) { diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java index b39007c3a3691..0e91a063596e3 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java @@ -12,7 +12,7 @@ import org.elasticsearch.plugin.noop.NoopPlugin; import org.elasticsearch.rest.BaseRestHandler; import
org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.util.List; @@ -39,6 +39,10 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) { SearchRequest searchRequest = new SearchRequest(); - return channel -> client.execute(NoopPlugin.NOOP_SEARCH_ACTION, searchRequest, new RestChunkedToXContentListener<>(channel)); + return channel -> client.execute( + NoopPlugin.NOOP_SEARCH_ACTION, + searchRequest, + new RestRefCountedChunkedToXContentListener<>(channel) + ); } } diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java index baefb15e6373a..790b6bfd6deca 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.plugin.noop.action.search; -import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; @@ -18,10 +17,8 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.plugin.noop.NoopPlugin; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.tasks.Task; @@ -45,15 +42,13 @@ public TransportNoopSearchAction(TransportService transportService, ActionFilter protected void doExecute(Task task, SearchRequest request, ActionListener<SearchResponse> listener) { listener.onResponse( new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), 0.0f), - InternalAggregations.EMPTY, - new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), - false, - false, - 1 - ), + SearchHits.EMPTY_WITH_TOTAL_HITS, + InternalAggregations.EMPTY, + new Suggest(Collections.emptyList()), + false, + false, + new SearchProfileResults(Collections.emptyMap()), + 1, "", 1, 1, diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java index 600ad6d671711..250f3f0b34cec 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java @@ -37,6 +37,8 @@ import java.util.Locale; import java.util.Objects; import java.util.Properties; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicLong; import javax.net.ssl.SSLContext; @@ -51,6 +53,9 @@ public final class RestClientBuilder { public static final int DEFAULT_MAX_CONN_PER_ROUTE = 10; public static final int DEFAULT_MAX_CONN_TOTAL = 30; + static
final String THREAD_NAME_PREFIX = "elasticsearch-rest-client-"; + private static final String THREAD_NAME_FORMAT = THREAD_NAME_PREFIX + "%d-thread-%d"; + public static final String VERSION; static final String META_HEADER_NAME = "X-Elastic-Client-Meta"; static final String META_HEADER_VALUE; @@ -298,6 +303,24 @@ public RestClient build() { return restClient; } + /** + * Similar to {@code org.apache.http.impl.nio.reactor.AbstractMultiworkerIOReactor.DefaultThreadFactory} but with better thread names. + */ + private static class RestClientThreadFactory implements ThreadFactory { + private static final AtomicLong CLIENT_THREAD_POOL_ID_GENERATOR = new AtomicLong(); + + private final long clientThreadPoolId = CLIENT_THREAD_POOL_ID_GENERATOR.getAndIncrement(); // 0-based + private final AtomicLong clientThreadId = new AtomicLong(); + + @Override + public Thread newThread(Runnable runnable) { + return new Thread( + runnable, + String.format(Locale.ROOT, THREAD_NAME_FORMAT, clientThreadPoolId, clientThreadId.incrementAndGet()) // 1-based + ); + } + } + private CloseableHttpAsyncClient createHttpClient() { // default timeouts are all infinite RequestConfig.Builder requestConfigBuilder = RequestConfig.custom() @@ -315,7 +338,8 @@ private CloseableHttpAsyncClient createHttpClient() { .setMaxConnTotal(DEFAULT_MAX_CONN_TOTAL) .setSSLContext(SSLContext.getDefault()) .setUserAgent(USER_AGENT_HEADER_VALUE) - .setTargetAuthenticationStrategy(new PersistentCredentialsAuthenticationStrategy()); + .setTargetAuthenticationStrategy(new PersistentCredentialsAuthenticationStrategy()) + .setThreadFactory(new RestClientThreadFactory()); if (httpClientConfigCallback != null) { httpClientBuilder = httpClientConfigCallback.customizeHttpClient(httpClientBuilder); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java index b9e0e996c3f76..265bd52eabe83 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java @@ -42,15 +42,21 @@ import java.security.cert.Certificate; import java.security.cert.CertificateFactory; import java.security.spec.PKCS8EncodedKeySpec; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLHandshakeException; import javax.net.ssl.TrustManagerFactory; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.startsWith; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; /** @@ -105,6 +111,40 @@ public void testBuilderUsesDefaultSSLContext() throws Exception { } } + public void testBuilderSetsThreadName() throws Exception { + assumeFalse("https://github.com/elastic/elasticsearch/issues/49094", inFipsJvm()); + final SSLContext defaultSSLContext = SSLContext.getDefault(); + try { + SSLContext.setDefault(getSslContext()); + try (RestClient client = buildRestClient()) { + final CountDownLatch latch = new CountDownLatch(1); + client.performRequestAsync(new Request("GET", "/"), new ResponseListener() { + @Override + public void onSuccess(Response response) { + assertThat( + 
Thread.currentThread().getName(), + allOf( + startsWith(RestClientBuilder.THREAD_NAME_PREFIX), + containsString("elasticsearch"), + containsString("rest-client") + ) + ); + assertEquals(200, response.getStatusLine().getStatusCode()); + latch.countDown(); + } + + @Override + public void onFailure(Exception exception) { + throw new AssertionError("unexpected", exception); + } + }); + assertTrue(latch.await(10, TimeUnit.SECONDS)); + } + } finally { + SSLContext.setDefault(defaultSSLContext); + } + } + private RestClient buildRestClient() { InetSocketAddress address = httpsServer.getAddress(); return RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "https")).build(); diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index 9e26582d58439..c5e905f461f45 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -58,10 +58,6 @@ # result in less optimal vector performance 20-:--add-modules=jdk.incubator.vector -# REMOVE once bumped to a JDK greater than 21.0.1, https://github.com/elastic/elasticsearch/issues/103004 -19-21:-XX:CompileCommand=exclude,org.apache.lucene.util.MSBRadixSorter::computeCommonPrefixLengthAndBuildHistogram -19-21:-XX:CompileCommand=exclude,org.apache.lucene.util.RadixSelector::computeCommonPrefixLengthAndBuildHistogram - ## heap dumps # generate a heap dump when an allocation from the Java heap fails; heap dumps diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 3859dfa1ddbb9..e55e8ec39654e 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -23,8 +23,8 @@ dependencies { compileOnly project(":libs:elasticsearch-cli") implementation project(":libs:elasticsearch-plugin-api") implementation project(":libs:elasticsearch-plugin-scanner") - implementation 'org.ow2.asm:asm:9.5' - implementation 'org.ow2.asm:asm-tree:9.5' + implementation 'org.ow2.asm:asm:9.6' + implementation 'org.ow2.asm:asm-tree:9.6' api "org.bouncycastle:bcpg-fips:1.0.7.1" api "org.bouncycastle:bc-fips:1.0.2.4" diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginsConfig.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginsConfig.java index 8edd5f701706c..168e5ba3806f3 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginsConfig.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginsConfig.java @@ -160,12 +160,11 @@ static PluginsConfig parseConfig(Path configPath, XContent xContent) throws IOEx parser.declareStringOrNull(PluginsConfig::setProxy, new ParseField("proxy")); parser.declareObjectArrayOrNull(PluginsConfig::setPlugins, descriptorParser, new ParseField("plugins")); - final XContentParser yamlXContentParser = xContent.createParser( - XContentParserConfiguration.EMPTY, - Files.newInputStream(configPath) - ); - - return parser.parse(yamlXContentParser, null); + try ( + XContentParser yamlXContentParser = xContent.createParser(XContentParserConfiguration.EMPTY, Files.newInputStream(configPath)) + ) { + return parser.parse(yamlXContentParser, null); + } } /** diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/MachineDependentHeap.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/MachineDependentHeap.java index a30f3115be5c9..87c4883ca3073 100644 --- 
a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/MachineDependentHeap.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/MachineDependentHeap.java @@ -86,8 +86,7 @@ static class NodeRoleParser { @SuppressWarnings("unchecked") public static MachineNodeRole parse(InputStream config) { final Settings settings; - try { - var parser = YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, config); + try (var parser = YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, config)) { if (parser.currentToken() == null && parser.nextToken() == null) { settings = null; } else { diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 3f44db9928434..420ee36359745 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,8 +1,8 @@ include::{docs-root}/shared/versions/stack/{source_branch}.asciidoc[] -:lucene_version: 9.9.0 -:lucene_version_path: 9_9_0 +:lucene_version: 9.9.1 +:lucene_version_path: 9_9_1 :jdk: 11.0.2 :jdk_major: 11 :build_type: tar diff --git a/docs/changelog/101487.yaml b/docs/changelog/101487.yaml new file mode 100644 index 0000000000000..b4531f7fd6f75 --- /dev/null +++ b/docs/changelog/101487.yaml @@ -0,0 +1,5 @@ +pr: 101487 +summary: Wait for async searches to finish when shutting down +area: Infra/Node Lifecycle +type: enhancement +issues: [] diff --git a/docs/changelog/101640.yaml b/docs/changelog/101640.yaml new file mode 100644 index 0000000000000..6f61a3a3ffd84 --- /dev/null +++ b/docs/changelog/101640.yaml @@ -0,0 +1,5 @@ +pr: 101640 +summary: Support cross clusters query in ESQL +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/101717.yaml b/docs/changelog/101717.yaml new file mode 100644 index 0000000000000..7e97ef1049f88 --- /dev/null +++ b/docs/changelog/101717.yaml @@ -0,0 +1,5 @@ +pr: 101717 +summary: Pause shard snapshots on graceful shutdown +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/docs/changelog/102207.yaml b/docs/changelog/102207.yaml new file mode 100644 index 0000000000000..8b247828845f4 --- /dev/null +++ b/docs/changelog/102207.yaml @@ -0,0 +1,6 @@ +pr: 102207 +summary: Fix disk computation when initializing unassigned shards in desired balance + computation +area: Allocation +type: bug +issues: [] diff --git a/docs/changelog/102557.yaml b/docs/changelog/102557.yaml new file mode 100644 index 0000000000000..dfca1763064d4 --- /dev/null +++ b/docs/changelog/102557.yaml @@ -0,0 +1,5 @@ +pr: 102557 +summary: Metrics for search latencies +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/102584.yaml b/docs/changelog/102584.yaml new file mode 100644 index 0000000000000..44ff5dd9f7461 --- /dev/null +++ b/docs/changelog/102584.yaml @@ -0,0 +1,5 @@ +pr: 102584 +summary: Expose some ML metrics via APM +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/102798.yaml b/docs/changelog/102798.yaml new file mode 100644 index 0000000000000..986ad99f96a19 --- /dev/null +++ b/docs/changelog/102798.yaml @@ -0,0 +1,5 @@ +pr: 102798 +summary: Hot-reloadable remote cluster credentials +area: Security +type: enhancement +issues: [] diff --git a/docs/changelog/102824.yaml b/docs/changelog/102824.yaml new file mode 100644 index 0000000000000..21b39a4c3999d --- /dev/null +++ b/docs/changelog/102824.yaml @@ -0,0 +1,5 @@ +pr: 102824 +summary: Change detection aggregation improvements +area: Machine Learning +type: enhancement +issues: [] diff --git 
a/docs/changelog/103032.yaml b/docs/changelog/103032.yaml new file mode 100644 index 0000000000000..81d84fca0bdb0 --- /dev/null +++ b/docs/changelog/103032.yaml @@ -0,0 +1,5 @@ +pr: 103032 +summary: "x-pack/plugin/apm-data: Map some APM fields as flattened and fix error.grouping_name script" +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/103112.yaml b/docs/changelog/103112.yaml deleted file mode 100644 index dcb4cf604c179..0000000000000 --- a/docs/changelog/103112.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103112 -summary: Add JIT compiler excludes for `computeCommonPrefixLengthAndBuildHistogram` -area: Search -type: bug -issues: [] diff --git a/docs/changelog/103150.yaml b/docs/changelog/103150.yaml new file mode 100644 index 0000000000000..3f42c882d89fb --- /dev/null +++ b/docs/changelog/103150.yaml @@ -0,0 +1,6 @@ +pr: 103150 +summary: "ES|QL: Fix NPE on single value detection" +area: ES|QL +type: bug +issues: + - 103141 diff --git a/docs/changelog/103160.yaml b/docs/changelog/103160.yaml new file mode 100644 index 0000000000000..7701aa2b4a8d4 --- /dev/null +++ b/docs/changelog/103160.yaml @@ -0,0 +1,5 @@ +pr: 103160 +summary: Set thread name used by REST client +area: Java Low Level REST Client +type: enhancement +issues: [] diff --git a/docs/changelog/103178.yaml b/docs/changelog/103178.yaml new file mode 100644 index 0000000000000..5da0221a68984 --- /dev/null +++ b/docs/changelog/103178.yaml @@ -0,0 +1,5 @@ +pr: 103178 +summary: Expose API key authentication metrics +area: Authentication +type: enhancement +issues: [] diff --git a/docs/changelog/103223.yaml b/docs/changelog/103223.yaml new file mode 100644 index 0000000000000..c2f4c1b6a2cf4 --- /dev/null +++ b/docs/changelog/103223.yaml @@ -0,0 +1,10 @@ +pr: 103223 +summary: "[Synonyms] Mark Synonyms as GA" +area: "Search" +type: feature +issues: [] +highlight: + title: "GA Release of Synonyms API" + body: |- + Removes the beta label for the Synonyms API to make it GA. 
+ notable: true diff --git a/docs/changelog/103232.yaml b/docs/changelog/103232.yaml new file mode 100644 index 0000000000000..b955e7abb7683 --- /dev/null +++ b/docs/changelog/103232.yaml @@ -0,0 +1,5 @@ +pr: 103232 +summary: "Remove leniency in msearch parsing" +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/103309.yaml b/docs/changelog/103309.yaml new file mode 100644 index 0000000000000..94b2a31127870 --- /dev/null +++ b/docs/changelog/103309.yaml @@ -0,0 +1,6 @@ +pr: 103309 +summary: Introduce lazy rollover for mapping updates in data streams +area: Data streams +type: enhancement +issues: + - 89346 diff --git a/docs/changelog/103325.yaml b/docs/changelog/103325.yaml new file mode 100644 index 0000000000000..7de6c41986490 --- /dev/null +++ b/docs/changelog/103325.yaml @@ -0,0 +1,6 @@ +pr: 103325 +summary: Added Duplicate Word Check Feature to Analysis Nori +area: Search +type: feature +issues: + - 103321 diff --git a/docs/changelog/103340.yaml b/docs/changelog/103340.yaml new file mode 100644 index 0000000000000..21280dbfc857d --- /dev/null +++ b/docs/changelog/103340.yaml @@ -0,0 +1,5 @@ +pr: 103340 +summary: Avoid humongous blocks +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/103361.yaml b/docs/changelog/103361.yaml new file mode 100644 index 0000000000000..441acc09895ef --- /dev/null +++ b/docs/changelog/103361.yaml @@ -0,0 +1,5 @@ +pr: 103361 +summary: Prevent attempts to access non-existent node information during rebalancing +area: Machine Learning +type: bug +issues: [ ] diff --git a/docs/changelog/103387.yaml b/docs/changelog/103387.yaml new file mode 100644 index 0000000000000..77239fb9a3778 --- /dev/null +++ b/docs/changelog/103387.yaml @@ -0,0 +1,5 @@ +pr: 103387 +summary: Upgrade to Lucene 9.9.1 +area: Search +type: upgrade +issues: [] diff --git a/docs/changelog/103398.yaml b/docs/changelog/103398.yaml new file mode 100644 index 0000000000000..69452616ddc99 --- /dev/null +++ b/docs/changelog/103398.yaml @@ -0,0 +1,5 @@ +pr: 103398 +summary: ES|QL Async Query API +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/103427.yaml b/docs/changelog/103427.yaml new file mode 100644 index 0000000000000..57a27aa687ab7 --- /dev/null +++ b/docs/changelog/103427.yaml @@ -0,0 +1,5 @@ +pr: 103427 +summary: "[Connector API] Fix bug with nullable tooltip field in parser" +area: Application +type: bug +issues: [] diff --git a/docs/changelog/103434.yaml b/docs/changelog/103434.yaml new file mode 100644 index 0000000000000..56af604fe08f7 --- /dev/null +++ b/docs/changelog/103434.yaml @@ -0,0 +1,11 @@ +pr: 103434 +summary: Lower the `look_ahead_time` index setting's max value from 7 days to 2 hours. +area: TSDB +type: breaking +issues: [] +breaking: + title: Lower the `look_ahead_time` index setting's max value + area: Index setting + details: "Lower the `look_ahead_time` index setting's max value from 7 days to 2 hours." 
+ impact: "Any value between 2 hours and 7 days will be treated as a look ahead time of 2 hours, since 2 hours is now the maximum" + notable: false diff --git a/docs/changelog/103453.yaml b/docs/changelog/103453.yaml new file mode 100644 index 0000000000000..4b7dab77c8b23 --- /dev/null +++ b/docs/changelog/103453.yaml @@ -0,0 +1,5 @@ +pr: 103453 +summary: Add expiration time to update api key api +area: Security +type: enhancement +issues: [] diff --git a/docs/changelog/103461.yaml b/docs/changelog/103461.yaml new file mode 100644 index 0000000000000..3a1bf30aa90c9 --- /dev/null +++ b/docs/changelog/103461.yaml @@ -0,0 +1,5 @@ +pr: 103461 +summary: Add support for Well Known Binary (WKB) in the fields API for spatial fields +area: Geo +type: enhancement +issues: [] diff --git a/docs/changelog/103474.yaml b/docs/changelog/103474.yaml new file mode 100644 index 0000000000000..a1da15a6bfbe5 --- /dev/null +++ b/docs/changelog/103474.yaml @@ -0,0 +1,6 @@ +pr: 103474 +summary: Fix now in millis for ESQL search contexts +area: ES|QL +type: bug +issues: + - 103455 diff --git a/docs/changelog/103508.yaml b/docs/changelog/103508.yaml new file mode 100644 index 0000000000000..9c6f79ef75657 --- /dev/null +++ b/docs/changelog/103508.yaml @@ -0,0 +1,5 @@ +pr: 103508 +summary: "[Connectors API] Fix `ClassCastException` when creating a new sync job" +area: Application +type: bug +issues: [] diff --git a/docs/changelog/103510.yaml b/docs/changelog/103510.yaml new file mode 100644 index 0000000000000..50ec8efd5c440 --- /dev/null +++ b/docs/changelog/103510.yaml @@ -0,0 +1,6 @@ +pr: 103510 +summary: "ES|QL: better management of exact subfields for TEXT fields" +area: ES|QL +type: bug +issues: + - 99899 diff --git a/docs/changelog/103520.yaml b/docs/changelog/103520.yaml new file mode 100644 index 0000000000000..0ef7124eb1ed2 --- /dev/null +++ b/docs/changelog/103520.yaml @@ -0,0 +1,5 @@ +pr: 103520 +summary: Request indexing memory pressure in APM node metrics publisher +area: Distributed +type: bug +issues: [] diff --git a/docs/changelog/103530.yaml b/docs/changelog/103530.yaml new file mode 100644 index 0000000000000..6feb04467b03e --- /dev/null +++ b/docs/changelog/103530.yaml @@ -0,0 +1,5 @@ +pr: 103530 +summary: Exclude quantiles when fetching model snapshots where possible +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/103538.yaml b/docs/changelog/103538.yaml new file mode 100644 index 0000000000000..5aaed771d5ee4 --- /dev/null +++ b/docs/changelog/103538.yaml @@ -0,0 +1,6 @@ +pr: 103538 +summary: "ESQL: Improve pushdown of certain filters" +area: ES|QL +type: bug +issues: + - 103536 diff --git a/docs/changelog/103546.yaml b/docs/changelog/103546.yaml new file mode 100644 index 0000000000000..08584e8555bd4 --- /dev/null +++ b/docs/changelog/103546.yaml @@ -0,0 +1,5 @@ +pr: 103546 +summary: Handle timeout on standalone rewrite calls +area: Search +type: bug +issues: [] diff --git a/docs/changelog/103555.yaml b/docs/changelog/103555.yaml new file mode 100644 index 0000000000000..2b0dc2692e252 --- /dev/null +++ b/docs/changelog/103555.yaml @@ -0,0 +1,6 @@ +pr: 103555 +summary: "[Security Solution] Allow write permission for `kibana_system` role on endpoint\ + \ response index" +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/103574.yaml b/docs/changelog/103574.yaml new file mode 100644 index 0000000000000..ed6ad237f49a2 --- /dev/null +++ b/docs/changelog/103574.yaml @@ -0,0 +1,5 @@ +pr: 103574 +summary: Samples should check if the aggregations result is empty or null +area: EQL
+type: bug +issues: [] diff --git a/docs/changelog/103580.yaml b/docs/changelog/103580.yaml new file mode 100644 index 0000000000000..6fd0328017d1f --- /dev/null +++ b/docs/changelog/103580.yaml @@ -0,0 +1,6 @@ +pr: 103580 +summary: Copy counter field properties to downsampled index +area: Downsampling +type: bug +issues: + - 103569 diff --git a/docs/changelog/103591.yaml b/docs/changelog/103591.yaml new file mode 100644 index 0000000000000..41b6e362c5713 --- /dev/null +++ b/docs/changelog/103591.yaml @@ -0,0 +1,6 @@ +pr: 103591 +summary: Wait for the model results on graceful shutdown +area: Machine Learning +type: bug +issues: + - 103414 diff --git a/docs/changelog/103592.yaml b/docs/changelog/103592.yaml new file mode 100644 index 0000000000000..21e06f1f5a10d --- /dev/null +++ b/docs/changelog/103592.yaml @@ -0,0 +1,5 @@ +pr: 103592 +summary: Remove deprecated Block APIs +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/103601.yaml b/docs/changelog/103601.yaml new file mode 100644 index 0000000000000..bf7aaaf835e00 --- /dev/null +++ b/docs/changelog/103601.yaml @@ -0,0 +1,7 @@ +pr: 103601 +summary: Introduce Elasticsearch `PostingFormat` based on Lucene 90 posting format + using PFOR +area: Search +type: bug +issues: + - 103002 diff --git a/docs/changelog/103610.yaml b/docs/changelog/103610.yaml new file mode 100644 index 0000000000000..1ed38cc2822bd --- /dev/null +++ b/docs/changelog/103610.yaml @@ -0,0 +1,6 @@ +pr: 103610 +summary: "ESQL: allow `null` in date math" +area: ES|QL +type: bug +issues: + - 103085 diff --git a/docs/changelog/103611.yaml b/docs/changelog/103611.yaml new file mode 100644 index 0000000000000..51c77cd286d66 --- /dev/null +++ b/docs/changelog/103611.yaml @@ -0,0 +1,6 @@ +pr: 103611 +summary: Fix NPE on missing event queries +area: EQL +type: bug +issues: + - 103608 diff --git a/docs/changelog/103615.yaml b/docs/changelog/103615.yaml new file mode 100644 index 0000000000000..69498c749687f --- /dev/null +++ b/docs/changelog/103615.yaml @@ -0,0 +1,5 @@ +pr: 103615 +summary: Fix downsample api by returning a failure in case one or more downsample persistent tasks failed +area: Downsampling +type: bug +issues: [] diff --git a/docs/changelog/103628.yaml b/docs/changelog/103628.yaml new file mode 100644 index 0000000000000..42259c7bcde46 --- /dev/null +++ b/docs/changelog/103628.yaml @@ -0,0 +1,5 @@ +pr: 103628 +summary: Add ES|QL async delete API +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/103632.yaml b/docs/changelog/103632.yaml new file mode 100644 index 0000000000000..1d83c6528f371 --- /dev/null +++ b/docs/changelog/103632.yaml @@ -0,0 +1,5 @@ +pr: 103632 +summary: "ESQL: Check field exists before load from `_source`" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/103633.yaml b/docs/changelog/103633.yaml new file mode 100644 index 0000000000000..9e36451caafd8 --- /dev/null +++ b/docs/changelog/103633.yaml @@ -0,0 +1,5 @@ +pr: 103633 +summary: Update s3 latency metric to use micros +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/103643.yaml b/docs/changelog/103643.yaml new file mode 100644 index 0000000000000..966fb57acf566 --- /dev/null +++ b/docs/changelog/103643.yaml @@ -0,0 +1,5 @@ +pr: 103643 +summary: "[Profiling] Use shard request cache consistently" +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/103646.yaml b/docs/changelog/103646.yaml new file mode 100644 index 0000000000000..b7a6fae025771 --- /dev/null +++
b/docs/changelog/103646.yaml @@ -0,0 +1,5 @@ +pr: 103646 +summary: Add index mapping parameter for `counted_keyword` +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/103669.yaml b/docs/changelog/103669.yaml new file mode 100644 index 0000000000000..57361b9d842e4 --- /dev/null +++ b/docs/changelog/103669.yaml @@ -0,0 +1,5 @@ +pr: 103669 +summary: Validate inference model ids +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/103670.yaml b/docs/changelog/103670.yaml new file mode 100644 index 0000000000000..ad3f0519b5d19 --- /dev/null +++ b/docs/changelog/103670.yaml @@ -0,0 +1,5 @@ +pr: 103670 +summary: "ESQL: Improve local folding of aggregates" +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/103673.yaml b/docs/changelog/103673.yaml new file mode 100644 index 0000000000000..f786b57eba411 --- /dev/null +++ b/docs/changelog/103673.yaml @@ -0,0 +1,6 @@ +pr: 103673 +summary: "ESQL: Infer not null for aggregated fields" +area: ES|QL +type: enhancement +issues: + - 102787 diff --git a/docs/changelog/103681.yaml b/docs/changelog/103681.yaml new file mode 100644 index 0000000000000..bba73c8e3a7d4 --- /dev/null +++ b/docs/changelog/103681.yaml @@ -0,0 +1,6 @@ +pr: 103681 +summary: "ESQL: Expand shallow copy with vecs" +area: ES|QL +type: enhancement +issues: + - 100528 diff --git a/docs/changelog/103690.yaml b/docs/changelog/103690.yaml new file mode 100644 index 0000000000000..fa9076789c1cd --- /dev/null +++ b/docs/changelog/103690.yaml @@ -0,0 +1,5 @@ +pr: 103690 +summary: Restore inter-segment search concurrency when synthetic source is enabled +area: Search +type: bug +issues: [] diff --git a/docs/changelog/103698.yaml b/docs/changelog/103698.yaml new file mode 100644 index 0000000000000..d94b70b54e505 --- /dev/null +++ b/docs/changelog/103698.yaml @@ -0,0 +1,5 @@ +pr: 103698 +summary: Read points from source to reduce precision loss +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/103710.yaml b/docs/changelog/103710.yaml new file mode 100644 index 0000000000000..539b9f553ccc2 --- /dev/null +++ b/docs/changelog/103710.yaml @@ -0,0 +1,5 @@ +pr: 103710 +summary: List hidden shard stores by default +area: Store +type: enhancement +issues: [] diff --git a/docs/changelog/103720.yaml b/docs/changelog/103720.yaml new file mode 100644 index 0000000000000..e0ee879988fa7 --- /dev/null +++ b/docs/changelog/103720.yaml @@ -0,0 +1,6 @@ +pr: 103720 +summary: Add "step":"ERROR" to ILM explain response for missing policy +area: ILM+SLM +type: enhancement +issues: + - 99030 diff --git a/docs/changelog/103727.yaml b/docs/changelog/103727.yaml new file mode 100644 index 0000000000000..f943ee7906d58 --- /dev/null +++ b/docs/changelog/103727.yaml @@ -0,0 +1,5 @@ +pr: 103727 +summary: "ESQL: Track the rest of `DocVector`" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/103758.yaml b/docs/changelog/103758.yaml new file mode 100644 index 0000000000000..e77f228f134a0 --- /dev/null +++ b/docs/changelog/103758.yaml @@ -0,0 +1,5 @@ +pr: 103758 +summary: Fix the transport version of `PlanStreamOutput` +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/103783.yaml b/docs/changelog/103783.yaml new file mode 100644 index 0000000000000..47c32dd639310 --- /dev/null +++ b/docs/changelog/103783.yaml @@ -0,0 +1,5 @@ +pr: 103783 +summary: "[Profiling] Mark all templates as managed" +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/103807.yaml
b/docs/changelog/103807.yaml new file mode 100644 index 0000000000000..3849edcc00ced --- /dev/null +++ b/docs/changelog/103807.yaml @@ -0,0 +1,6 @@ +pr: 103807 +summary: "ESQL: Add single value checks on LIKE/RLIKE pushdown" +area: ES|QL +type: bug +issues: + - 103806 diff --git a/docs/changelog/103821.yaml b/docs/changelog/103821.yaml new file mode 100644 index 0000000000000..3279059acbe3e --- /dev/null +++ b/docs/changelog/103821.yaml @@ -0,0 +1,5 @@ +pr: 103821 +summary: "ESQL: Delay finding field load infrastructure" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/103846.yaml b/docs/changelog/103846.yaml new file mode 100644 index 0000000000000..0d34efabc0278 --- /dev/null +++ b/docs/changelog/103846.yaml @@ -0,0 +1,5 @@ +pr: 103846 +summary: Support sampling in `counted_terms` aggregation +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/103865.yaml b/docs/changelog/103865.yaml new file mode 100644 index 0000000000000..5c9570f32c44e --- /dev/null +++ b/docs/changelog/103865.yaml @@ -0,0 +1,5 @@ +pr: 103865 +summary: Revert change +area: Mapping +type: bug +issues: [] diff --git a/docs/changelog/103873.yaml b/docs/changelog/103873.yaml new file mode 100644 index 0000000000000..937106043ecf4 --- /dev/null +++ b/docs/changelog/103873.yaml @@ -0,0 +1,5 @@ +pr: 103873 +summary: Catch exceptions during `pytorch_inference` startup +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/103898.yaml b/docs/changelog/103898.yaml new file mode 100644 index 0000000000000..73d89e49e8812 --- /dev/null +++ b/docs/changelog/103898.yaml @@ -0,0 +1,14 @@ +pr: 103898 +summary: Change `index.look_ahead_time` index setting's default value from 2 hours to 30 minutes. +area: TSDB +type: breaking +issues: [] +breaking: + title: Change `index.look_ahead_time` index setting's default value from 2 hours to 30 minutes. + area: Index setting + details: Lower the `index.look_ahead_time` index setting's default value from 2 hours to 30 minutes. + impact: > + Documents with @timestamp of 30 minutes or more in the future will be rejected. + Previously, documents with @timestamp of 2 hours or more in the future were rejected. + To keep the previous behaviour, update the `index.look_ahead_time` setting to two hours before performing the upgrade.
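A hedged sketch of the mitigation described in the impact note above: setting `index.look_ahead_time` back to two hours in the index template that backs a TSDB data stream. The template name, index pattern, and dimension field below are hypothetical, not taken from this change:

[source,console]
----
PUT _index_template/my-metrics-template
{
  "index_patterns": ["metrics-sketch-*"],
  "data_stream": {},
  "template": {
    "settings": {
      "index.mode": "time_series",
      "index.routing_path": ["host"],
      "index.look_ahead_time": "2h"
    },
    "mappings": {
      "properties": {
        "host": { "type": "keyword", "time_series_dimension": true }
      }
    }
  }
}
----

New backing indices created from this template would then keep the pre-upgrade two-hour window.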
+ notable: false diff --git a/docs/changelog/103903.yaml b/docs/changelog/103903.yaml new file mode 100644 index 0000000000000..c2e5e710ac439 --- /dev/null +++ b/docs/changelog/103903.yaml @@ -0,0 +1,5 @@ +pr: 103903 +summary: Account for reserved disk size +area: Allocation +type: enhancement +issues: [] diff --git a/docs/changelog/103920.yaml b/docs/changelog/103920.yaml new file mode 100644 index 0000000000000..c4a0d3b06fc82 --- /dev/null +++ b/docs/changelog/103920.yaml @@ -0,0 +1,5 @@ +pr: 103920 +summary: Use search to determine if cluster contains data +area: Application +type: bug +issues: [] diff --git a/docs/changelog/103922.yaml b/docs/changelog/103922.yaml new file mode 100644 index 0000000000000..4181a6e6b1e8a --- /dev/null +++ b/docs/changelog/103922.yaml @@ -0,0 +1,5 @@ +pr: 103922 +summary: Always test for spikes and dips as well as changes in the change point aggregation +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/103923.yaml b/docs/changelog/103923.yaml new file mode 100644 index 0000000000000..80e6880909f3a --- /dev/null +++ b/docs/changelog/103923.yaml @@ -0,0 +1,5 @@ +pr: 103923 +summary: Preserve response headers in Datafeed preview +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/103928.yaml b/docs/changelog/103928.yaml new file mode 100644 index 0000000000000..a9e60ba33a686 --- /dev/null +++ b/docs/changelog/103928.yaml @@ -0,0 +1,5 @@ +pr: 103928 +summary: "ESQL: `MV_FIRST` and `MV_LAST`" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/103948.yaml b/docs/changelog/103948.yaml new file mode 100644 index 0000000000000..3247183fc97bb --- /dev/null +++ b/docs/changelog/103948.yaml @@ -0,0 +1,6 @@ +pr: 103948 +summary: '''elasticsearch-certutil cert'' now verifies the issuing chain of the generated + certificate' +area: TLS +type: enhancement +issues: [] diff --git a/docs/changelog/103996.yaml b/docs/changelog/103996.yaml new file mode 100644 index 0000000000000..699b93fff4f03 --- /dev/null +++ b/docs/changelog/103996.yaml @@ -0,0 +1,5 @@ +pr: 103996 +summary: Ensure unique IDs between inference models and trained model deployments +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/104029.yaml b/docs/changelog/104029.yaml new file mode 100644 index 0000000000000..2b74d3b634dba --- /dev/null +++ b/docs/changelog/104029.yaml @@ -0,0 +1,5 @@ +pr: 104029 +summary: '`AsyncOperator#isFinished` must never return true on failure' +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/104030.yaml b/docs/changelog/104030.yaml new file mode 100644 index 0000000000000..8fe30e6258653 --- /dev/null +++ b/docs/changelog/104030.yaml @@ -0,0 +1,5 @@ +pr: 104030 +summary: Add the possibility to transform WKT to WKB directly +area: Geo +type: enhancement +issues: [] diff --git a/docs/changelog/104046.yaml b/docs/changelog/104046.yaml new file mode 100644 index 0000000000000..9b383611b560a --- /dev/null +++ b/docs/changelog/104046.yaml @@ -0,0 +1,5 @@ +pr: 104046 +summary: "ESQL: Update the use of some user-caused exceptions" +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/104051.yaml b/docs/changelog/104051.yaml new file mode 100644 index 0000000000000..1aa6d69f5ae20 --- /dev/null +++ b/docs/changelog/104051.yaml @@ -0,0 +1,6 @@ +pr: 104051 +summary: Fix NPE that is thrown by `_update` API +area: Transform +type: bug +issues: + - 104048 diff --git a/docs/changelog/104063.yaml b/docs/changelog/104063.yaml new file mode 100644 index 
0000000000000..5f59022472c75 --- /dev/null +++ b/docs/changelog/104063.yaml @@ -0,0 +1,5 @@ +pr: 104063 +summary: Add serverless scopes for Connector APIs +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/104077.yaml b/docs/changelog/104077.yaml new file mode 100644 index 0000000000000..7550e7388a29d --- /dev/null +++ b/docs/changelog/104077.yaml @@ -0,0 +1,5 @@ +pr: 104077 +summary: Retry updates to model snapshot ID on job config +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/104118.yaml b/docs/changelog/104118.yaml new file mode 100644 index 0000000000000..f5afb199bc5eb --- /dev/null +++ b/docs/changelog/104118.yaml @@ -0,0 +1,6 @@ +pr: 104118 +summary: "ESQL: add `date_diff` function" +area: ES|QL +type: enhancement +issues: + - 101942 diff --git a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc index ce3d0a367dc4e..3efb8f6de9b3e 100644 --- a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc @@ -45,6 +45,13 @@ Use `synonyms_set` configuration option to provide a synonym set created via Syn } ---- +[WARNING] +====== +Synonyms sets must exist before they can be added to indices. +If an index is created referencing a nonexistent synonyms set, the index will remain in a partially created and inoperable state. +The only way to recover from this scenario is to ensure the synonyms set exists, then either delete and re-create the index, or close and re-open the index. +====== + Use `synonyms_path` to provide a synonym file: [source,JSON] diff --git a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc index ce055d38092ff..046cd297b5092 100644 --- a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc @@ -33,6 +33,13 @@ Use `synonyms_set` configuration option to provide a synonym set created via Syn } ---- +[WARNING] +====== +Synonyms sets must exist before they can be added to indices. +If an index is created referencing a nonexistent synonyms set, the index will remain in a partially created and inoperable state. +The only way to recover from this scenario is to ensure the synonyms set exists, then either delete and re-create the index, or close and re-open the index. +====== + Use `synonyms_path` to provide a synonym file: [source,JSON] diff --git a/docs/reference/connector/apis/check-in-connector-api.asciidoc b/docs/reference/connector/apis/check-in-connector-api.asciidoc new file mode 100644 index 0000000000000..c0c021f1304dc --- /dev/null +++ b/docs/reference/connector/apis/check-in-connector-api.asciidoc @@ -0,0 +1,76 @@ +[[check-in-connector-api]] +=== Check in connector API + +preview::[] + +++++ +Check in a connector +++++ + +Updates the `last_seen` field of a connector with the current timestamp. + +[[check-in-connector-api-request]] +==== {api-request-title} + +`PUT _connector/<connector_id>/_check_in` + +[[check-in-connector-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_id` parameter should reference an existing connector.
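A minimal sketch of the recovery path described in the synonyms warning above, assuming a hypothetical set ID `my-synonyms-set` and a single illustrative rule. Creating the set first avoids the partially created index state entirely:

[source,console]
----
PUT _synonyms/my-synonyms-set
{
  "synonyms_set": [
    {
      "id": "rule-1",
      "synonyms": "hello, howdy"
    }
  ]
}
----

Only after this request succeeds should an index reference `my-synonyms-set` through the `synonyms_set` filter option.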
+ +[[check-in-connector-api-path-params]] +==== {api-path-parms-title} + +`<connector_id>`:: +(Required, string) + + +[[check-in-connector-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Connector `last_seen` field was successfully updated with the current timestamp. + +`400`:: +The `connector_id` was not provided. + +`404` (Missing resources):: +No connector matching `connector_id` could be found. + +[[check-in-connector-api-example]] +==== {api-examples-title} + +The following example updates the `last_seen` property with the current timestamp for the connector with ID `my-connector`: + +//// +[source, console] +-------------------------------------------------- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "service_type": "google_drive" +} +-------------------------------------------------- +// TESTSETUP + +[source,console] +-------------------------------------------------- +DELETE _connector/my-connector +-------------------------------------------------- +// TEARDOWN +//// + +[source,console] +---- +PUT _connector/my-connector/_check_in +---- + +[source,console-result] +---- +{ + "result": "updated" +} +---- diff --git a/docs/reference/connector/apis/connector-apis.asciidoc b/docs/reference/connector/apis/connector-apis.asciidoc index 0380682677340..e127dc07446b5 100644 --- a/docs/reference/connector/apis/connector-apis.asciidoc +++ b/docs/reference/connector/apis/connector-apis.asciidoc @@ -26,6 +26,14 @@ Use the following APIs to manage connectors: * <> * <> * <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> [discrete] @@ -40,12 +48,15 @@ Use the following APIs to manage sync jobs: * <> * <> * <> -* < +* <> * <> * <> +* <> +* <> include::cancel-connector-sync-job-api.asciidoc[] +include::check-in-connector-api.asciidoc[] include::check-in-connector-sync-job-api.asciidoc[] include::create-connector-api.asciidoc[] include::create-connector-sync-job-api.asciidoc[] @@ -55,3 +66,12 @@ include::get-connector-api.asciidoc[] include::get-connector-sync-job-api.asciidoc[] include::list-connectors-api.asciidoc[] include::list-connector-sync-jobs-api.asciidoc[] +include::set-connector-sync-job-error-api.asciidoc[] +include::set-connector-sync-job-stats-api.asciidoc[] +include::update-connector-configuration-api.asciidoc[] +include::update-connector-error-api.asciidoc[] +include::update-connector-filtering-api.asciidoc[] +include::update-connector-last-sync-api.asciidoc[] +include::update-connector-name-description-api.asciidoc[] +include::update-connector-pipeline-api.asciidoc[] +include::update-connector-scheduling-api.asciidoc[] diff --git a/docs/reference/connector/apis/delete-connector-api.asciidoc b/docs/reference/connector/apis/delete-connector-api.asciidoc index 2bda7da72cb72..6d3a120df785a 100644 --- a/docs/reference/connector/apis/delete-connector-api.asciidoc +++ b/docs/reference/connector/apis/delete-connector-api.asciidoc @@ -19,6 +19,7 @@ This is a destructive action that is not recoverable. ==== {api-prereq-title} * To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_id` parameter should reference an existing connector.
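For orientation, a minimal sketch of the call that the prerequisite above guards, assuming a connector with ID `my-connector` already exists (a nonexistent ID would be expected to produce a `404`):

[source,console]
----
DELETE _connector/my-connector
----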
[[delete-connector-api-path-params]] ==== {api-path-parms-title} diff --git a/docs/reference/connector/apis/set-connector-sync-job-error-api.asciidoc b/docs/reference/connector/apis/set-connector-sync-job-error-api.asciidoc new file mode 100644 index 0000000000000..935fcccc77fcf --- /dev/null +++ b/docs/reference/connector/apis/set-connector-sync-job-error-api.asciidoc @@ -0,0 +1,58 @@ +[[set-connector-sync-job-error-api]] +=== Set connector sync job error API +++++ +Set connector sync job error +++++ + +Sets a connector sync job error. + +[[set-connector-sync-job-error-api-request]] +==== {api-request-title} +`PUT _connector/_sync_job//_error` + +[[set-connector-sync-job-error-api-prereqs]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_sync_job_id` parameter should reference an existing connector sync job. + +[[set-connector-sync-job-error-api-desc]] +==== {api-description-title} + +Sets the `error` field for the specified connector sync job and sets its `status` to `error`. + +[[set-connector-sync-job-error-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) + +[role="child_attributes"] +[[set-connector-sync-job-error-api-request-body]] +==== {api-request-body-title} + +`error`:: +(Required, string) The error to set the connector sync job `error` field to. + +[[set-connector-sync-job-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Indicates that the connector sync job error was set successfully. + +`404`:: +No connector sync job matching `connector_sync_job_id` could be found. + +[[set-connector-sync-job-error-api-example]] +==== {api-examples-title} + +The following example sets the error `some-error` in the connector sync job `my-connector-sync-job`: + +[source,console] +---- +PUT _connector/_sync_job/my-connector-sync-job/_error +{ + "error": "some-error" +} +---- +// TEST[skip:there's no way to clean up after creating a connector sync job, as we don't know the id ahead of time. Therefore, skip this test.] diff --git a/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc b/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc new file mode 100644 index 0000000000000..0513155312bb4 --- /dev/null +++ b/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc @@ -0,0 +1,77 @@ +[[set-connector-sync-job-stats-api]] +=== Set connector sync job stats API +++++ +Set connector sync job stats +++++ + +Sets connector sync job stats. + +[[set-connector-sync-job-stats-api-request]] +==== {api-request-title} +`PUT _connector/_sync_job//_stats` + +[[set-connector-sync-job-stats-api-prereqs]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_sync_job_id` parameter should reference an existing connector sync job. + +[[set-connector-sync-job-stats-api-desc]] +==== {api-description-title} + +Sets the stats for a connector sync job. +Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume` and `total_document_count`. +`last_seen` can also be updated using this API. +This API is mainly used by the connector service for updating sync job information. 
+ +[[set-connector-sync-job-stats-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) + +[role="child_attributes"] +[[set-connector-sync-job-stats-api-request-body]] +==== {api-request-body-title} + +`deleted_document_count`:: +(Required, int) The number of documents the sync job deleted. + +`indexed_document_count`:: +(Required, int) The number of documents the sync job indexed. + +`indexed_document_volume`:: +(Required, int) The total size of the data (in MiB) the sync job indexed. + +`total_document_count`:: +(Optional, int) The total number of documents in the target index after the sync job finished. + +`last_seen`:: +(Optional, instant) The timestamp to set the connector sync job's `last_seen` property. + +[[set-connector-sync-job-stats-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Indicates that the connector sync job stats were successfully updated. + +`404`:: +No connector sync job matching `connector_sync_job_id` could be found. + +[[set-connector-sync-job-stats-api-example]] +==== {api-examples-title} + +The following example sets all mandatory and optional stats for the connector sync job `my-connector-sync-job`: + +[source,console] +---- +PUT _connector/_sync_job/my-connector-sync-job/_stats +{ + "deleted_document_count": 10, + "indexed_document_count": 20, + "indexed_document_volume": 1000, + "total_document_count": 2000, + "last_seen": "2023-01-02T10:00:00Z" +} +---- +// TEST[skip:there's no way to clean up after creating a connector sync job, as we don't know the id ahead of time. Therefore, skip this test.] diff --git a/docs/reference/connector/apis/update-connector-configuration-api.asciidoc b/docs/reference/connector/apis/update-connector-configuration-api.asciidoc new file mode 100644 index 0000000000000..6d6591a6f00bc --- /dev/null +++ b/docs/reference/connector/apis/update-connector-configuration-api.asciidoc @@ -0,0 +1,154 @@ +[[update-connector-configuration-api]] +=== Update connector configuration API + +preview::[] + +++++ +Update connector configuration +++++ + +Updates the `configuration` of a connector. + + +[[update-connector-configuration-api-request]] +==== {api-request-title} + +`PUT _connector//_configuration` + +[[update-connector-configuration-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_id` parameter should reference an existing connector. +* The configuration fields definition must be compatible with the specific connector type being used. + +[[update-connector-configuration-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) + +[role="child_attributes"] +[[update-connector-configuration-api-request-body]] +==== {api-request-body-title} + +`configuration`:: +(Required, object) The configuration for the connector. The configuration field is a map where each key represents a specific configuration field name, and the value is a `ConnectorConfiguration` object. + +Each `ConnectorConfiguration` object contains the following attributes: + +* `category` (Optional, string) The category of the configuration field. This helps in grouping related configurations together in the user interface. + +* `default_value` (Required, string | number | bool) The default value for the configuration. This value is used if the value field is empty, applicable only for non-required fields. + +* `depends_on` (Required, array of `ConfigurationDependency`) An array of dependencies on other configurations. 
A field will not be enabled unless these dependencies are met. Each dependency specifies a field key and the required value for the dependency to be considered fulfilled. + +* `display` (Required, string) The display type for the UI element that represents this configuration. This defines how the field should be rendered in the user interface. Supported types are: `text`, `textbox`, `textarea`, `numeric`, `toggle` and `dropdown`. + +* `label` (Required, string) The display label for the configuration field. This label is shown in the user interface, adjacent to the field. + +* `options` (Required, array of `ConfigurationSelectOption`) An array of options for list-type fields. These options are used for inputs in the user interface, each having a label for display and a value. + +* `order` (Required, number) The order in which this configuration appears in the user interface. This helps in organizing fields logically. + +* `placeholder` (Required, string) Placeholder text for the configuration field. This text is displayed inside the field before a value is entered. + +* `required` (Required, boolean) Indicates whether the configuration is mandatory. If true, a value must be provided for the field. + +* `sensitive` (Required, boolean) Indicates whether the configuration contains sensitive information. Sensitive fields may be obfuscated in the user interface. + +* `tooltip` (Optional, string) Tooltip text providing additional information about the configuration. This text appears when the user hovers over the info icon next to the configuration field. + +* `type` (Required, string) The type of the configuration field, such as `str`, `int`, `bool`, `list`. This defines the data type and format of the field's value. + +* `ui_restrictions` (Required, array of strings) A list of UI restrictions. These restrictions define where in the user interface this field should be available or restricted. + +* `validations` (Required, array of `ConfigurationValidation`) An array of rules for validating the field's value. Each validation specifies a type and a constraint that the field's value must meet. + +* `value` (Required, string | number | bool) The current value of the configuration. This is the actual value set for the field and is used by the connector during its operations. + +`ConfigurationDependency` represents a dependency that a configuration field has on another field's value. It contains the following attributes: + +* `field` (Required, string) The name of the field in the configuration that this dependency relates to. + +* `value` (Required, string | number | bool) The required value of the specified field for this dependency to be met. + +`ConfigurationSelectOption` defines an option within a selectable configuration field. It contains the following attributes: + +* `label` (Required, string) The display label for the option. + +* `value` (Required, string) The actual value associated with the option. + +`ConfigurationValidation` specifies validation rules for configuration fields. Each ConfigurationValidation instance enforces a specific type of validation based on its type and constraint. It contains the following attributes: + +* `constraint` (Required, string | number) The validation constraint. The nature of this constraint depends on the validation type. It could be a numeric value, a list, a regular expression pattern. + +* `type` (Required, ConfigurationValidationType) The type of validation to be performed. 
Possible values include: `less_than`, `greater_than`, `list_type`, `included_in`, `regex` and `unset`. + + +[[update-connector-configuration-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Connector configuration was successfully updated. + +`400`:: +The `connector_id` was not provided or the request payload was malformed. + +`404` (Missing resources):: +No connector matching `connector_id` could be found. + +[[update-connector-configuration-api-example]] +==== {api-examples-title} + +The following example updates the `configuration` for the connector with ID `my-connector`: + +//// +[source, console] +-------------------------------------------------- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "service_type": "google_drive" +} +-------------------------------------------------- +// TESTSETUP + +[source,console] +-------------------------------------------------- +DELETE _connector/my-connector +-------------------------------------------------- +// TEARDOWN +//// + +[source,console] +---- +PUT _connector/my-connector/_configuration +{ + "configuration": { + "service_account_credentials": { + "default_value": null, + "depends_on": [], + "display": "textarea", + "label": "Google Drive service account JSON", + "options": [], + "order": 1, + "required": true, + "sensitive": true, + "tooltip": "This connector authenticates as a service account to synchronize content from Google Drive.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": "...service account JSON..." + } + } +} +---- + +[source,console-result] +---- +{ + "result": "updated" +} +---- diff --git a/docs/reference/connector/apis/update-connector-error-api.asciidoc b/docs/reference/connector/apis/update-connector-error-api.asciidoc new file mode 100644 index 0000000000000..19bc15f0dc60a --- /dev/null +++ b/docs/reference/connector/apis/update-connector-error-api.asciidoc @@ -0,0 +1,86 @@ +[[update-connector-error-api]] +=== Update connector error API + +preview::[] + +++++ +Update connector error +++++ + +Updates the `error` field of a connector. + +[[update-connector-error-api-request]] +==== {api-request-title} + +`PUT _connector/<connector_id>/_error` + +[[update-connector-error-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_id` parameter should reference an existing connector. + +[[update-connector-error-api-path-params]] +==== {api-path-parms-title} + +`<connector_id>`:: +(Required, string) + +[role="child_attributes"] +[[update-connector-error-api-request-body]] +==== {api-request-body-title} + +`error`:: +(Required, string) A message related to the last error encountered by the connector. + + +[[update-connector-error-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Connector `error` field was successfully updated. + +`400`:: +The `connector_id` was not provided or the request payload was malformed. + +`404` (Missing resources):: +No connector matching `connector_id` could be found.
+ +[[update-connector-error-api-example]] +==== {api-examples-title} + +The following example updates the `error` field for the connector with ID `my-connector`: + +//// +[source, console] +-------------------------------------------------- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "service_type": "google_drive" +} +-------------------------------------------------- +// TESTSETUP + +[source,console] +-------------------------------------------------- +DELETE _connector/my-connector +-------------------------------------------------- +// TEARDOWN +//// + +[source,console] +---- +PUT _connector/my-connector/_error +{ + "error": "Houston, we have a problem!" +} +---- + +[source,console-result] +---- +{ + "result": "updated" +} +---- diff --git a/docs/reference/connector/apis/update-connector-filtering-api.asciidoc b/docs/reference/connector/apis/update-connector-filtering-api.asciidoc new file mode 100644 index 0000000000000..d4c7bb16a3304 --- /dev/null +++ b/docs/reference/connector/apis/update-connector-filtering-api.asciidoc @@ -0,0 +1,186 @@ +[[update-connector-filtering-api]] +=== Update connector filtering API + +preview::[] + +++++ +Update connector filtering +++++ + +Updates the `filtering` configuration of a connector. Learn more about filtering in the {enterprise-search-ref}/sync-rules.html[sync rules] documentation. + +[[update-connector-filtering-api-request]] +==== {api-request-title} + +`PUT _connector//_filtering` + +[[update-connector-filtering-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_id` parameter should reference an existing connector. + +[[update-connector-filtering-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) + +[role="child_attributes"] +[[update-connector-filtering-api-request-body]] +==== {api-request-body-title} + +`filtering`:: +(Required, array) The filtering configuration for the connector. This configuration determines the set of rules applied for filtering data during syncs. + +Each entry in the `filtering` array represents a set of filtering rules for a specific data domain and includes the following attributes: + +- `domain` (Required, string) + +Specifies the data domain to which these filtering rules apply. + +- `active` (Required, object) + +Contains the set of rules that are actively used for sync jobs. The `active` object includes: + + * `rules` (Required, array of objects) + + An array of individual filtering rule objects, each with the following sub-attributes: + ** `id` (Required, string) + + A unique identifier for the rule. + ** `policy` (Required, string) + + Specifies the policy, such as "include" or "exclude". + ** `field` (Required, string) + + The field in the document to which this rule applies. + ** `rule` (Required, string) + + The type of rule, such as "regex", "starts_with", "ends_with", "contains", "equals", "<", ">", etc. + ** `value` (Required, string) + + The value to be used in conjunction with the rule for matching the contents of the document's field. + ** `order` (Required, number) + + The order in which the rules are applied. The first rule to match has its policy applied. + ** `created_at` (Optional, datetime) + + The timestamp when the rule was added. + ** `updated_at` (Optional, datetime) + + The timestamp when the rule was last edited. 
+ + * `advanced_snippet` (Optional, object) + + Used for {enterprise-search-ref}/sync-rules.html#sync-rules-advanced[advanced filtering] at query time, with the following sub-attributes: + ** `value` (Required, object) + + A JSON object passed directly to the connector for advanced filtering. + ** `created_at` (Optional, datetime) + + The timestamp when this JSON object was created. + ** `updated_at` (Optional, datetime) + + The timestamp when this JSON object was last edited. + + * `validation` (Optional, object) + + Provides validation status for the rules, including: + ** `state` (Required, string) + + Indicates the validation state: "edited", "valid", or "invalid". + ** `errors` (Optional, object) + + Contains details about any validation errors, with sub-attributes: + *** `ids` (Required, string) + + The ID(s) of any rules deemed invalid. + *** `messages` (Required, string) + + Messages explaining what is invalid about the rules. + +- `draft` (Optional, object) + +An object identical in structure to the `active` object, but used for drafting and editing filtering rules before they become active. + + +[[update-connector-filtering-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Connector `filtering` field was successfully updated. + +`400`:: +The `connector_id` was not provided or the request payload was malformed. + +`404` (Missing resources):: +No connector matching `connector_id` could be found. + +[[update-connector-filtering-api-example]] +==== {api-examples-title} + +The following example updates the `filtering` property for the connector with ID `my-connector`: + +//// +[source, console] +-------------------------------------------------- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "service_type": "google_drive" +} +-------------------------------------------------- +// TESTSETUP + +[source,console] +-------------------------------------------------- +DELETE _connector/my-connector +-------------------------------------------------- +// TEARDOWN +//// + +[source,console] +---- +PUT _connector/my-connector/_filtering +{ + "filtering": [ + { + "active": { + "advanced_snippet": { + "created_at": "2023-11-09T15:13:08.231Z", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": {} + }, + "rules": [ + { + "created_at": "2023-11-09T15:13:08.231Z", + "field": "_", + "id": "DEFAULT", + "order": 0, + "policy": "include", + "rule": "regex", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": ".*" + } + ], + "validation": { + "errors": [], + "state": "valid" + } + }, + "domain": "DEFAULT", + "draft": { + "advanced_snippet": { + "created_at": "2023-11-09T15:13:08.231Z", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": {} + }, + "rules": [ + { + "created_at": "2023-11-09T15:13:08.231Z", + "field": "_", + "id": "DEFAULT", + "order": 0, + "policy": "include", + "rule": "regex", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": ".*" + } + ], + "validation": { + "errors": [], + "state": "valid" + } + } + } + ] +} +---- + +[source,console-result] +---- +{ + "result": "updated" +} +---- diff --git a/docs/reference/connector/apis/update-connector-last-sync-api.asciidoc b/docs/reference/connector/apis/update-connector-last-sync-api.asciidoc new file mode 100644 index 0000000000000..e9fffd22b21cd --- /dev/null +++ b/docs/reference/connector/apis/update-connector-last-sync-api.asciidoc @@ -0,0 +1,135 @@ +[[update-connector-last-sync-api]] +=== Update connector last sync stats API + +preview::[] + +++++ +Update 
connector last sync stats +++++ + +Updates the fields related to the last sync of a connector. + +This action is used for analytics and monitoring. + +[[update-connector-last-sync-api-request]] +==== {api-request-title} + +`PUT _connector//_last_sync` + +[[update-connector-last-sync-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_id` parameter should reference an existing connector. + +[[update-connector-last-sync-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) + +[role="child_attributes"] +[[update-connector-last-sync-api-request-body]] +==== {api-request-body-title} + +`last_access_control_sync_error`:: +(Optional, string) The last error message related to access control sync, if any. + +`last_access_control_sync_scheduled_at`:: +(Optional, datetime) The datetime indicating when the last access control sync was scheduled. + +`last_access_control_sync_status`:: +(Optional, ConnectorSyncStatus) The status of the last access control sync. + +`last_deleted_document_count`:: +(Optional, long) The number of documents deleted in the last sync process. + +`last_incremental_sync_scheduled_at`:: +(Optional, datetime) The datetime when the last incremental sync was scheduled. + +`last_indexed_document_count`:: +(Optional, long) The number of documents indexed in the last sync. + +`last_sync_error`:: +(Optional, string) The last error message encountered during a sync process, if any. + +`last_sync_scheduled_at`:: +(Optional, datetime) The datetime when the last sync was scheduled. + +`last_sync_status`:: +(Optional, ConnectorSyncStatus) The status of the last sync. + +`last_synced`:: +(Optional, datetime) The datetime of the last successful synchronization. + + +The value of `ConnectorSyncStatus` is one of the following lowercase strings representing different sync states: + +* `canceling`: The sync process is in the process of being canceled. +* `canceled`: The sync process has been canceled. +* `completed`: The sync process completed successfully. +* `error`: An error occurred during the sync process. +* `in_progress`: The sync process is currently underway. +* `pending`: The sync is pending and has not yet started. +* `suspended`: The sync process has been temporarily suspended. + + +[[update-connector-last-sync-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Connector last sync stats were successfully updated. + +`400`:: +The `connector_id` was not provided or the request payload was malformed. + +`404` (Missing resources):: +No connector matching `connector_id` could be found. 
+ +[[update-connector-last-sync-api-example]] +==== {api-examples-title} + +The following example updates the last sync stats for the connector with ID `my-connector`: + +//// +[source, console] +-------------------------------------------------- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "service_type": "google_drive" +} +-------------------------------------------------- +// TESTSETUP + +[source,console] +-------------------------------------------------- +DELETE _connector/my-connector +-------------------------------------------------- +// TEARDOWN +//// + +[source,console] +---- +PUT _connector/my-connector/_last_sync +{ + "last_access_control_sync_error": "Houston, we have a problem!", + "last_access_control_sync_scheduled_at": "2023-11-09T15:13:08.231Z", + "last_access_control_sync_status": "pending", + "last_deleted_document_count": 42, + "last_incremental_sync_scheduled_at": "2023-11-09T15:13:08.231Z", + "last_indexed_document_count": 42, + "last_sync_error": "Houston, we have a problem!", + "last_sync_scheduled_at": "2024-11-09T15:13:08.231Z", + "last_sync_status": "completed", + "last_synced": "2024-11-09T15:13:08.231Z" +} +---- + +[source,console-result] +---- +{ + "result": "updated" +} +---- diff --git a/docs/reference/connector/apis/update-connector-name-description-api.asciidoc b/docs/reference/connector/apis/update-connector-name-description-api.asciidoc new file mode 100644 index 0000000000000..d45fb545e168b --- /dev/null +++ b/docs/reference/connector/apis/update-connector-name-description-api.asciidoc @@ -0,0 +1,90 @@ +[[update-connector-name-description-api]] +=== Update connector name and description API + +preview::[] + +++++ +Update connector name and description +++++ + +Updates the `name` and `description` fields of a connector. + +[[update-connector-name-description-api-request]] +==== {api-request-title} + +`PUT _connector//_name` + +[[update-connector-name-description-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_id` parameter should reference an existing connector. + +[[update-connector-name-description-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) + +[role="child_attributes"] +[[update-connector-name-description-api-request-body]] +==== {api-request-body-title} + +`name`:: +(Required, string) Name of the connector. + +`description`:: +(Optional, string) Description of the connector. + + +[[update-connector-name-description-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Connector `name` and `description` fields were successfully updated. + +`400`:: +The `connector_id` was not provided or the request payload was malformed. + +`404` (Missing resources):: +No connector matching `connector_id` could be found. 
+ +[[update-connector-name-description-api-example]] +==== {api-examples-title} + +The following example updates the `name` and `description` fields for the connector with ID `my-connector`: + +//// +[source, console] +-------------------------------------------------- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "service_type": "google_drive" +} +-------------------------------------------------- +// TESTSETUP + +[source,console] +-------------------------------------------------- +DELETE _connector/my-connector +-------------------------------------------------- +// TEARDOWN +//// + +[source,console] +---- +PUT _connector/my-connector/_name +{ + "name": "Custom connector", + "description": "This is my customized connector" +} +---- + +[source,console-result] +---- +{ + "result": "updated" +} +---- diff --git a/docs/reference/connector/apis/update-connector-pipeline-api.asciidoc b/docs/reference/connector/apis/update-connector-pipeline-api.asciidoc new file mode 100644 index 0000000000000..6938506703da8 --- /dev/null +++ b/docs/reference/connector/apis/update-connector-pipeline-api.asciidoc @@ -0,0 +1,103 @@ +[[update-connector-pipeline-api]] +=== Update connector pipeline API + +preview::[] + +++++ +Update connector pipeline +++++ + +Updates the `pipeline` configuration of a connector. + +When you create a new connector, the configuration of an <> is populated with default settings. + +[[update-connector-pipeline-api-request]] +==== {api-request-title} + +`PUT _connector//_pipeline` + +[[update-connector-pipeline-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_id` parameter should reference an existing connector. + +[[update-connector-pipeline-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) + +[role="child_attributes"] +[[update-connector-pipeline-api-request-body]] +==== {api-request-body-title} + +`pipeline`:: +(Required, object) The pipeline configuration of the connector. The pipeline determines how data is processed during ingestion into Elasticsearch. + +Pipeline configuration must include the following attributes: + +- `extract_binary_content` (Required, boolean) A flag indicating whether to extract binary content during ingestion. + +- `name` (Required, string) The name of the ingest pipeline. + +- `reduce_whitespace` (Required, boolean) A flag indicating whether to reduce extra whitespace in the ingested content. + +- `run_ml_inference` (Required, boolean) A flag indicating whether to run machine learning inference on the ingested content. + + +[[update-connector-pipeline-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Connector `pipeline` field was successfully updated. + +`400`:: +The `connector_id` was not provided or the request payload was malformed. + +`404` (Missing resources):: +No connector matching `connector_id` could be found. 
+
+[[update-connector-pipeline-api-example]]
+==== {api-examples-title}
+
+The following example updates the `pipeline` property for the connector with ID `my-connector`:
+
+////
+[source, console]
+--------------------------------------------------
+PUT _connector/my-connector
+{
+  "index_name": "search-google-drive",
+  "name": "My Connector",
+  "service_type": "google_drive"
+}
+--------------------------------------------------
+// TESTSETUP
+
+[source,console]
+--------------------------------------------------
+DELETE _connector/my-connector
+--------------------------------------------------
+// TEARDOWN
+////
+
+[source,console]
+----
+PUT _connector/my-connector/_pipeline
+{
+    "pipeline": {
+        "extract_binary_content": true,
+        "name": "my-connector-pipeline",
+        "reduce_whitespace": true,
+        "run_ml_inference": true
+    }
+}
+----
+
+[source,console-result]
+----
+{
+    "result": "updated"
+}
+----
diff --git a/docs/reference/connector/apis/update-connector-scheduling-api.asciidoc b/docs/reference/connector/apis/update-connector-scheduling-api.asciidoc
new file mode 100644
index 0000000000000..c47e6d4c0367b
--- /dev/null
+++ b/docs/reference/connector/apis/update-connector-scheduling-api.asciidoc
@@ -0,0 +1,113 @@
+[[update-connector-scheduling-api]]
+=== Update connector scheduling API
+
+preview::[]
+
+++++
+Update connector scheduling
+++++
+
+Updates the `scheduling` configuration of a connector.
+
+[[update-connector-scheduling-api-request]]
+==== {api-request-title}
+
+`PUT _connector/<connector_id>/_scheduling`
+
+[[update-connector-scheduling-api-prereq]]
+==== {api-prereq-title}
+
+* To sync data using connectors, it's essential to have the Elastic connectors service running.
+* The `connector_id` parameter should reference an existing connector.
+
+[[update-connector-scheduling-api-path-params]]
+==== {api-path-parms-title}
+
+`<connector_id>`::
+(Required, string)
+
+[role="child_attributes"]
+[[update-connector-scheduling-api-request-body]]
+==== {api-request-body-title}
+
+`scheduling`::
+(Required, object) The scheduling configuration for the connector. This configuration determines the frequency of synchronization operations for the connector.
+
+The scheduling configuration includes the following attributes, each represented as a `ScheduleConfig` object:
+
+- `access_control` (Required, `ScheduleConfig` object) Defines the schedule for synchronizing access control settings of the connector.
+
+- `full` (Required, `ScheduleConfig` object) Defines the schedule for full content syncs.
+
+- `incremental` (Required, `ScheduleConfig` object) Defines the schedule for incremental content syncs.
+
+Each `ScheduleConfig` object includes the following sub-attributes:
+
+  - `enabled` (Required, boolean) A flag that enables or disables the scheduling.
+
+  - `interval` (Required, string) A CRON expression representing the sync schedule. This expression defines the frequency at which the sync operations should occur. It must be provided in a valid CRON format.
+
+
+[[update-connector-scheduling-api-response-codes]]
+==== {api-response-codes-title}
+
+`200`::
+Connector `scheduling` field was successfully updated.
+
+`400`::
+The `connector_id` was not provided or the request payload was malformed.
+
+`404` (Missing resources)::
+No connector matching `connector_id` could be found.
+ +[[update-connector-scheduling-api-example]] +==== {api-examples-title} + +The following example updates the `scheduling` property for the connector with ID `my-connector`: + +//// +[source, console] +-------------------------------------------------- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "service_type": "google_drive" +} +-------------------------------------------------- +// TESTSETUP + +[source,console] +-------------------------------------------------- +DELETE _connector/my-connector +-------------------------------------------------- +// TEARDOWN +//// + +[source,console] +---- +PUT _connector/my-connector/_scheduling +{ + "scheduling": { + "access_control": { + "enabled": true, + "interval": "0 10 0 * * ?" + }, + "full": { + "enabled": true, + "interval": "0 20 0 * * ?" + }, + "incremental": { + "enabled": false, + "interval": "0 30 0 * * ?" + } + } +} +---- + +[source,console-result] +---- +{ + "result": "updated" +} +---- diff --git a/docs/reference/data-streams/change-mappings-and-settings.asciidoc b/docs/reference/data-streams/change-mappings-and-settings.asciidoc index 86d72cf52c9e9..47c3529ceef40 100644 --- a/docs/reference/data-streams/change-mappings-and-settings.asciidoc +++ b/docs/reference/data-streams/change-mappings-and-settings.asciidoc @@ -592,7 +592,8 @@ stream's oldest backing index. "hidden": false, "system": false, "allow_custom_routing": false, - "replicated": false + "replicated": false, + "rollover_on_write": false } ] } diff --git a/docs/reference/data-streams/downsampling-ilm.asciidoc b/docs/reference/data-streams/downsampling-ilm.asciidoc index 94990040d79f1..79af7225ed1ad 100644 --- a/docs/reference/data-streams/downsampling-ilm.asciidoc +++ b/docs/reference/data-streams/downsampling-ilm.asciidoc @@ -326,6 +326,7 @@ following. Note the original `index_name`: `.ds-datastream--000001`. 
"system": false, "allow_custom_routing": false, "replicated": false, + "rollover_on_write": false, "time_series": { "temporal_ranges": [ { diff --git a/docs/reference/data-streams/downsampling-manual.asciidoc b/docs/reference/data-streams/downsampling-manual.asciidoc index 5bdfaf428d169..5e0c09f9d2be2 100644 --- a/docs/reference/data-streams/downsampling-manual.asciidoc +++ b/docs/reference/data-streams/downsampling-manual.asciidoc @@ -372,6 +372,7 @@ This returns: "system": false, "allow_custom_routing": false, "replicated": false, + "rollover_on_write": false, "time_series": { "temporal_ranges": [ { diff --git a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc index aa598b010badc..3125c82120d8d 100644 --- a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc +++ b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc @@ -139,7 +139,8 @@ and that the next generation index will also be managed by {ilm-init}: "hidden": false, "system": false, "allow_custom_routing": false, - "replicated": false + "replicated": false, + "rollover_on_write": false } ] } @@ -275,7 +276,8 @@ GET _data_stream/dsl-data-stream "hidden": false, "system": false, "allow_custom_routing": false, - "replicated": false + "replicated": false, + "rollover_on_write": false } ] } @@ -352,7 +354,8 @@ GET _data_stream/dsl-data-stream "hidden": false, "system": false, "allow_custom_routing": false, - "replicated": false + "replicated": false, + "rollover_on_write": false } ] } @@ -449,7 +452,8 @@ GET _data_stream/dsl-data-stream "hidden": false, "system": false, "allow_custom_routing": false, - "replicated": false + "replicated": false, + "rollover_on_write": false } ] } diff --git a/docs/reference/data-streams/set-up-tsds.asciidoc b/docs/reference/data-streams/set-up-tsds.asciidoc index c175da2e991e9..ed6b79653e61f 100644 --- a/docs/reference/data-streams/set-up-tsds.asciidoc +++ b/docs/reference/data-streams/set-up-tsds.asciidoc @@ -176,9 +176,7 @@ PUT _component_template/my-weather-sensor-mappings Optionally, the index settings component template for a TSDS can include: * Your lifecycle policy in the `index.lifecycle.name` index setting. -* The <> index setting. -* The <> index setting. -* Other index settings, such as <>, for your TSDS's +* Other index settings, such as <>, for your TSDS's backing indices. IMPORTANT: Don't specify the `index.routing_path` index setting in a component @@ -191,8 +189,7 @@ PUT _component_template/my-weather-sensor-settings { "template": { "settings": { - "index.lifecycle.name": "my-lifecycle-policy", - "index.look_ahead_time": "3h" + "index.lifecycle.name": "my-lifecycle-policy" } }, "_meta": { diff --git a/docs/reference/data-streams/tsds-index-settings.asciidoc b/docs/reference/data-streams/tsds-index-settings.asciidoc index c0cae9e365114..98976231661ec 100644 --- a/docs/reference/data-streams/tsds-index-settings.asciidoc +++ b/docs/reference/data-streams/tsds-index-settings.asciidoc @@ -28,13 +28,13 @@ value (exclusive) accepted by the index. Only indices with an `index.mode` of `index.look_ahead_time`:: (<<_static_index_settings,Static>>, <>) Interval used to calculate the `index.time_series.end_time` for a TSDS's write -index. Defaults to `2h` (2 hours). Accepts `1m` (one minute) to `7d` (seven -days). Only indices with an `index.mode` of `time_series` support this setting. 
+index. Defaults to `2h` (2 hours). Accepts `1m` (one minute) to `2h` (two
+hours). Only indices with an `index.mode` of `time_series` support this setting.
For more information, refer to <>. Additionally this setting can not be less
than `time_series.poll_interval` cluster setting.

NOTE: Increasing the `look_ahead_time` will also increase the amount of time {ilm-cap}
-waits before being able to proceed with executing the actions that expect the 
+waits before being able to proceed with executing the actions that expect the
index to not receive any writes anymore. For more information, refer to <>.

[[index-look-back-time]]
diff --git a/docs/reference/data-streams/use-a-data-stream.asciidoc b/docs/reference/data-streams/use-a-data-stream.asciidoc
index 7f5dda6b8a948..3167d768983a2 100644
--- a/docs/reference/data-streams/use-a-data-stream.asciidoc
+++ b/docs/reference/data-streams/use-a-data-stream.asciidoc
@@ -117,12 +117,24 @@ GET /_data_stream/my-data-stream/_stats?human=true
=== Manually roll over a data stream

Use the <> to manually
-<> a data stream:
+<> a data stream. You have
+two options when manually rolling over:

+1. To immediately trigger a rollover:
++
[source,console]
----
POST /my-data-stream/_rollover/
----

+2. To postpone the rollover until the next indexing event occurs:
++
+[source,console]
+----
+POST /my-data-stream/_rollover?lazy
+----
++
+Use the second option to avoid having empty backing indices in data streams
+that do not get updated often.

[discrete]
[[open-closed-backing-indices]]
diff --git a/docs/reference/esql/esql-apis.asciidoc b/docs/reference/esql/esql-apis.asciidoc
new file mode 100644
index 0000000000000..686a71506bc14
--- /dev/null
+++ b/docs/reference/esql/esql-apis.asciidoc
@@ -0,0 +1,20 @@
+[[esql-apis]]
+== {esql} APIs
+
+The {es} Query Language ({esql}) provides a powerful way to filter, transform,
+and analyze data stored in {es}, and in the future in other runtimes. For an
+overview of {esql} and related tutorials, see <>.
+
+* <>
+* <>
+* <>
+* <>
+
+
+include::esql-query-api.asciidoc[]
+
+include::esql-async-query-api.asciidoc[]
+
+include::esql-async-query-get-api.asciidoc[]
+
+include::esql-async-query-delete-api.asciidoc[]
diff --git a/docs/reference/esql/esql-async-query-api.asciidoc b/docs/reference/esql/esql-async-query-api.asciidoc
new file mode 100644
index 0000000000000..0a78a923523cc
--- /dev/null
+++ b/docs/reference/esql/esql-async-query-api.asciidoc
@@ -0,0 +1,164 @@
+[[esql-async-query-api]]
+=== {esql} async query API
+++++
+{esql} async query API
+++++
+
+Runs an async <>.
+
+The async query API lets you asynchronously execute a query request,
+monitor its progress, and retrieve results when they become available.
+
+The API accepts the same parameters and request body as the synchronous
+<>, along with additional async-related
+properties as outlined below.
+
+[source,console]
+----
+POST /_query/async
+{
+  "query": """
+    FROM library
+    | EVAL year = DATE_TRUNC(1 YEARS, release_date)
+    | STATS MAX(page_count) BY year
+    | SORT year
+    | LIMIT 5
+  """,
+  "wait_for_completion_timeout": "2s"
+}
+----
+// TEST[setup:library]
+
+If the results are not available within the given timeout period (2 seconds
+in this case), no results are returned. Instead, the response includes:
+
+ * A query ID
+ * An `is_running` value of _true_, indicating the query is ongoing
+
+The query continues to run in the background without blocking other
+requests.
+
+[source,console-result]
+----
+{
+  "id": "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=",
+  "is_running": true
+}
+----
+// TEST[skip: no access to query ID - may return response values]
+
+Otherwise, if the response's `is_running` value is `false`, the async
+query has finished and the results are returned.
+
+[source,console-result]
+----
+{
+  "is_running": false,
+  "columns": ...
+}
+----
+// TEST[skip: no access to query ID - may return response values]
+
+[[esql-async-query-api-request]]
+==== {api-request-title}
+
+`POST /_query/async`
+
+[[esql-async-query-api-prereqs]]
+==== {api-prereq-title}
+
+* If the {es} {security-features} are enabled, you must have the `read`
+<> for the data stream, index,
+or alias you query.
+
+[[esql-async-query-api-path-params]]
+==== {api-path-parms-title}
+
+The API accepts the same parameters as the synchronous
+<>.
+
+[[esql-async-query-api-request-body]]
+==== {api-request-body-title}
+
+The API accepts the same request body as the synchronous
+<>, along with the following
+parameters:
+
+[[esql-async-query-api-wait-for-completion-timeout]]
+`wait_for_completion_timeout`::
++
+--
+(Optional, <>)
+Timeout duration to wait for the request to finish. Defaults to 1 second,
+meaning the request waits for 1 second for the query results.
+
+If this parameter is specified and the request completes during this period,
+complete results are returned.
+
+If the request does not complete during this period, a query
+<> is returned.
+--
+
+[[esql-async-query-api-keep-on-completion]]
+`keep_on_completion`::
++
+--
+(Optional, Boolean)
+If `true`, the query and its results are stored in the cluster.
+
+If `false`, the query and its results are stored in the cluster only if the
+request does not complete during the period set by the
+<>
+parameter. Defaults to `false`.
--
+
+`keep_alive`::
++
+--
+(Optional, <>)
+Period for which the query and its results are stored in the cluster. Defaults
+to `5d` (five days).
+
+When this period expires, the query and its results are deleted, even if the
+query is still ongoing.
+
+If the <> parameter
+is `false`, {es} only stores async queries that do not complete within the period
+set by the <>
+parameter, regardless of this value.
+--
+
+[[esql-async-query-api-response-body]]
+==== {api-response-body-title}
+
+The API returns the same response body as the synchronous
+<>, along with the following
+properties:
+
+[[esql-async-query-api-response-body-query-id]]
+`id`::
++
+--
+(string)
+Identifier for the query.
+
+This query ID is only provided if one of the following conditions is met:
+
+* A query request does not return complete results during the
+<>
+parameter's timeout period.
+
+* The query request's <>
+parameter is `true`.
+
+You can use this ID with the <> to get the current status and available results for the query.
+--
+
+`is_running`::
++
+--
+(Boolean)
+If `true`, the query request is still executing.
+--
diff --git a/docs/reference/esql/esql-async-query-delete-api.asciidoc b/docs/reference/esql/esql-async-query-delete-api.asciidoc
new file mode 100644
index 0000000000000..90f8c06b9124a
--- /dev/null
+++ b/docs/reference/esql/esql-async-query-delete-api.asciidoc
@@ -0,0 +1,42 @@
+[[esql-async-query-delete-api]]
+=== {esql} async query delete API
+++++
+{esql} async query delete API
+++++
+
+The {esql} async query delete API is used to manually delete an async query
+by ID. If the query is still running, the query is cancelled. Otherwise,
+the stored results are deleted.
+
+[source,console]
+----
+DELETE /_query/async/FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=
+----
+// TEST[skip: no access to query ID]
+
+[[esql-async-query-delete-api-request]]
+==== {api-request-title}
+
+`DELETE /_query/async/<query_id>`
+
+[[esql-async-query-delete-api-prereqs]]
+==== {api-prereq-title}
+
+* If the {es} {security-features} are enabled, only the following users can
+use this API to delete a query:
+
+** The authenticated user that submitted the original query request
+** Users with the `cancel_task` <>
+
+
+[[esql-async-query-delete-api-path-params]]
+==== {api-path-parms-title}
+
+`<query_id>`::
+(Required, string)
+Identifier for the query to delete.
++
+A query ID is provided in the <>'s
+response for a query that does not complete in the awaited time. A query ID is
+also provided if the request's <>
+parameter is `true`.
diff --git a/docs/reference/esql/esql-async-query-get-api.asciidoc b/docs/reference/esql/esql-async-query-get-api.asciidoc
new file mode 100644
index 0000000000000..ec68313b2c490
--- /dev/null
+++ b/docs/reference/esql/esql-async-query-get-api.asciidoc
@@ -0,0 +1,58 @@
+[[esql-async-query-get-api]]
+=== {esql} async query get API
+++++
+{esql} async query get API
+++++
+
+Returns the current status and available results for an <> or stored results.
+
+[source,console]
+----
+GET /_query/async/FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=
+----
+// TEST[skip: no access to query ID]
+
+[[esql-async-query-get-api-request]]
+==== {api-request-title}
+
+`GET /_query/async/<query_id>`
+
+[[esql-async-query-get-api-prereqs]]
+==== {api-prereq-title}
+
+* If the {es} {security-features} are enabled, only the user who first submitted
+the {esql} query can retrieve the results using this API.
+
+[[esql-async-query-get-api-path-params]]
+==== {api-path-parms-title}
+
+`<query_id>`::
+(Required, string)
+Identifier for the query.
++
+A query ID is provided in the <>'s
+response for a query that does not complete in the awaited time. A query ID is
+also provided if the request's <>
+parameter is `true`.
+
+[[esql-async-query-get-api-query-params]]
+==== {api-query-parms-title}
+
+`wait_for_completion_timeout`::
+(Optional, <>)
+Timeout duration to wait for the request to finish. Defaults to no timeout,
+meaning the request waits for complete query results.
++
+If this parameter is specified and the request completes during this period,
+complete query results are returned.
++
+If the request does not complete during this period, the response returns an
+`is_running` value of `true` and no results.
+
+[[esql-async-query-get-api-response-body]]
+==== {api-response-body-title}
+
+The {esql} async query get API returns the same response body as the {esql}
+query API. See the {esql} query API's <>.
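+
+For illustration, a completed response retrieved with this API might look like
+the following sketch. The column names and values here are hypothetical and
+depend on the query that was submitted; they are not a tested example:
+
+[source,console-result]
+----
+{
+  "is_running": false,
+  "columns": [
+    {"name": "year", "type": "date"},
+    {"name": "MAX(page_count)", "type": "integer"}
+  ],
+  "values": [
+    ["1985-01-01T00:00:00.000Z", 458]
+  ]
+}
+----
+// TEST[skip: hypothetical response values]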
diff --git a/docs/reference/esql/esql-functions.asciidoc b/docs/reference/esql/esql-functions.asciidoc deleted file mode 100644 index c2e943f7555d6..0000000000000 --- a/docs/reference/esql/esql-functions.asciidoc +++ /dev/null @@ -1,142 +0,0 @@ -[[esql-functions]] -== {esql} functions - -++++ -Functions -++++ - -<>, <> and <> support -these functions: - -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> - -include::functions/abs.asciidoc[] -include::functions/acos.asciidoc[] -include::functions/asin.asciidoc[] -include::functions/atan.asciidoc[] -include::functions/atan2.asciidoc[] -include::functions/auto_bucket.asciidoc[] -include::functions/case.asciidoc[] -include::functions/ceil.asciidoc[] -include::functions/cidr_match.asciidoc[] -include::functions/coalesce.asciidoc[] -include::functions/concat.asciidoc[] -include::functions/cos.asciidoc[] -include::functions/cosh.asciidoc[] -include::functions/date_extract.asciidoc[] -include::functions/date_format.asciidoc[] -include::functions/date_parse.asciidoc[] -include::functions/date_trunc.asciidoc[] -include::functions/e.asciidoc[] -include::functions/ends_with.asciidoc[] -include::functions/floor.asciidoc[] -include::functions/greatest.asciidoc[] -include::functions/is_finite.asciidoc[] -include::functions/is_infinite.asciidoc[] -include::functions/is_nan.asciidoc[] -include::functions/least.asciidoc[] -include::functions/left.asciidoc[] -include::functions/length.asciidoc[] -include::functions/log10.asciidoc[] -include::functions/ltrim.asciidoc[] -include::functions/mv_avg.asciidoc[] -include::functions/mv_concat.asciidoc[] -include::functions/mv_count.asciidoc[] -include::functions/mv_dedupe.asciidoc[] -include::functions/mv_max.asciidoc[] -include::functions/mv_median.asciidoc[] -include::functions/mv_min.asciidoc[] -include::functions/mv_sum.asciidoc[] -include::functions/now.asciidoc[] -include::functions/pi.asciidoc[] -include::functions/pow.asciidoc[] -include::functions/replace.asciidoc[] -include::functions/right.asciidoc[] -include::functions/round.asciidoc[] -include::functions/rtrim.asciidoc[] -include::functions/sin.asciidoc[] -include::functions/sinh.asciidoc[] -include::functions/split.asciidoc[] -include::functions/sqrt.asciidoc[] -include::functions/starts_with.asciidoc[] -include::functions/substring.asciidoc[] -include::functions/tan.asciidoc[] -include::functions/tanh.asciidoc[] -include::functions/tau.asciidoc[] -include::functions/to_boolean.asciidoc[] -include::functions/to_cartesianpoint.asciidoc[] -include::functions/to_datetime.asciidoc[] -include::functions/to_degrees.asciidoc[] -include::functions/to_double.asciidoc[] -include::functions/to_geopoint.asciidoc[] -include::functions/to_integer.asciidoc[] -include::functions/to_ip.asciidoc[] -include::functions/to_long.asciidoc[] -include::functions/to_radians.asciidoc[] -include::functions/to_string.asciidoc[] -include::functions/to_unsigned_long.asciidoc[] -include::functions/to_version.asciidoc[] -include::functions/trim.asciidoc[] diff --git a/docs/reference/esql/esql-get-started.asciidoc b/docs/reference/esql/esql-get-started.asciidoc index 6e467e1e7312d..631a961b023ab 100644 --- a/docs/reference/esql/esql-get-started.asciidoc +++ 
b/docs/reference/esql/esql-get-started.asciidoc @@ -5,6 +5,8 @@ Getting started ++++ +preview::["Do not use {esql} on production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] + This guide shows how you can use {esql} to query and aggregate your data. [discrete] @@ -203,6 +205,31 @@ calculate the median duration per client IP: include::{esql-specs}/stats.csv-spec[tag=gs-stats-by] ---- +[discrete] +[[esql-getting-started-access-columns]] +=== Access columns + +You can access columns by their name. If a name contains special characters, +<> with backticks (+{backtick}+). + +Assigning an explicit name to a column created by `EVAL` or `STATS` is optional. +If you don't provide a name, the new column name is equal to the function +expression. For example: + +[source,esql] +---- +include::{esql-specs}/eval.csv-spec[tag=gs-eval-no-column-name] +---- + +In this query, `EVAL` adds a new column named `event_duration/1000000.0`. +Because its name contains special characters, to access this column, quote it +with backticks: + +[source,esql] +---- +include::{esql-specs}/eval.csv-spec[tag=gs-eval-stats-backticks] +---- + [discrete] [[esql-getting-started-histogram]] === Create a histogram diff --git a/docs/reference/esql/esql-query-api.asciidoc b/docs/reference/esql/esql-query-api.asciidoc index afa9ab7254cfa..bbfa41538528a 100644 --- a/docs/reference/esql/esql-query-api.asciidoc +++ b/docs/reference/esql/esql-query-api.asciidoc @@ -1,5 +1,5 @@ [[esql-query-api]] -== {esql} query API +=== {esql} query API ++++ {esql} query API ++++ @@ -23,13 +23,13 @@ POST /_query [discrete] [[esql-query-api-request]] -=== {api-request-title} +==== {api-request-title} `POST _query` [discrete] [[esql-query-api-prereqs]] -=== {api-prereq-title} +==== {api-prereq-title} * If the {es} {security-features} are enabled, you must have the `read` <> for the data stream, index, @@ -37,7 +37,7 @@ or alias you search. [discrete] [[esql-query-api-query-params]] -=== {api-query-parms-title} +==== {api-query-parms-title} `delimiter`:: (Optional, string) Separator for CSV results. Defaults to `,`. The API only @@ -54,7 +54,7 @@ precedence. [discrete] [role="child_attributes"] [[esql-query-api-request-body]] -=== {api-request-body-title} +==== {api-request-body-title} `columnar`:: (Optional, Boolean) If `true`, returns results in a columnar format. Defaults to @@ -71,7 +71,7 @@ responses. See <>. [discrete] [role="child_attributes"] [[esql-query-api-response-body]] -=== {api-response-body-title} +==== {api-response-body-title} `columns`:: (array of objects) @@ -79,13 +79,13 @@ Column headings for the search results. Each object is a column. + .Properties of `columns` objects [%collapsible%open] -==== +===== `name`:: (string) Name of the column. `type`:: (string) Data type for the column. -==== +===== `rows`:: (array of arrays) diff --git a/docs/reference/esql/esql-rest.asciidoc b/docs/reference/esql/esql-rest.asciidoc index 2d47f6e46ff65..d66ceb2eb4f1e 100644 --- a/docs/reference/esql/esql-rest.asciidoc +++ b/docs/reference/esql/esql-rest.asciidoc @@ -247,3 +247,89 @@ POST /_query } ---- // TEST[setup:library] + +[discrete] +[[esql-rest-async-query]] +==== Running an async {esql} query + +The <> lets you asynchronously +execute a query request, monitor its progress, and retrieve results when +they become available. 
+
+Executing an {esql} query is commonly quite fast. However, queries across
+large data sets or frozen data can take some time. To avoid long waits,
+run an async {esql} query.
+
+Queries initiated by the async query API may or may not return results within
+the timeout period. The `wait_for_completion_timeout` property determines how
+long to wait for the results. If the results are not available by this time, a
+query <> is returned, which
+can be used later to retrieve the results. For example:
+
+[source,console]
+----
+POST /_query/async
+{
+  "query": """
+    FROM library
+    | EVAL year = DATE_TRUNC(1 YEARS, release_date)
+    | STATS MAX(page_count) BY year
+    | SORT year
+    | LIMIT 5
+  """,
+  "wait_for_completion_timeout": "2s"
+}
+----
+// TEST[setup:library]
+
+If the results are not available within the given timeout period (2
+seconds in this case), no results are returned. Instead, the response
+includes:
+
+* A query ID
+* An `is_running` value of _true_, indicating the query is ongoing
+
+The query continues to run in the background without blocking other
+requests.
+
+[source,console-result]
+----
+{
+  "id": "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=",
+  "is_running": true
+}
+----
+// TEST[skip: no access to query ID - may return response values]
+
+To check the progress of an async query, use the <> with the query ID. Specify how long you'd like
+to wait for complete results in the `wait_for_completion_timeout` parameter.
+
+[source,console]
+----
+GET /_query/async/FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=?wait_for_completion_timeout=30s
+----
+// TEST[skip: no access to query ID - may return response values]
+
+If the response's `is_running` value is `false`, the query has finished
+and the results are returned.
+
+[source,console-result]
+----
+{
+  "is_running": false,
+  "columns": ...
+}
+----
+// TEST[skip: no access to query ID - may return response values]
+
+Use the <> to
+delete an async query before the `keep_alive` period ends. If the query
+is still running, {es} cancels it.
+
+[source,console]
+----
+DELETE /_query/async/FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=
+----
+// TEST[skip: no access to query ID]
+
diff --git a/docs/reference/esql/esql-syntax.asciidoc b/docs/reference/esql/esql-syntax.asciidoc
index 22c9b1f100827..c5d56ef15fdfd 100644
--- a/docs/reference/esql/esql-syntax.asciidoc
+++ b/docs/reference/esql/esql-syntax.asciidoc
@@ -40,37 +40,28 @@ source-command
| processing-command1
| processing-command2

[[esql-identifiers]]
==== Identifiers

-The identifiers can be used as they are and don't require quoting, unless
-containing special characters, in which case they must be quoted with
-backticks (+{backtick}+). What "special characters" means is command dependent.
+Identifiers need to be quoted with backticks (+{backtick}+) if:

-For <>, <>, <>,
-<>, <> and
-<> these are: `=`, +{backtick}+, `,`, ` ` (space), `|` ,
-`[`, `]`, `\t` (TAB), `\r` (CR), `\n` (LF); one `/` is allowed unquoted, but
-a sequence of two or more require quoting.
+* they don't start with a letter, `_`, or `@`
+* they contain a character that is not a letter, a number, or `_`

-The rest of the commands - those allowing for identifiers be used in
-expressions - require quoting if the identifier contains characters other than
-letters, numbers and `_` and doesn't start with a letter, `_` or `@`.
-
-For instance:
+For example:

[source,esql]
----
-// Retain just one field
FROM index
-| KEEP 1.field
+| KEEP `1.field`
----
-is legal.
However, if same field is to be used with an <>, -it'd have to be quoted: +When referencing a function alias that itself uses a quoted identifier, the +backticks of the quoted identifier need to be escaped with another backtick. For +example: [source,esql] ---- -// Copy one field FROM index -| EVAL my_field = `1.field` +| STATS COUNT(`1.field`) +| EVAL my_count = `COUNT(``1.field``)` ---- [discrete] diff --git a/docs/reference/esql/esql-using.asciidoc b/docs/reference/esql/esql-using.asciidoc index 235c7defe559b..f11fdd2d058a5 100644 --- a/docs/reference/esql/esql-using.asciidoc +++ b/docs/reference/esql/esql-using.asciidoc @@ -2,7 +2,7 @@ == Using {esql} <>:: -Information about using the <>. +Information about using the <>. <>:: Using {esql} in {kib} to query and aggregate your data, create visualizations, @@ -10,7 +10,7 @@ and set up alerts. <>:: Using {esql} in {elastic-sec} to investigate events in Timeline, create -detection rules, and build {esql} queries using Elastic AI Assistant. +detection rules, and build {esql} queries using Elastic AI Assistant. <>:: Using the <> to list and cancel {esql} queries. diff --git a/docs/reference/esql/functions/abs.asciidoc b/docs/reference/esql/functions/abs.asciidoc index 3adb7dff07043..32b49bc287a83 100644 --- a/docs/reference/esql/functions/abs.asciidoc +++ b/docs/reference/esql/functions/abs.asciidoc @@ -1,18 +1,41 @@ [discrete] [[esql-abs]] === `ABS` + +*Syntax* + [.text-center] image::esql/functions/signature/abs.svg[Embedded,opts=inline] +*Parameters* + +`n`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + Returns the absolute value. -[source,esql] +*Supported types* + +include::types/abs.asciidoc[] + +*Examples* + +[source.merge.styled,esql] ---- -FROM employees -| KEEP first_name, last_name, height -| EVAL abs_height = ABS(0.0 - height) +include::{esql-specs}/math.csv-spec[tag=docsAbs] ---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/math.csv-spec[tag=docsAbs-result] +|=== -Supported types: - -include::types/abs.asciidoc[] +[source.merge.styled,esql] +---- +include::{esql-specs}/math.csv-spec[tag=docsAbsEmployees] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/math.csv-spec[tag=docsAbsEmployees-result] +|=== \ No newline at end of file diff --git a/docs/reference/esql/functions/asin.asciidoc b/docs/reference/esql/functions/asin.asciidoc index f03b5276b7dd6..222f6879785ef 100644 --- a/docs/reference/esql/functions/asin.asciidoc +++ b/docs/reference/esql/functions/asin.asciidoc @@ -1,10 +1,28 @@ [discrete] [[esql-asin]] === `ASIN` + +*Syntax* + [.text-center] image::esql/functions/signature/asin.svg[Embedded,opts=inline] -Inverse https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[sine] trigonometric function. +*Parameters* + +`n`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + +Returns the +https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[arcsine] +of the input numeric expression as an angle, expressed in radians. 
+ +*Supported types* + +include::types/asin.asciidoc[] + +*Example* [source.merge.styled,esql] ---- @@ -14,7 +32,3 @@ include::{esql-specs}/floats.csv-spec[tag=asin] |=== include::{esql-specs}/floats.csv-spec[tag=asin-result] |=== - -Supported types: - -include::types/asin.asciidoc[] diff --git a/docs/reference/esql/functions/atan.asciidoc b/docs/reference/esql/functions/atan.asciidoc index 3813e096aeba1..bdbbd07cbba60 100644 --- a/docs/reference/esql/functions/atan.asciidoc +++ b/docs/reference/esql/functions/atan.asciidoc @@ -1,10 +1,28 @@ [discrete] [[esql-atan]] === `ATAN` + +*Syntax* + [.text-center] image::esql/functions/signature/atan.svg[Embedded,opts=inline] -Inverse https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[tangent] trigonometric function. +*Parameters* + +`n`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + +Returns the +https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[arctangent] of the +input numeric expression as an angle, expressed in radians. + +*Supported types* + +include::types/atan.asciidoc[] + +*Example* [source.merge.styled,esql] ---- @@ -13,8 +31,4 @@ include::{esql-specs}/floats.csv-spec[tag=atan] [%header.monospaced.styled,format=dsv,separator=|] |=== include::{esql-specs}/floats.csv-spec[tag=atan-result] -|=== - -Supported types: - -include::types/atan.asciidoc[] +|=== \ No newline at end of file diff --git a/docs/reference/esql/functions/atan2.asciidoc b/docs/reference/esql/functions/atan2.asciidoc index e78a219333344..3ecc0ff86fe26 100644 --- a/docs/reference/esql/functions/atan2.asciidoc +++ b/docs/reference/esql/functions/atan2.asciidoc @@ -1,11 +1,31 @@ [discrete] [[esql-atan2]] === `ATAN2` + +*Syntax* + [.text-center] image::esql/functions/signature/atan2.svg[Embedded,opts=inline] -The https://en.wikipedia.org/wiki/Atan2[angle] between the positive x-axis and the -ray from the origin to the point (x , y) in the Cartesian plane. +*Parameters* + +`y`:: +Numeric expression. If `null`, the function returns `null`. + +`x`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + +The https://en.wikipedia.org/wiki/Atan2[angle] between the positive x-axis and +the ray from the origin to the point (x , y) in the Cartesian plane, expressed +in radians. + +*Supported types* + +include::types/atan2.asciidoc[] + +*Example* [source.merge.styled,esql] ---- @@ -15,7 +35,3 @@ include::{esql-specs}/floats.csv-spec[tag=atan2] |=== include::{esql-specs}/floats.csv-spec[tag=atan2-result] |=== - -Supported types: - -include::types/atan2.asciidoc[] diff --git a/docs/reference/esql/functions/auto_bucket.asciidoc b/docs/reference/esql/functions/auto_bucket.asciidoc index 47e453f382229..2301939cf5050 100644 --- a/docs/reference/esql/functions/auto_bucket.asciidoc +++ b/docs/reference/esql/functions/auto_bucket.asciidoc @@ -1,72 +1,118 @@ [discrete] [[esql-auto_bucket]] === `AUTO_BUCKET` -Creates human-friendly buckets and returns a `datetime` value for each row that -corresponds to the resulting bucket the row falls into. Combine `AUTO_BUCKET` -with <> to create a date histogram. -You provide a target number of buckets, a start date, and an end date, and it -picks an appropriate bucket size to generate the target number of buckets or -fewer. For example, this asks for at most 20 buckets over a whole year, which -picks monthly buckets: +*Syntax* + +[source,esql] +---- +AUTO_BUCKET(field, buckets, from, to) +---- + +*Parameters* + +`field`:: +Numeric or date column from which to derive buckets. 
+ +`buckets`:: +Target number of buckets. + +`from`:: +Start of the range. Can be a number or a date expressed as a string. + +`to`:: +End of the range. Can be a number or a date expressed as a string. + +*Description* + +Creates human-friendly buckets and returns a value for each row that corresponds +to the resulting bucket the row falls into. + +Using a target number of buckets, a start of a range, and an end of a range, +`AUTO_BUCKET` picks an appropriate bucket size to generate the target number of +buckets or fewer. For example, asking for at most 20 buckets over a year results +in monthly buckets: [source.merge.styled,esql] ---- -include::{esql-specs}/date.csv-spec[tag=auto_bucket_month] +include::{esql-specs}/date.csv-spec[tag=docsAutoBucketMonth] ---- [%header.monospaced.styled,format=dsv,separator=|] |=== -include::{esql-specs}/date.csv-spec[tag=auto_bucket_month-result] +include::{esql-specs}/date.csv-spec[tag=docsAutoBucketMonth-result] |=== The goal isn't to provide *exactly* the target number of buckets, it's to pick a -range that people are comfortable with that provides at most the target number of -buckets. +range that people are comfortable with that provides at most the target number +of buckets. -If you ask for more buckets then `AUTO_BUCKET` can pick a smaller range. For example, -asking for at most 100 buckets in a year will get you week long buckets: +Combine `AUTO_BUCKET` with +<> to create a histogram: [source.merge.styled,esql] ---- -include::{esql-specs}/date.csv-spec[tag=auto_bucket_week] +include::{esql-specs}/date.csv-spec[tag=docsAutoBucketMonthlyHistogram] ---- [%header.monospaced.styled,format=dsv,separator=|] |=== -include::{esql-specs}/date.csv-spec[tag=auto_bucket_week-result] +include::{esql-specs}/date.csv-spec[tag=docsAutoBucketMonthlyHistogram-result] |=== -`AUTO_BUCKET` does not filter any rows. It only uses the provided time range to -pick a good bucket size. For rows with a date outside of the range, it returns a -`datetime` that corresponds to a bucket outside the range. Combine `AUTO_BUCKET` -with <> to filter rows. +NOTE: `AUTO_BUCKET` does not create buckets that don't match any documents. +That's why this example is missing `1985-03-01` and other dates. -A more complete example might look like: +Asking for more buckets can result in a smaller range. For example, asking for +at most 100 buckets in a year results in weekly buckets: [source.merge.styled,esql] ---- -include::{esql-specs}/date.csv-spec[tag=auto_bucket_in_agg] +include::{esql-specs}/date.csv-spec[tag=docsAutoBucketWeeklyHistogram] ---- [%header.monospaced.styled,format=dsv,separator=|] |=== -include::{esql-specs}/date.csv-spec[tag=auto_bucket_in_agg-result] +include::{esql-specs}/date.csv-spec[tag=docsAutoBucketWeeklyHistogram-result] |=== -NOTE: `AUTO_BUCKET` does not create buckets that don't match any documents. That's -why the example above is missing `1985-03-01` and other dates. +NOTE: `AUTO_BUCKET` does not filter any rows. It only uses the provided range to +pick a good bucket size. For rows with a value outside of the range, it returns +a bucket value that corresponds to a bucket outside the range. Combine +`AUTO_BUCKET` with <> to filter rows. -==== Numeric fields +`AUTO_BUCKET` can also operate on numeric fields. 
For example, to create a +salary histogram: -`auto_bucket` can also operate on numeric fields like this: [source.merge.styled,esql] ---- -include::{esql-specs}/ints.csv-spec[tag=auto_bucket] +include::{esql-specs}/ints.csv-spec[tag=docsAutoBucketNumeric] ---- [%header.monospaced.styled,format=dsv,separator=|] |=== -include::{esql-specs}/ints.csv-spec[tag=auto_bucket-result] +include::{esql-specs}/ints.csv-spec[tag=docsAutoBucketNumeric-result] |=== -Unlike the example above where you are intentionally filtering on a date range, -you rarely want to filter on a numeric range. So you have find the `min` and `max` -separately. We don't yet have an easy way to do that automatically. Improvements -coming! +Unlike the earlier example that intentionally filters on a date range, you +rarely want to filter on a numeric range. You have to find the `min` and `max` +separately. {esql} doesn't yet have an easy way to do that automatically. + +*Examples* + +Create hourly buckets for the last 24 hours, and calculate the number of events +per hour: + + +[source.styled,esql] +---- +include::{esql-specs}/date.csv-spec[tag=docsAutoBucketLast24hr] +---- + +Create monthly buckets for the year 1985, and calculate the average salary by +hiring month: + +[source.merge.styled,esql] +---- +include::{esql-specs}/date.csv-spec[tag=auto_bucket_in_agg] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/date.csv-spec[tag=auto_bucket_in_agg-result] +|=== diff --git a/docs/reference/esql/functions/avg.asciidoc b/docs/reference/esql/functions/avg.asciidoc index 972d30545ceb4..6345be99c5d6d 100644 --- a/docs/reference/esql/functions/avg.asciidoc +++ b/docs/reference/esql/functions/avg.asciidoc @@ -1,8 +1,27 @@ [discrete] [[esql-agg-avg]] === `AVG` + +*Syntax* + +[source,esql] +---- +AVG(column) +---- + +`column`:: +Numeric column. If `null`, the function returns `null`. + +*Description* + The average of a numeric field. +*Supported types* + +The result is always a `double` no matter the input type. + +*Example* + [source.merge.styled,esql] ---- include::{esql-specs}/stats.csv-spec[tag=avg] @@ -11,5 +30,3 @@ include::{esql-specs}/stats.csv-spec[tag=avg] |=== include::{esql-specs}/stats.csv-spec[tag=avg-result] |=== - -The result is always a `double` not matter the input type. diff --git a/docs/reference/esql/functions/case.asciidoc b/docs/reference/esql/functions/case.asciidoc index 84ff083147cb9..b5fda636135b2 100644 --- a/docs/reference/esql/functions/case.asciidoc +++ b/docs/reference/esql/functions/case.asciidoc @@ -32,6 +32,8 @@ no condition matches, the function returns `null`. 
*Example* +Determine whether employees are monolingual, bilingual, or polyglot: + [source,esql] [source.merge.styled,esql] ---- @@ -41,3 +43,28 @@ include::{esql-specs}/docs.csv-spec[tag=case] |=== include::{esql-specs}/docs.csv-spec[tag=case-result] |=== + +Calculate the total connection success rate based on log messages: + +[source,esql] +[source.merge.styled,esql] +---- +include::{esql-specs}/conditional.csv-spec[tag=docsCaseSuccessRate] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/conditional.csv-spec[tag=docsCaseSuccessRate-result] +|=== + +Calculate an hourly error rate as a percentage of the total number of log +messages: + +[source,esql] +[source.merge.styled,esql] +---- +include::{esql-specs}/conditional.csv-spec[tag=docsCaseHourlyErrorRate] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/conditional.csv-spec[tag=docsCaseHourlyErrorRate-result] +|=== diff --git a/docs/reference/esql/functions/ceil.asciidoc b/docs/reference/esql/functions/ceil.asciidoc index f977e544e6c3f..bc132e6bf47e6 100644 --- a/docs/reference/esql/functions/ceil.asciidoc +++ b/docs/reference/esql/functions/ceil.asciidoc @@ -1,11 +1,32 @@ [discrete] [[esql-ceil]] === `CEIL` + +*Syntax* + [.text-center] image::esql/functions/signature/ceil.svg[Embedded,opts=inline] +*Parameters* + +`n`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + Round a number up to the nearest integer. +NOTE: This is a noop for `long` (including unsigned) and `integer`. + For `double` this picks the closest `double` value to the integer + similar to {javadoc}/java.base/java/lang/Math.html#ceil(double)[Math.ceil]. + +*Supported types* + +include::types/ceil.asciidoc[] + + +*Example* + [source.merge.styled,esql] ---- include::{esql-specs}/math.csv-spec[tag=ceil] @@ -14,11 +35,3 @@ include::{esql-specs}/math.csv-spec[tag=ceil] |=== include::{esql-specs}/math.csv-spec[tag=ceil-result] |=== - -NOTE: This is a noop for `long` (including unsigned) and `integer`. - For `double` this picks the the closest `double` value to the integer ala - {javadoc}/java.base/java/lang/Math.html#ceil(double)[Math.ceil]. - -Supported types: - -include::types/ceil.asciidoc[] diff --git a/docs/reference/esql/functions/cidr_match.asciidoc b/docs/reference/esql/functions/cidr_match.asciidoc index 5072a6eef7fd5..1c7fbb57a0044 100644 --- a/docs/reference/esql/functions/cidr_match.asciidoc +++ b/docs/reference/esql/functions/cidr_match.asciidoc @@ -2,15 +2,33 @@ [[esql-cidr_match]] === `CIDR_MATCH` +*Syntax* + +[source,esql] +---- +CIDR_MATCH(ip, block1[, ..., blockN]) +---- + +*Parameters* + +`ip`:: +IP address of type `ip` (both IPv4 and IPv6 are supported). + +`blockX`:: +CIDR block to test the IP against. + +*Description* + Returns `true` if the provided IP is contained in one of the provided CIDR blocks. -`CIDR_MATCH` accepts two or more arguments. The first argument is the IP -address of type `ip` (both IPv4 and IPv6 are supported). Subsequent arguments -are the CIDR blocks to test the IP against. 
+*Example* -[source,esql] +[source.merge.styled,esql] ---- -FROM hosts -| WHERE CIDR_MATCH(ip, "127.0.0.2/32", "127.0.0.3/32") +include::{esql-specs}/ip.csv-spec[tag=cdirMatchMultipleArgs] ---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/ip.csv-spec[tag=cdirMatchMultipleArgs-result] +|=== diff --git a/docs/reference/esql/functions/coalesce.asciidoc b/docs/reference/esql/functions/coalesce.asciidoc index 550780eaa070d..1121a75209151 100644 --- a/docs/reference/esql/functions/coalesce.asciidoc +++ b/docs/reference/esql/functions/coalesce.asciidoc @@ -2,7 +2,24 @@ [[esql-coalesce]] === `COALESCE` -Returns the first non-null value. +*Syntax* + +[source,esql] +---- +COALESCE(expression1 [, ..., expressionN]) +---- + +*Parameters* + +`expressionX`:: +Expression to evaluate. + +*Description* + +Returns the first of its arguments that is not null. If all arguments are null, +it returns `null`. + +*Example* [source.merge.styled,esql] ---- diff --git a/docs/reference/esql/functions/concat.asciidoc b/docs/reference/esql/functions/concat.asciidoc index 4864f5623a170..0b30211a72be2 100644 --- a/docs/reference/esql/functions/concat.asciidoc +++ b/docs/reference/esql/functions/concat.asciidoc @@ -1,11 +1,30 @@ [discrete] [[esql-concat]] === `CONCAT` -Concatenates two or more strings. + +*Syntax* [source,esql] ---- -FROM employees -| KEEP first_name, last_name, height -| EVAL fullname = CONCAT(first_name, " ", last_name) +CONCAT(string1, string2[, ..., stringN]) +---- + +*Parameters* + +`stringX`:: +Strings to concatenate. + +*Description* + +Concatenates two or more strings. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/eval.csv-spec[tag=docsConcat] ---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/eval.csv-spec[tag=docsConcat-result] +|=== diff --git a/docs/reference/esql/functions/cos.asciidoc b/docs/reference/esql/functions/cos.asciidoc index 7227f57e28120..f7874d46c558a 100644 --- a/docs/reference/esql/functions/cos.asciidoc +++ b/docs/reference/esql/functions/cos.asciidoc @@ -1,10 +1,27 @@ [discrete] [[esql-cos]] === `COS` + +*Syntax* + [.text-center] image::esql/functions/signature/cos.svg[Embedded,opts=inline] -https://en.wikipedia.org/wiki/Sine_and_cosine[Cosine] trigonometric function. Input expected in radians. +*Parameters* + +`n`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + +Returns the https://en.wikipedia.org/wiki/Sine_and_cosine[cosine] of `n`. Input +expected in radians. + +*Supported types* + +include::types/cos.asciidoc[] + +*Example* [source.merge.styled,esql] ---- @@ -14,7 +31,3 @@ include::{esql-specs}/floats.csv-spec[tag=cos] |=== include::{esql-specs}/floats.csv-spec[tag=cos-result] |=== - -Supported types: - -include::types/cos.asciidoc[] diff --git a/docs/reference/esql/functions/cosh.asciidoc b/docs/reference/esql/functions/cosh.asciidoc index 7bf0840958655..ae813e91ec9bb 100644 --- a/docs/reference/esql/functions/cosh.asciidoc +++ b/docs/reference/esql/functions/cosh.asciidoc @@ -1,10 +1,27 @@ [discrete] [[esql-cosh]] === `COSH` + +*Syntax* + [.text-center] image::esql/functions/signature/cosh.svg[Embedded,opts=inline] -https://en.wikipedia.org/wiki/Hyperbolic_functions[Cosine] hyperbolic function. +*Parameters* + +`n`:: +Numeric expression. If `null`, the function returns `null`. + +*Supported types* + +include::types/cosh.asciidoc[] + +*Description* + +Returns the https://en.wikipedia.org/wiki/Hyperbolic_functions[hyperbolic +cosine]. 
+
+*Example*

[source.merge.styled,esql]
----
@@ -14,7 +31,3 @@ include::{esql-specs}/floats.csv-spec[tag=cosh]
[%header.monospaced.styled,format=dsv,separator=|]
|===
include::{esql-specs}/floats.csv-spec[tag=cosh-result]
|===
-
-Supported types:
-
-include::types/cosh.asciidoc[]
diff --git a/docs/reference/esql/functions/count-distinct.asciidoc b/docs/reference/esql/functions/count-distinct.asciidoc
index b5b1659140f63..14fa6eff39d4c 100644
--- a/docs/reference/esql/functions/count-distinct.asciidoc
+++ b/docs/reference/esql/functions/count-distinct.asciidoc
@@ -1,21 +1,28 @@
[discrete]
[[esql-agg-count-distinct]]
=== `COUNT_DISTINCT`
-The approximate number of distinct values.

-[source.merge.styled,esql]
+*Syntax*
+
+[source,esql]
----
-include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct]
+COUNT_DISTINCT(column[, precision])
----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct-result]
-|===

-Can take any field type as input and the result is always a `long` not matter
-the input type.
+*Parameters*
+
+`column`::
+Column for which to count the number of distinct values.
+
+`precision`::
+Precision. Refer to <>.
+
+*Description*
+
+Returns the approximate number of distinct values.

[discrete]
+[[esql-agg-count-distinct-approximate]]
==== Counts are approximate

Computing exact counts requires loading values into a set and returning its
@@ -30,11 +37,25 @@ properties:

include::../../aggregations/metrics/cardinality-aggregation.asciidoc[tag=explanation]

-[discrete]
-==== Precision is configurable
-
The `COUNT_DISTINCT` function takes an optional second parameter to configure the
-precision discussed previously.
+precision.
+
+*Supported types*
+
+Can take any field type as input.
+
+*Examples*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct-result]
+|===
+
+With the optional second parameter to configure the precision:

[source.merge.styled,esql]
----
diff --git a/docs/reference/esql/functions/count.asciidoc b/docs/reference/esql/functions/count.asciidoc
index a148df07edb4d..70b13d7fc16b3 100644
--- a/docs/reference/esql/functions/count.asciidoc
+++ b/docs/reference/esql/functions/count.asciidoc
@@ -1,7 +1,29 @@
[discrete]
[[esql-agg-count]]
=== `COUNT`
-Counts field values.
+
+*Syntax*
+
+[source,esql]
+----
+COUNT([input])
+----
+
+*Parameters*
+
+`input`::
+Column or literal for which to count the number of values. If omitted, returns a
+count of all rows (equivalent to `COUNT(*)`).
+
+*Description*
+
+Returns the total number (count) of input values.
+
+*Supported types*
+
+Can take any field type as input.
+
+*Examples*

[source.merge.styled,esql]
----
@@ -12,10 +34,7 @@ include::{esql-specs}/stats.csv-spec[tag=count]
[%header.monospaced.styled,format=dsv,separator=|]
|===
include::{esql-specs}/stats.csv-spec[tag=count-result]
|===

-Can take any field type as input and the result is always a `long` not matter
the input type.
- -To count the number of rows, use `COUNT(*)`: +To count the number of rows, use `COUNT()` or `COUNT(*)`: [source.merge.styled,esql] ---- @@ -24,4 +43,4 @@ include::{esql-specs}/docs.csv-spec[tag=countAll] [%header.monospaced.styled,format=dsv,separator=|] |=== include::{esql-specs}/docs.csv-spec[tag=countAll-result] -|=== \ No newline at end of file +|=== diff --git a/docs/reference/esql/functions/date_diff.asciidoc b/docs/reference/esql/functions/date_diff.asciidoc new file mode 100644 index 0000000000000..6127290466b10 --- /dev/null +++ b/docs/reference/esql/functions/date_diff.asciidoc @@ -0,0 +1,37 @@ +[discrete] +[[esql-date_diff]] +=== `DATE_DIFF` +Subtract the second argument from the third argument and return their difference in multiples of the unit specified in the first argument. +If the second argument (start) is greater than the third argument (end), then negative values are returned. + +[cols="^,^"] +|=== +2+h|Datetime difference units + +s|unit +s|abbreviations + +| year | years, yy, yyyy +| quarter | quarters, qq, q +| month | months, mm, m +| dayofyear | dy, y +| day | days, dd, d +| week | weeks, wk, ww +| weekday | weekdays, dw +| hour | hours, hh +| minute | minutes, mi, n +| second | seconds, ss, s +| millisecond | milliseconds, ms +| microsecond | microseconds, mcs +| nanosecond | nanoseconds, ns +|=== + +[source.merge.styled,esql] +---- +include::{esql-specs}/docs.csv-spec[tag=dateDiff] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/docs.csv-spec[tag=dateDiff-result] +|=== + diff --git a/docs/reference/esql/functions/date_extract.asciidoc b/docs/reference/esql/functions/date_extract.asciidoc index 89ef1cf261094..ce949483494a5 100644 --- a/docs/reference/esql/functions/date_extract.asciidoc +++ b/docs/reference/esql/functions/date_extract.asciidoc @@ -1,15 +1,56 @@ [discrete] [[esql-date_extract]] === `DATE_EXTRACT` -Extracts parts of a date, like year, month, day, hour. -The supported field types are those provided by https://docs.oracle.com/javase/8/docs/api/java/time/temporal/ChronoField.html[java.time.temporal.ChronoField]. + +*Syntax* + +[source,esql] +---- +DATE_EXTRACT(date_part, date) +---- + +*Parameters* + +`date_part`:: +Part of the date to extract. Can be: `aligned_day_of_week_in_month`, +`aligned_day_of_week_in_year`, `aligned_week_of_month`, `aligned_week_of_year`, +`ampm_of_day`, `clock_hour_of_ampm`, `clock_hour_of_day`, `day_of_month`, +`day_of_week`, `day_of_year`, `epoch_day`, `era`, `hour_of_ampm`, `hour_of_day`, +`instant_seconds`, `micro_of_day`, `micro_of_second`, `milli_of_day`, +`milli_of_second`, `minute_of_day`, `minute_of_hour`, `month_of_year`, +`nano_of_day`, `nano_of_second`, `offset_seconds`, `proleptic_month`, +`second_of_day`, `second_of_minute`, `year`, or `year_of_era`. Refer to +https://docs.oracle.com/javase/8/docs/api/java/time/temporal/ChronoField.html[java.time.temporal.ChronoField] +for a description of these values. ++ +If `null`, the function returns `null`. + +`date`:: +Date expression. If `null`, the function returns `null`. + +*Description* + +Extracts parts of a date, like year, month, day, hour. 
+ +*Examples* [source.merge.styled,esql] ---- -include::{esql-specs}/docs.csv-spec[tag=dateExtract] +include::{esql-specs}/date.csv-spec[tag=dateExtract] ---- [%header.monospaced.styled,format=dsv,separator=|] |=== -include::{esql-specs}/docs.csv-spec[tag=dateExtract-result] +include::{esql-specs}/date.csv-spec[tag=dateExtract-result] |=== +Find all events that occurred outside of business hours (before 9 AM or after 5 +PM), on any given date: + +[source.merge.styled,esql] +---- +include::{esql-specs}/date.csv-spec[tag=docsDateExtractBusinessHours] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/date.csv-spec[tag=docsDateExtractBusinessHours-result] +|=== diff --git a/docs/reference/esql/functions/date_format.asciidoc b/docs/reference/esql/functions/date_format.asciidoc index 5a87f31412cc8..4a0d36d133a4c 100644 --- a/docs/reference/esql/functions/date_format.asciidoc +++ b/docs/reference/esql/functions/date_format.asciidoc @@ -1,12 +1,35 @@ [discrete] [[esql-date_format]] === `DATE_FORMAT` -Returns a string representation of a date in the provided format. If no format -is specified, the `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. + +*Syntax* [source,esql] ---- -FROM employees -| KEEP first_name, last_name, hire_date -| EVAL hired = DATE_FORMAT("YYYY-MM-dd", hire_date) +DATE_FORMAT([format,] date) +---- + +*Parameters* + +`format`:: +Date format (optional). If no format is specified, the +`yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. If `null`, the function returns +`null`. + +`date`:: +Date expression. If `null`, the function returns `null`. + +*Description* + +Returns a string representation of a date, in the provided format. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/date.csv-spec[tag=docsDateFormat] ---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/date.csv-spec[tag=docsDateFormat-result] +|=== diff --git a/docs/reference/esql/functions/date_trunc.asciidoc b/docs/reference/esql/functions/date_trunc.asciidoc index ad0e1eb1170b4..4aa228dc14e65 100644 --- a/docs/reference/esql/functions/date_trunc.asciidoc +++ b/docs/reference/esql/functions/date_trunc.asciidoc @@ -1,13 +1,57 @@ [discrete] [[esql-date_trunc]] === `DATE_TRUNC` -Rounds down a date to the closest interval. Intervals can be expressed using the -<>. + +*Syntax* [source,esql] ---- -FROM employees -| EVAL year_hired = DATE_TRUNC(1 year, hire_date) -| STATS COUNT(emp_no) BY year_hired -| SORT year_hired +DATE_TRUNC(interval, date) +---- + +*Parameters* + +`interval`:: +Interval, expressed using the <>. If `null`, the function returns `null`. + +`date`:: +Date expression. If `null`, the function returns `null`. + +*Description* + +Rounds down a date to the closest interval. + +*Examples* + +[source.merge.styled,esql] +---- +include::{esql-specs}/date.csv-spec[tag=docsDateTrunc] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/date.csv-spec[tag=docsDateTrunc-result] +|=== + +Combine `DATE_TRUNC` with <> to create date histograms. 
For +example, the number of hires per year: + +[source.merge.styled,esql] +---- +include::{esql-specs}/date.csv-spec[tag=docsDateTruncHistogram] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/date.csv-spec[tag=docsDateTruncHistogram-result] +|=== + +Or an hourly error rate: + +[source.merge.styled,esql] +---- +include::{esql-specs}/conditional.csv-spec[tag=docsCaseHourlyErrorRate] ---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/conditional.csv-spec[tag=docsCaseHourlyErrorRate-result] +|=== diff --git a/docs/reference/esql/functions/is_finite.asciidoc b/docs/reference/esql/functions/is_finite.asciidoc index f7b7ad73a3952..482c7bcd3d61b 100644 --- a/docs/reference/esql/functions/is_finite.asciidoc +++ b/docs/reference/esql/functions/is_finite.asciidoc @@ -1,8 +1,27 @@ [discrete] [[esql-is_finite]] === `IS_FINITE` + +*Syntax* + +[.text-center] +image::esql/functions/signature/is_finite.svg[Embedded,opts=inline] + +*Parameters* + +`n`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + Returns a boolean that indicates whether its input is a finite number. +*Supported types* + +include::types/is_finite.asciidoc[] + +*Example* + [source,esql] ---- ROW d = 1.0 diff --git a/docs/reference/esql/functions/is_infinite.asciidoc b/docs/reference/esql/functions/is_infinite.asciidoc index 56158a786c020..69f0ab7ba98a3 100644 --- a/docs/reference/esql/functions/is_infinite.asciidoc +++ b/docs/reference/esql/functions/is_infinite.asciidoc @@ -1,7 +1,26 @@ [discrete] [[esql-is_infinite]] === `IS_INFINITE` -Returns a boolean that indicates whether its input is infinite. + +*Syntax* + +[.text-center] +image::esql/functions/signature/is_infinite.svg[Embedded,opts=inline] + +*Parameters* + +`n`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + +Returns a boolean that indicates whether its input is an infinite number. + +*Supported types* + +include::types/is_infinite.asciidoc[] + +*Example* [source,esql] ---- diff --git a/docs/reference/esql/functions/is_nan.asciidoc b/docs/reference/esql/functions/is_nan.asciidoc index 25b50a9e96bba..dbe93b9dbb817 100644 --- a/docs/reference/esql/functions/is_nan.asciidoc +++ b/docs/reference/esql/functions/is_nan.asciidoc @@ -1,7 +1,26 @@ [discrete] [[esql-is_nan]] === `IS_NAN` -Returns a boolean that indicates whether its input is not a number. + +*Syntax* + +[.text-center] +image::esql/functions/signature/is_nan.svg[Embedded,opts=inline] + +*Parameters* + +`n`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + +Returns a boolean that indicates whether its input is {wikipedia}/NaN[Not-a-Number] (NaN). 
+ +*Supported types* + +include::types/is_nan.asciidoc[] + +*Example* [source,esql] ---- diff --git a/docs/reference/esql/functions/mv-functions.asciidoc b/docs/reference/esql/functions/mv-functions.asciidoc index 83dbaaadc5c06..a95a3d36a9963 100644 --- a/docs/reference/esql/functions/mv-functions.asciidoc +++ b/docs/reference/esql/functions/mv-functions.asciidoc @@ -12,6 +12,8 @@ * <> * <> * <> +* <> +* <> * <> * <> * <> @@ -22,6 +24,8 @@ include::mv_avg.asciidoc[] include::mv_concat.asciidoc[] include::mv_count.asciidoc[] include::mv_dedupe.asciidoc[] +include::mv_first.asciidoc[] +include::mv_last.asciidoc[] include::mv_max.asciidoc[] include::mv_median.asciidoc[] include::mv_min.asciidoc[] diff --git a/docs/reference/esql/functions/mv_first.asciidoc b/docs/reference/esql/functions/mv_first.asciidoc new file mode 100644 index 0000000000000..42ac8930136cc --- /dev/null +++ b/docs/reference/esql/functions/mv_first.asciidoc @@ -0,0 +1,27 @@ +[discrete] +[[esql-mv_first]] +=== `MV_FIRST` +[.text-center] +image::esql/functions/signature/mv_first.svg[Embedded,opts=inline] + +Converts a multivalued field into a single-valued field containing the first value. This is most +useful when reading from a function that emits multivalued fields in a known order, like <>: + +[source.merge.styled,esql] +---- +include::{esql-specs}/string.csv-spec[tag=mv_first] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/string.csv-spec[tag=mv_first-result] +|=== + +The order in which <> are read from underlying storage is not +guaranteed. It is *frequently* ascending, but don't rely on that. If you need the minimum field value, +use <> instead of `MV_FIRST`. `MV_MIN` has optimizations for sorted values, so there isn't +a performance benefit to `MV_FIRST`. `MV_FIRST` is mostly useful with functions that create multivalued +fields like `SPLIT`. + +Supported types: + +include::types/mv_first.asciidoc[] diff --git a/docs/reference/esql/functions/mv_last.asciidoc b/docs/reference/esql/functions/mv_last.asciidoc new file mode 100644 index 0000000000000..aa6fc40d0af07 --- /dev/null +++ b/docs/reference/esql/functions/mv_last.asciidoc @@ -0,0 +1,27 @@ +[discrete] +[[esql-mv_last]] +=== `MV_LAST` +[.text-center] +image::esql/functions/signature/mv_last.svg[Embedded,opts=inline] + +Converts a multivalued field into a single-valued field containing the last value. This is most +useful when reading from a function that emits multivalued fields in a known order, like <>: + +[source.merge.styled,esql] +---- +include::{esql-specs}/string.csv-spec[tag=mv_last] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/string.csv-spec[tag=mv_last-result] +|=== + +The order in which <> are read from underlying storage is not +guaranteed. It is *frequently* ascending, but don't rely on that. If you need the maximum field value, +use <> instead of `MV_LAST`. `MV_MAX` has optimizations for sorted values, so there isn't +a performance benefit to `MV_LAST`. `MV_LAST` is mostly useful with functions that create multivalued +fields like `SPLIT`.
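+ +A minimal sketch of the `SPLIT` pattern described above (illustrative only; the row value is made up): + +[source,esql] +---- +ROW a = "foo;bar;baz" +| EVAL last_a = MV_LAST(SPLIT(a, ";")) +---- + +Here `last_a` evaluates to `"baz"`.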
+ +Supported types: + +include::types/mv_last.asciidoc[] diff --git a/docs/reference/esql/functions/signature/date_diff.svg b/docs/reference/esql/functions/signature/date_diff.svg new file mode 100644 index 0000000000000..6563ec6576927 --- /dev/null +++ b/docs/reference/esql/functions/signature/date_diff.svg @@ -0,0 +1 @@ +DATE_DIFF(unit,startTimestamp,endTimestamp) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/is_finite.svg b/docs/reference/esql/functions/signature/is_finite.svg index 0ff65a876d21f..36a8f1f34de9b 100644 --- a/docs/reference/esql/functions/signature/is_finite.svg +++ b/docs/reference/esql/functions/signature/is_finite.svg @@ -1 +1 @@ -IS_FINITE(arg1) \ No newline at end of file +IS_FINITE(n) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/is_infinite.svg b/docs/reference/esql/functions/signature/is_infinite.svg index aef9e3873c918..f3d8d44fde947 100644 --- a/docs/reference/esql/functions/signature/is_infinite.svg +++ b/docs/reference/esql/functions/signature/is_infinite.svg @@ -1 +1 @@ -IS_INFINITE(arg1) \ No newline at end of file +IS_INFINITE(n) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/is_nan.svg b/docs/reference/esql/functions/signature/is_nan.svg new file mode 100644 index 0000000000000..a3697ee9f8b2c --- /dev/null +++ b/docs/reference/esql/functions/signature/is_nan.svg @@ -0,0 +1 @@ +IS_NAN(n) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/mv_first.svg b/docs/reference/esql/functions/signature/mv_first.svg new file mode 100644 index 0000000000000..20d201eab0add --- /dev/null +++ b/docs/reference/esql/functions/signature/mv_first.svg @@ -0,0 +1 @@ +MV_FIRST(v) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/mv_last.svg b/docs/reference/esql/functions/signature/mv_last.svg new file mode 100644 index 0000000000000..eb32bb49f8ccc --- /dev/null +++ b/docs/reference/esql/functions/signature/mv_last.svg @@ -0,0 +1 @@ +MV_LAST(v) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/to_degrees.svg b/docs/reference/esql/functions/signature/to_degrees.svg new file mode 100644 index 0000000000000..01fe0a4770156 --- /dev/null +++ b/docs/reference/esql/functions/signature/to_degrees.svg @@ -0,0 +1 @@ +TO_DEGREES(v) \ No newline at end of file diff --git a/docs/reference/esql/functions/to_integer.asciidoc b/docs/reference/esql/functions/to_integer.asciidoc index e185b87d6d95d..e62256930c5aa 100644 --- a/docs/reference/esql/functions/to_integer.asciidoc +++ b/docs/reference/esql/functions/to_integer.asciidoc @@ -26,7 +26,7 @@ provide information on the source of the failure: A following header will contain the failure reason and the offending value: -`"org.elasticsearch.xpack.ql.QlIllegalArgumentException: [501379200000] out of [integer] range"` +`"org.elasticsearch.xpack.ql.InvalidArgumentException: [501379200000] out of [integer] range"` If the input parameter is of a date type, its value will be interpreted as diff --git a/docs/reference/esql/functions/types/add.asciidoc b/docs/reference/esql/functions/types/add.asciidoc index 7783d08bc3aaa..3665c112d802d 100644 --- a/docs/reference/esql/functions/types/add.asciidoc +++ b/docs/reference/esql/functions/types/add.asciidoc @@ -6,7 +6,15 @@ date_period | datetime | datetime datetime | date_period | datetime datetime | time_duration | datetime double | double | double +double | integer | double +double | long | double +integer | double 
| double integer | integer | integer +integer | long | long +long | double | double +long | integer | long long | long | long +time_duration | datetime | datetime time_duration | time_duration | time_duration +unsigned_long | unsigned_long | unsigned_long |=== diff --git a/docs/reference/esql/functions/types/date_diff.asciidoc b/docs/reference/esql/functions/types/date_diff.asciidoc new file mode 100644 index 0000000000000..b4e5c6ad5e0b5 --- /dev/null +++ b/docs/reference/esql/functions/types/date_diff.asciidoc @@ -0,0 +1,6 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +unit | startTimestamp | endTimestamp | result +keyword | datetime | datetime | integer +text | datetime | datetime | integer +|=== diff --git a/docs/reference/esql/functions/types/is_finite.asciidoc b/docs/reference/esql/functions/types/is_finite.asciidoc index 0c555059004c1..e4883bdc1c076 100644 --- a/docs/reference/esql/functions/types/is_finite.asciidoc +++ b/docs/reference/esql/functions/types/is_finite.asciidoc @@ -1,5 +1,5 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -arg1 | result +n | result double | boolean |=== diff --git a/docs/reference/esql/functions/types/is_infinite.asciidoc b/docs/reference/esql/functions/types/is_infinite.asciidoc index 0c555059004c1..e4883bdc1c076 100644 --- a/docs/reference/esql/functions/types/is_infinite.asciidoc +++ b/docs/reference/esql/functions/types/is_infinite.asciidoc @@ -1,5 +1,5 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -arg1 | result +n | result double | boolean |=== diff --git a/docs/reference/esql/functions/types/is_nan.asciidoc b/docs/reference/esql/functions/types/is_nan.asciidoc new file mode 100644 index 0000000000000..e4883bdc1c076 --- /dev/null +++ b/docs/reference/esql/functions/types/is_nan.asciidoc @@ -0,0 +1,5 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +n | result +double | boolean +|=== diff --git a/docs/reference/esql/functions/types/mv_first.asciidoc b/docs/reference/esql/functions/types/mv_first.asciidoc new file mode 100644 index 0000000000000..e6c67a454b96b --- /dev/null +++ b/docs/reference/esql/functions/types/mv_first.asciidoc @@ -0,0 +1,16 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +v | result +boolean | boolean +cartesian_point | cartesian_point +datetime | datetime +double | double +geo_point | geo_point +integer | integer +ip | ip +keyword | keyword +long | long +text | text +unsigned_long | unsigned_long +version | version +|=== diff --git a/docs/reference/esql/functions/types/mv_last.asciidoc b/docs/reference/esql/functions/types/mv_last.asciidoc new file mode 100644 index 0000000000000..e6c67a454b96b --- /dev/null +++ b/docs/reference/esql/functions/types/mv_last.asciidoc @@ -0,0 +1,16 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +v | result +boolean | boolean +cartesian_point | cartesian_point +datetime | datetime +double | double +geo_point | geo_point +integer | integer +ip | ip +keyword | keyword +long | long +text | text +unsigned_long | unsigned_long +version | version +|=== diff --git a/docs/reference/esql/functions/types/to_degrees.asciidoc b/docs/reference/esql/functions/types/to_degrees.asciidoc new file mode 100644 index 0000000000000..7cb7ca46022c2 --- /dev/null +++ b/docs/reference/esql/functions/types/to_degrees.asciidoc @@ -0,0 +1,8 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +v | result +double | double +integer | double +long | double +unsigned_long | double +|=== diff --git a/docs/reference/esql/index.asciidoc 
b/docs/reference/esql/index.asciidoc index dcbe426b1bcac..8fb20b981b93e 100644 --- a/docs/reference/esql/index.asciidoc +++ b/docs/reference/esql/index.asciidoc @@ -6,7 +6,7 @@ [partintro] -preview::[] +preview::["Do not use {esql} in production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] The {es} Query Language ({esql}) provides a powerful way to filter, transform, and analyze data stored in {es}, and in the future in other runtimes. It is diff --git a/docs/reference/esql/processing-commands/eval.asciidoc b/docs/reference/esql/processing-commands/eval.asciidoc index eb69a587014ab..9b34fca7ceeff 100644 --- a/docs/reference/esql/processing-commands/eval.asciidoc +++ b/docs/reference/esql/processing-commands/eval.asciidoc @@ -6,7 +6,7 @@ [source,esql] ---- -EVAL column1 = value1[, ..., columnN = valueN] +EVAL [column1 =] value1[, ..., [columnN =] valueN] ---- *Parameters* @@ -28,11 +28,11 @@ values. `EVAL` supports various functions for calculating values. Refer to [source.merge.styled,esql] ---- -include::{esql-specs}/docs.csv-spec[tag=eval] +include::{esql-specs}/eval.csv-spec[tag=eval] ---- [%header.monospaced.styled,format=dsv,separator=|] |=== -include::{esql-specs}/docs.csv-spec[tag=eval-result] +include::{esql-specs}/eval.csv-spec[tag=eval-result] |=== If the specified column already exists, the existing column will be dropped, and @@ -40,9 +40,34 @@ the new column will be appended to the table: [source.merge.styled,esql] ---- -include::{esql-specs}/docs.csv-spec[tag=evalReplace] +include::{esql-specs}/eval.csv-spec[tag=evalReplace] ---- [%header.monospaced.styled,format=dsv,separator=|] |=== -include::{esql-specs}/docs.csv-spec[tag=evalReplace-result] +include::{esql-specs}/eval.csv-spec[tag=evalReplace-result] +|=== + +Specifying the output column name is optional. If not specified, the new column +name is equal to the expression.
The following query adds a column named +`height*3.281`: + +[source.merge.styled,esql] +---- +include::{esql-specs}/eval.csv-spec[tag=evalUnnamedColumn] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/eval.csv-spec[tag=evalUnnamedColumn-result] +|=== + +Because this name contains special characters, <> with backticks (+{backtick}+) when using it in subsequent commands: + +[source.merge.styled,esql] +---- +include::{esql-specs}/eval.csv-spec[tag=evalUnnamedColumnStats] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/eval.csv-spec[tag=evalUnnamedColumnStats-result] |=== diff --git a/docs/reference/esql/processing-commands/stats.asciidoc b/docs/reference/esql/processing-commands/stats.asciidoc index cbdb74d350fb1..a34bc444578d6 100644 --- a/docs/reference/esql/processing-commands/stats.asciidoc +++ b/docs/reference/esql/processing-commands/stats.asciidoc @@ -47,11 +47,11 @@ Calculating a statistic and grouping by the values of another column: [source.merge.styled,esql] ---- -include::{esql-specs}/docs.csv-spec[tag=stats] +include::{esql-specs}/stats.csv-spec[tag=stats] ---- [%header.monospaced.styled,format=dsv,separator=|] |=== -include::{esql-specs}/docs.csv-spec[tag=stats-result] +include::{esql-specs}/stats.csv-spec[tag=stats-result] |=== Omitting `BY` returns one row with the aggregations applied over the entire @@ -59,18 +59,18 @@ dataset: [source.merge.styled,esql] ---- -include::{esql-specs}/docs.csv-spec[tag=statsWithoutBy] +include::{esql-specs}/stats.csv-spec[tag=statsWithoutBy] ---- [%header.monospaced.styled,format=dsv,separator=|] |=== -include::{esql-specs}/docs.csv-spec[tag=statsWithoutBy-result] +include::{esql-specs}/stats.csv-spec[tag=statsWithoutBy-result] |=== It's possible to calculate multiple values: [source,esql] ---- -include::{esql-specs}/docs.csv-spec[tag=statsCalcMultipleValues] +include::{esql-specs}/stats.csv-spec[tag=statsCalcMultipleValues] ---- It's also possible to group by multiple values (only supported for long and @@ -78,5 +78,30 @@ keyword family fields): [source,esql] ---- -include::{esql-specs}/docs.csv-spec[tag=statsGroupByMultipleValues] +include::{esql-specs}/stats.csv-spec[tag=statsGroupByMultipleValues] ---- + +Specifying the output column name is optional. If not specified, the new column +name is equal to the expression. The following query returns a column named +`AVG(salary)`: + +[source.merge.styled,esql] +---- +include::{esql-specs}/stats.csv-spec[tag=statsUnnamedColumn] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats.csv-spec[tag=statsUnnamedColumn-result] +|=== + +Because this name contains special characters, <> with backticks (+{backtick}+) when using it in subsequent commands: + +[source.merge.styled,esql] +---- +include::{esql-specs}/stats.csv-spec[tag=statsUnnamedColumnEval] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats.csv-spec[tag=statsUnnamedColumnEval-result] +|=== diff --git a/docs/reference/how-to/knn-search.asciidoc b/docs/reference/how-to/knn-search.asciidoc index 066008ce26110..2e0462c193f63 100644 --- a/docs/reference/how-to/knn-search.asciidoc +++ b/docs/reference/how-to/knn-search.asciidoc @@ -11,53 +11,14 @@ the indexing algorithm runs searches under the hood to create the vector index structures. So these same recommendations also help with indexing speed. 
[discrete] -=== Prefer `dot_product` over `cosine` - -When indexing vectors for approximate kNN search, you need to specify the -<> for comparing the vectors. -If you'd like to compare vectors through cosine similarity, there are two -options. - -The `cosine` option accepts any float vector and computes the cosine -similarity. While this is convenient for testing, it's not the most efficient -approach. Instead, we recommend using the `dot_product` option to compute the -similarity. To use `dot_product`, all vectors need to be normalized in advance -to have length 1. The `dot_product` option is significantly faster, since it -avoids performing extra vector length computations during the search. - -[discrete] -=== Ensure data nodes have enough memory - -{es} uses the https://arxiv.org/abs/1603.09320[HNSW] algorithm for approximate -kNN search. HNSW is a graph-based algorithm which only works efficiently when -most vector data is held in memory. You should ensure that data nodes have at -least enough RAM to hold the vector data and index structures. To check the -size of the vector data, you can use the <> API. As a -loose rule of thumb, and assuming the default HNSW options, the bytes used will -be `num_vectors * 4 * (num_dimensions + 12)`. When using the `byte` <> -the space required will be closer to `num_vectors * (num_dimensions + 12)`. Note that -the required RAM is for the filesystem cache, which is separate from the Java -heap. - -The data nodes should also leave a buffer for other ways that RAM is needed. -For example your index might also include text fields and numerics, which also -benefit from using filesystem cache. It's recommended to run benchmarks with -your specific dataset to ensure there's a sufficient amount of memory to give -good search performance. -You can find https://elasticsearch-benchmarks.elastic.co/#tracks/so_vector[here] -and https://elasticsearch-benchmarks.elastic.co/#tracks/dense_vector[here] some examples -of datasets and configurations that we use for our nightly benchmarks. - -[discrete] -include::search-speed.asciidoc[tag=warm-fs-cache] +=== Reduce vector memory footprint -The following file extensions are used for the approximate kNN search: -+ --- -* `vec` and `veq` for vector values -* `vex` for HNSW graph -* `vem`, `vemf`, and `vemq` for metadata --- +The default <> is `float`. But this +can be automatically quantized at index time through +<>. Quantization will reduce the +required memory by 4x, but it will also reduce the precision of the vectors. For +`float` vectors with `dim` greater than or equal to `384`, using a +<> index is highly recommended. [discrete] === Reduce vector dimensionality @@ -71,14 +32,6 @@ reduction techniques like PCA. When experimenting with different approaches, it's important to measure the impact on relevance to ensure the search quality is still acceptable. -[discrete] -=== Reduce vector memory foot-print - -The default <> is `float`. But this can be -automatically quantized during index time through <>. Quantization will -reduce the required memory by 4x, but it will also reduce the precision of the vectors. For `float` vectors with -`dim` greater than or equal to `384`, using a <> index is highly recommended. - [discrete] === Exclude vector fields from `_source` @@ -99,6 +52,37 @@ downsides of omitting fields from `_source`. Another option is to use <> if all your index fields support it.
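+ +A hedged sketch of explicitly excluding a vector field from `_source` (the index and field names are made up): + +[source,console] +---- +PUT my-index +{ +  "mappings": { +    "_source": { +      "excludes": [ "my_vector" ] +    }, +    "properties": { +      "my_vector": { +        "type": "dense_vector", +        "dims": 384 +      } +    } +  } +} +---- +// TEST[skip: TBD]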
+[discrete] +=== Ensure data nodes have enough memory + +{es} uses the https://arxiv.org/abs/1603.09320[HNSW] algorithm for approximate +kNN search. HNSW is a graph-based algorithm which only works efficiently when +most vector data is held in memory. You should ensure that data nodes have at +least enough RAM to hold the vector data and index structures. To check the +size of the vector data, you can use the <> API. As a +loose rule of thumb, and assuming the default HNSW options, the bytes used will +be `num_vectors * 4 * (num_dimensions + 12)`. When using the `byte` <> +the space required will be closer to `num_vectors * (num_dimensions + 12)`. Note that +the required RAM is for the filesystem cache, which is separate from the Java +heap. + +The data nodes should also leave a buffer for other ways that RAM is needed. +For example, your index might also include text fields and numerics, which also +benefit from using filesystem cache. It's recommended to run benchmarks with +your specific dataset to ensure there's a sufficient amount of memory to give +good search performance. +You can find https://elasticsearch-benchmarks.elastic.co/#tracks/so_vector[here] +and https://elasticsearch-benchmarks.elastic.co/#tracks/dense_vector[here] some examples +of datasets and configurations that we use for our nightly benchmarks. + +[discrete] +include::search-speed.asciidoc[tag=warm-fs-cache] + +The following file extensions are used for the approximate kNN search: + +* `vec` and `veq` for vector values +* `vex` for HNSW graph +* `vem`, `vemf`, and `vemq` for metadata [discrete] === Reduce the number of index segments diff --git a/docs/reference/images/ingest/document-enrichment-add-inference-pipeline.png b/docs/reference/images/ingest/document-enrichment-add-inference-pipeline.png index ddcf42e24ab83..336228fc0aef0 100644 Binary files a/docs/reference/images/ingest/document-enrichment-add-inference-pipeline.png and b/docs/reference/images/ingest/document-enrichment-add-inference-pipeline.png differ diff --git a/docs/reference/indices/get-data-stream.asciidoc b/docs/reference/indices/get-data-stream.asciidoc index 7701aa9f64cfe..d35e7a3d5e2ee 100644 --- a/docs/reference/indices/get-data-stream.asciidoc +++ b/docs/reference/indices/get-data-stream.asciidoc @@ -262,6 +262,11 @@ The conditions which will trigger the rollover of a backing index as configured `cluster.lifecycle.default.rollover`. This property is an implementation detail and it will only be retrieved when the query param `include_defaults` is set to `true`. The contents of this field are subject to change. ===== + +`rollover_on_write`:: +(Boolean) +If `true`, the next write to this data stream will trigger a rollover first and the document will be +indexed in the new backing index. If the rollover fails, the indexing request will fail too.
==== [[get-data-stream-api-example]] ==== {api-examples-title} @@ -311,7 +316,8 @@ The API returns the following response: "hidden": false, "system": false, "allow_custom_routing": false, - "replicated": false + "replicated": false, + "rollover_on_write": false }, { "name": "my-data-stream-two", @@ -339,7 +345,8 @@ The API returns the following response: "hidden": false, "system": false, "allow_custom_routing": false, - "replicated": false + "replicated": false, + "rollover_on_write": false } ] } diff --git a/docs/reference/indices/rollover-index.asciidoc b/docs/reference/indices/rollover-index.asciidoc index 5a893da552bb6..aa6f978b237d7 100644 --- a/docs/reference/indices/rollover-index.asciidoc +++ b/docs/reference/indices/rollover-index.asciidoc @@ -114,6 +114,11 @@ include::{es-repo-dir}/indices/create-index.asciidoc[tag=index-name-reqs] If `true`, checks whether the current index satisfies the specified `conditions` but does not perform a rollover. Defaults to `false`. +`lazy`:: +(Optional, Boolean) +If `true`, signals that the data stream will be rolled over when the next +indexing operation occurs. Applies only to data streams. Defaults to `false`. + include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards] include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] @@ -204,6 +209,11 @@ conditions were specified, this is an empty object. index met the condition. ==== +`lazy`:: +(Boolean) +If `true`, {es} did not perform the rollover, but successfully marked the data stream to be rolled +over at the next indexing event. + [[rollover-index-api-example]] ==== {api-examples-title} @@ -218,6 +228,17 @@ POST my-data-stream/_rollover ---- // TEST[setup:my_data_stream] +The following request rolls over a data stream lazily, meaning that the data stream +will roll over at the next indexing event. This ensures that mapping and settings changes +will be applied to incoming data, but it avoids creating extra backing indices for +data streams with slow ingestion. + +[source,console] +---- +POST my-data-stream/_rollover?lazy +---- +// TEST[continued] + :target: data stream :index: write index @@ -257,6 +278,7 @@ The API returns: "new_index": ".ds-my-data-stream-2099.05.07-000002", "rolled_over": true, "dry_run": false, + "lazy": false, "conditions": { "[max_age: 7d]": false, "[max_docs: 1000]": true, @@ -328,6 +350,7 @@ The API returns: "new_index": "my-index-2099.05.07-000002", "rolled_over": true, "dry_run": false, + "lazy": false, "conditions": { "[max_age: 7d]": false, "[max_docs: 1000]": true, @@ -399,6 +422,7 @@ The API returns: "new_index": "my-index-2099.05.07-000002", "rolled_over": true, "dry_run": false, + "lazy": false, "conditions": { "[max_age: 7d]": false, "[max_docs: 1000]": true, diff --git a/docs/reference/inference/delete-inference.asciidoc b/docs/reference/inference/delete-inference.asciidoc index c9c3e16458618..692a96212f5ca 100644 --- a/docs/reference/inference/delete-inference.asciidoc +++ b/docs/reference/inference/delete-inference.asciidoc @@ -6,6 +6,11 @@ experimental[] Deletes an {infer} model deployment. +IMPORTANT: The {infer} APIs enable you to use certain services, such as ELSER, +OpenAI, or Hugging Face, in your cluster. This is not the same feature that you +can use on an ML node with custom {ml} models. If you want to train and use your +own model, use the <>.
+ [discrete] [[delete-inference-api-request]] diff --git a/docs/reference/inference/get-inference.asciidoc b/docs/reference/inference/get-inference.asciidoc index b81f2663ec9e1..45f4cb67e7674 100644 --- a/docs/reference/inference/get-inference.asciidoc +++ b/docs/reference/inference/get-inference.asciidoc @@ -6,6 +6,12 @@ experimental[] Retrieves {infer} model information. +IMPORTANT: The {infer} APIs enable you to use certain services, such as ELSER, +OpenAI, or Hugging Face, in your cluster. This is not the same feature that you +can use on an ML node with custom {ml} models. If you want to train and use your +own model, use the <>. + + [discrete] [[get-inference-api-request]] ==== {api-request-title} diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc index 0476ac57287d9..cdc6bfe254ea2 100644 --- a/docs/reference/inference/inference-apis.asciidoc +++ b/docs/reference/inference/inference-apis.asciidoc @@ -4,6 +4,11 @@ experimental[] +IMPORTANT: The {infer} APIs enable you to use certain services, such as ELSER, +OpenAI, or Hugging Face, in your cluster. This is not the same feature that you +can use on an ML node with custom {ml} models. If you want to train and use your +own model, use the <>. + You can use the following APIs to manage {infer} models and perform {infer}: * <> diff --git a/docs/reference/inference/post-inference.asciidoc b/docs/reference/inference/post-inference.asciidoc index f8515a8b33c39..9ef633160f162 100644 --- a/docs/reference/inference/post-inference.asciidoc +++ b/docs/reference/inference/post-inference.asciidoc @@ -6,6 +6,11 @@ experimental[] Performs an inference task on an input text by using an {infer} model. +IMPORTANT: The {infer} APIs enable you to use certain services, such as ELSER, +OpenAI, or Hugging Face, in your cluster. This is not the same feature that you +can use on an ML node with custom {ml} models. If you want to train and use your +own model, use the <>. + [discrete] [[post-inference-api-request]] diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc index 9f0539fb551cb..5d517d313b9ea 100644 --- a/docs/reference/inference/put-inference.asciidoc +++ b/docs/reference/inference/put-inference.asciidoc @@ -6,6 +6,11 @@ experimental[] Creates a model to perform an {infer} task. +IMPORTANT: The {infer} APIs enable you to use certain services, such as ELSER, +OpenAI, or Hugging Face, in your cluster. This is not the same feature that you +can use on an ML node with custom {ml} models. If you want to train and use your +own model, use the <>. + [discrete] [[put-inference-api-request]] @@ -27,6 +32,10 @@ Creates a model to perform an {infer} task. The create {infer} API enables you to create and configure an {infer} model to perform a specific {infer} task. +The following services are available through the {infer} API: +* ELSER +* OpenAI +* Hugging Face [discrete] [[put-inference-api-path-params]] @@ -52,8 +61,9 @@ The type of the {infer} task that the model will perform. Available task types: (Required, string) The type of service supported for the specified task type. Available services: -* `elser`, -* `openai`. +* `elser`: specify the `sparse_embedding` task type to use the ELSER service. +* `openai`: specify the `text_embedding` task type to use the OpenAI service. +* `hugging_face`: specify the `text_embedding` task type to use the Hugging Face service. 
`service_settings`:: (Required, object) diff --git a/docs/reference/ingest/search-inference-processing.asciidoc b/docs/reference/ingest/search-inference-processing.asciidoc index 48505ab314c1e..006cc96294477 100644 --- a/docs/reference/ingest/search-inference-processing.asciidoc +++ b/docs/reference/ingest/search-inference-processing.asciidoc @@ -102,10 +102,13 @@ Here, you'll be able to: 1. Choose a name for your pipeline. - This name will need to be unique across the whole deployment. If you want this pipeline to be index-specific, we recommend including the name of your index in the pipeline name. + - If you do not set the pipeline name, a default unique name will be provided upon selecting a trained model. 2. Select the ML trained model you want to use. + - The model must be deployed before you can select it. + To begin deployment of a model, click the *Deploy* button. 3. Select one or more source fields as input for the inference processor. - If there are no source fields available, your index will need a <>. -4. (Optional) Choose a name for your target field. +4. (Optional) Choose a name for your target field(s). This is where the output of the inference model will be stored. Changing the default name is only possible if you have a single source field selected. 5. Add the source-target field mapping to the configuration by clicking the *Add* button. 6. Repeat steps 3-5 for each field mapping you want to add. @@ -123,51 +126,12 @@ These pipelines can also be viewed, edited, and deleted in Kibana via *Stack Man You may also use the <>. If you delete any of these pipelines outside of the *Content* UI in Kibana, make sure to edit the ML inference pipelines that reference them. -[discrete#ingest-pipeline-search-inference-update-mapping] -==== Update mappings to use ML inference pipelines - -After setting up an ML inference pipeline or attaching an existing one, it may be necessary to manually create the field mappings in order to support the referenced trained ML model's output. -This needs to happen before the pipeline is first used to index some documents, otherwise the model output fields could be inferred with the wrong type. - -[NOTE] -==== -This doesn't apply when you're creating a pipeline with the ELSER model, for which the index mappings are automatically updated in the process. -==== - -The required field name and type depends on the configuration of the pipeline and the trained model it uses. -For example, if you configure a `text_embedding` model, select `summary` as a source field, and `ml.inference.summary` as the target field, the inference output will be stored in `ml.inference..predicted_value` as a <> type. -In order to support semantic search on this field, it must be added to the mapping: - -[source,console] ----- -PUT my-index-0001/_mapping -{ - "properties": { - "ml.inference.summary.predicted_value": { <1> - "type": "dense_vector", <2> - "dims": 768, <3> - "index": true, - "similarity": "dot_product" - } - } -} ----- -// NOTCONSOLE -// TEST[skip:TODO] - -<1> The output of the ML model is stored in the configured target field suffixed with `predicted_value`. -<2> Choose a field type that is compatible with the inference output and supports your search use cases. -<3> Set additional properties as necessary. - -[TIP] -==== -You can check the shape of the generated output before indexing any documents while creating the ML inference pipeline under the *Test* tab. -Simply provide a sample document, click *Simulate*, and look for the `ml.inference` object in the results. 
-==== - [discrete#ingest-pipeline-search-inference-test-inference-pipeline] ==== Test your ML inference pipeline +While creating the {ml} inference pipeline, you can use the *Test* tab to verify the expected structure of the inference output before indexing any documents. +Provide a sample document, click *Simulate*, and look for the `ml.inference` object in the results. + To ensure the ML inference pipeline will be run when ingesting documents, you must make sure the documents you are ingesting have a field named `_run_ml_inference` that is set to `true` and you must set the pipeline to `{index_name}`. For connector and crawler indices, this will happen automatically if you've configured the settings appropriately for the pipeline name `{index_name}`. To manage these settings: diff --git a/docs/reference/query-dsl/exists-query.asciidoc b/docs/reference/query-dsl/exists-query.asciidoc index 75d1b07ea3851..9a9f642daa3f4 100644 --- a/docs/reference/query-dsl/exists-query.asciidoc +++ b/docs/reference/query-dsl/exists-query.asciidoc @@ -9,7 +9,7 @@ Returns documents that contain an indexed value for a field. An indexed value may not exist for a document's field due to a variety of reasons: * The field in the source JSON is `null` or `[]` -* The field has `"index" : false` set in the mapping +* The field has `"index" : false` and `"doc_values" : false` set in the mapping * The length of the field value exceeded an `ignore_above` setting in the mapping * The field value was malformed and `ignore_malformed` was defined in the mapping diff --git a/docs/reference/query-dsl/span-containing-query.asciidoc b/docs/reference/query-dsl/span-containing-query.asciidoc index ec1c0bdf0a8d6..8a8eeba12a7b2 100644 --- a/docs/reference/query-dsl/span-containing-query.asciidoc +++ b/docs/reference/query-dsl/span-containing-query.asciidoc @@ -4,8 +4,7 @@ Span containing ++++ -Returns matches which enclose another span query. The span containing -query maps to Lucene `SpanContainingQuery`. Here is an example: +Returns matches which enclose another span query. Here is an example: [source,console] -------------------------------------------------- diff --git a/docs/reference/query-dsl/span-field-masking-query.asciidoc b/docs/reference/query-dsl/span-field-masking-query.asciidoc index 3a869f64b45f3..b0a9a0a1d6207 100644 --- a/docs/reference/query-dsl/span-field-masking-query.asciidoc +++ b/docs/reference/query-dsl/span-field-masking-query.asciidoc @@ -4,11 +4,11 @@ Span field masking ++++ -Wrapper to allow span queries to participate in composite single-field span queries by 'lying' about their search field. The span field masking query maps to Lucene's `SpanFieldMaskingQuery` +Wrapper to allow span queries to participate in composite single-field span queries by 'lying' about their search field. This can be used to support queries like `span-near` or `span-or` across different fields, which is not ordinarily permitted. -Span field masking query is invaluable in conjunction with *multi-fields* when same content is indexed with multiple analyzers. For instance we could index a field with the standard analyzer which breaks text up into words, and again with the english analyzer which stems words into their root form. +Span field masking query is invaluable in conjunction with *multi-fields* when the same content is indexed with multiple analyzers. For instance, we could index a field with the standard analyzer which breaks text up into words, and again with the english analyzer which stems words into their root form.
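+ +Such a multi-field mapping could look like the following (a hedged sketch; the index name is made up, and `text.stems` matches the field used in the example below): + +[source,console] +-------------------------------------------------- +PUT my-index +{ +  "mappings": { +    "properties": { +      "text": { +        "type": "text", +        "fields": { +          "stems": { +            "type": "text", +            "analyzer": "english" +          } +        } +      } +    } +  } +} +-------------------------------------------------- +// TEST[skip: TBD]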
Example: @@ -28,18 +28,33 @@ GET /_search "span_field_masking": { "query": { "span_term": { - "text.stems": "fox" + "text.stems": "fox" <1> } }, - "field": "text" + "field": "text" <2> } } ], "slop": 5, "in_order": false } + }, + "highlight": { + "require_field_match" : false, <3> + "fields": { + "*": {} + } } } -------------------------------------------------- +<1> The field in which the search actually runs +<2> The field name used to mask the search field +<3> Set `"require_field_match": false` to allow highlighting on the masked field + +Note: `span_field_masking` query may have unexpected scoring and highlighting +behaviour. This is because the query returns and highlights the masked field, +but scoring and highlighting are done using the terms statistics and offsets +of the original field. -Note: as span field masking query returns the masked field, scoring will be done using the norms of the field name supplied. This may lead to unexpected scoring behaviour. +Note: For highlighting to work, the parameter `require_field_match` should +be set to `false` on the highlighter. diff --git a/docs/reference/query-dsl/span-first-query.asciidoc b/docs/reference/query-dsl/span-first-query.asciidoc index 77e3f557fd982..0b6d4ef80adfb 100644 --- a/docs/reference/query-dsl/span-first-query.asciidoc +++ b/docs/reference/query-dsl/span-first-query.asciidoc @@ -4,8 +4,7 @@ Span first ++++ -Matches spans near the beginning of a field. The span first query maps -to Lucene `SpanFirstQuery`. Here is an example: +Matches spans near the beginning of a field. Here is an example: [source,console] -------------------------------------------------- @@ -19,7 +18,7 @@ GET /_search "end": 3 } } -} +} -------------------------------------------------- The `match` clause can be any other span type query. The `end` controls diff --git a/docs/reference/query-dsl/span-near-query.asciidoc b/docs/reference/query-dsl/span-near-query.asciidoc index 0a1aa7082fbb2..1c68cfa12f72c 100644 --- a/docs/reference/query-dsl/span-near-query.asciidoc +++ b/docs/reference/query-dsl/span-near-query.asciidoc @@ -6,8 +6,7 @@ Matches spans which are near one another. One can specify _slop_, the maximum number of intervening unmatched positions, as well as whether -matches are required to be in-order. The span near query maps to Lucene -`SpanNearQuery`. Here is an example: +matches are required to be in-order. Here is an example: [source,console] -------------------------------------------------- diff --git a/docs/reference/query-dsl/span-not-query.asciidoc b/docs/reference/query-dsl/span-not-query.asciidoc index 99814eba9d88a..c1ddf00a7a939 100644 --- a/docs/reference/query-dsl/span-not-query.asciidoc +++ b/docs/reference/query-dsl/span-not-query.asciidoc @@ -6,8 +6,8 @@ Removes matches which overlap with another span query or which are within x tokens before (controlled by the parameter `pre`) or y tokens -after (controlled by the parameter `post`) another SpanQuery. The span not -query maps to Lucene `SpanNotQuery`. +after (controlled by the parameter `post`) another SpanQuery. +Here is an example: [source,console] -------------------------------------------------- diff --git a/docs/reference/query-dsl/span-or-query.asciidoc b/docs/reference/query-dsl/span-or-query.asciidoc index 6c0e78ab266d9..4ab12073c5d2c 100644 --- a/docs/reference/query-dsl/span-or-query.asciidoc +++ b/docs/reference/query-dsl/span-or-query.asciidoc @@ -4,8 +4,7 @@ Span or ++++ -Matches the union of its span clauses.
The span or query maps to Lucene -`SpanOrQuery`. Here is an example: +Matches the union of its span clauses. Here is an example: [source,console] -------------------------------------------------- diff --git a/docs/reference/query-dsl/span-term-query.asciidoc b/docs/reference/query-dsl/span-term-query.asciidoc index 0dac73c9f7019..8e5e49d14e452 100644 --- a/docs/reference/query-dsl/span-term-query.asciidoc +++ b/docs/reference/query-dsl/span-term-query.asciidoc @@ -4,8 +4,7 @@ Span term ++++ -Matches spans containing a term. The span term query maps to Lucene -`SpanTermQuery`. Here is an example: +Matches spans containing a term. Here is an example: [source,console] -------------------------------------------------- @@ -14,7 +13,7 @@ GET /_search "query": { "span_term" : { "user.id" : "kimchy" } } -} +} -------------------------------------------------- A boost can also be associated with the query: @@ -26,7 +25,7 @@ GET /_search "query": { "span_term" : { "user.id" : { "value" : "kimchy", "boost" : 2.0 } } } -} +} -------------------------------------------------- Or : @@ -38,5 +37,5 @@ GET /_search "query": { "span_term" : { "user.id" : { "term" : "kimchy", "boost" : 2.0 } } } -} +} -------------------------------------------------- diff --git a/docs/reference/query-dsl/span-within-query.asciidoc b/docs/reference/query-dsl/span-within-query.asciidoc index 62a12fc719613..0592e83117014 100644 --- a/docs/reference/query-dsl/span-within-query.asciidoc +++ b/docs/reference/query-dsl/span-within-query.asciidoc @@ -4,8 +4,8 @@ Span within ++++ -Returns matches which are enclosed inside another span query. The span within -query maps to Lucene `SpanWithinQuery`. Here is an example: +Returns matches which are enclosed inside another span query. +Here is an example: [source,console] -------------------------------------------------- diff --git a/docs/reference/query-dsl/special-queries.asciidoc b/docs/reference/query-dsl/special-queries.asciidoc index d46377f698359..8fb23ca4dbb64 100644 --- a/docs/reference/query-dsl/special-queries.asciidoc +++ b/docs/reference/query-dsl/special-queries.asciidoc @@ -60,3 +60,5 @@ include::wrapper-query.asciidoc[] include::pinned-query.asciidoc[] include::rule-query.asciidoc[] + +include::weighted-tokens-query.asciidoc[] diff --git a/docs/reference/query-dsl/text-expansion-query.asciidoc b/docs/reference/query-dsl/text-expansion-query.asciidoc index 46a9aafdd1af8..cb0a7c6ea9c01 100644 --- a/docs/reference/query-dsl/text-expansion-query.asciidoc +++ b/docs/reference/query-dsl/text-expansion-query.asciidoc @@ -62,7 +62,7 @@ Default: Disabled. Parameters for `` are: `tokens_freq_ratio_threshold`:: -(Optional, float) +(Optional, integer) preview:[] Tokens whose frequency is more than `tokens_freq_ratio_threshold` times the average frequency of all tokens in the specified field are considered outliers and pruned. This value must between 1 and 100. @@ -110,29 +110,96 @@ GET my-index/_search ---- // TEST[skip: TBD] -[discrete] -[[text-expansion-query-with-pruning-config-example]] -=== Example ELSER query with pruning configuration +Multiple `text_expansion` queries can be combined with each other or other query types. +This can be achieved by wrapping them in <> and using linear boosting: -The following is an extension to the above example that adds a preview:[] pruning configuration to the `text_expansion` query. -The pruning configuration identifies non-significant tokens to prune from the query in order to improve query performance. 
[source,console] ---- GET my-index/_search { - "query":{ - "text_expansion":{ - "ml.tokens":{ - "model_id":".elser_model_2", - "model_text":"How is the weather in Jamaica?" - }, - "pruning_config": { - "tokens_freq_ratio_threshold": 5, - "tokens_weight_threshold": 0.4, - "only_score_pruned_tokens": false - } + "query": { + "bool": { + "should": [ + { + "text_expansion": { + "ml.inference.title_expanded.predicted_value": { + "model_id": ".elser_model_2", + "model_text": "How is the weather in Jamaica?", + "boost": 1 + } + } + }, + { + "text_expansion": { + "ml.inference.description_expanded.predicted_value": { + "model_id": ".elser_model_2", + "model_text": "How is the weather in Jamaica?", + "boost": 1 + } + } + }, + { + "multi_match": { + "query": "How is the weather in Jamaica?", + "fields": [ + "title", + "description" + ], + "boost": 4 + } + } + ] + } + } +} +---- +// TEST[skip: TBD] + +This can also be achieved by using sub searches combined with <>. + +[source,console] +---- +GET my-index/_search +{ + "sub_searches": [ + { + "query": { + "multi_match": { + "query": "How is the weather in Jamaica?", + "fields": [ + "title", + "description" + ] + } } - } + }, + { + "query": { + "text_expansion": { + "ml.inference.title_expanded.predicted_value": { + "model_id": ".elser_model_2", + "model_text": "How is the weather in Jamaica?" + } + } + } + }, + { + "query": { + "text_expansion": { + "ml.inference.description_expanded.predicted_value": { + "model_id": ".elser_model_2", + "model_text": "How is the weather in Jamaica?" + } + } + } + } + ], + "rank": { + "rrf": { + "window_size": 10, + "rank_constant": 20 + } + } } ---- // TEST[skip: TBD] @@ -141,9 +208,13 @@ GET my-index/_search [[text-expansion-query-with-pruning-config-and-rescore-example]] === Example ELSER query with pruning configuration and rescore -The following is an extension to the above example that adds a <> function on top of the preview:[] pruning configuration to the `text_expansion` query. +The following is an extension to the above example that adds a preview:[] pruning configuration to the `text_expansion` query. The pruning configuration identifies non-significant tokens to prune from the query in order to improve query performance. -Rescoring the query with the tokens that were originally pruned from the query may improve overall search relevance when using this pruning strategy. + +Token pruning happens at the shard level. +While this should result in the same tokens being labeled as insignificant across shards, this is not guaranteed based on the composition of each shard. +Therefore, if you are running `text_expansion` with a `pruning_config` on a multi-shard index, we strongly recommend adding a <> function with the tokens that were originally pruned from the query. +This will help mitigate any shard-level inconsistency with pruned tokens and provide better relevance overall. [source,console] ---- @@ -188,30 +259,3 @@ GET my-index/_search ==== Depending on your data, the text expansion query may be faster with `track_total_hits: false`. ==== - -[discrete] -[[weighted-tokens-query-example]] -=== Example Weighted token query - -In order to quickly iterate during tests, we exposed a new preview:[] `weighted_tokens` query for evaluation of tokenized datasets. -While this is not a query that is intended for production use, it can be used to quickly evaluate relevance using various pruning configurations. 
+ - [source,console] ---- -POST /docs/_search -{ - "query": { - "weighted_tokens": { - "query_expansion": { - "tokens": {"2161": 0.4679, "2621": 0.307, "2782": 0.1299, "2851": 0.1056, "3088": 0.3041, "3376": 0.1038, "3467": 0.4873, "3684": 0.8958, "4380": 0.334, "4542": 0.4636, "4633": 2.2805, "4785": 1.2628, "4860": 1.0655, "5133": 1.0709, "7139": 1.0016, "7224": 0.2486, "7387": 0.0985, "7394": 0.0542, "8915": 0.369, "9156": 2.8947, "10505": 0.2771, "11464": 0.3996, "13525": 0.0088, "14178": 0.8161, "16893": 0.1376, "17851": 1.5348, "19939": 0.6012}, - "pruning_config": { - "tokens_freq_ratio_threshold": 5, - "tokens_weight_threshold": 0.4, - "only_score_pruned_tokens": false - } - } - } - } -} ----- -//TEST[skip: TBD] diff --git a/docs/reference/query-dsl/weighted-tokens-query.asciidoc b/docs/reference/query-dsl/weighted-tokens-query.asciidoc new file mode 100644 index 0000000000000..cbd88eb3290dc --- /dev/null +++ b/docs/reference/query-dsl/weighted-tokens-query.asciidoc @@ -0,0 +1,122 @@ +[[query-dsl-weighted-tokens-query]] +=== Weighted tokens query +++++ +Weighted tokens +++++ + +preview::[] + +The weighted tokens query requires a list of token-weight pairs that are sent in with a query rather than calculated using a {nlp} model. +These token pairs are then used in a query against a <> or <> field. + +Weighted tokens queries are useful when you want to use an external query expansion model, or to quickly prototype changes without reindexing your data with a new model. + +[discrete] +[[weighted-tokens-query-ex-request]] +==== Example request + +[source,console] +---- +POST _search +{ + "query": { + "weighted_tokens": { + "query_expansion_field": { + "tokens": {"2161": 0.4679, "2621": 0.307, "2782": 0.1299, "2851": 0.1056, "3088": 0.3041, "3376": 0.1038, "3467": 0.4873, "3684": 0.8958, "4380": 0.334, "4542": 0.4636, "4633": 2.2805, "4785": 1.2628, "4860": 1.0655, "5133": 1.0709, "7139": 1.0016, "7224": 0.2486, "7387": 0.0985, "7394": 0.0542, "8915": 0.369, "9156": 2.8947, "10505": 0.2771, "11464": 0.3996, "13525": 0.0088, "14178": 0.8161, "16893": 0.1376, "17851": 1.5348, "19939": 0.6012}, + "pruning_config": { + "tokens_freq_ratio_threshold": 5, + "tokens_weight_threshold": 0.4, + "only_score_pruned_tokens": false + } + } + } + } +} +---- +// TEST[skip: TBD] + +[discrete] +[[weighted-token-query-params]] +==== Top-level parameters for `weighted_tokens` + +``::: +(Required, dictionary) +A dictionary of token-weight pairs. + +`pruning_config` :::: +(Optional, object) +Optional pruning configuration. If enabled, this will omit non-significant tokens from the query in order to improve query performance. +Default: Disabled. ++ +-- +Parameters for `` are: + +`tokens_freq_ratio_threshold`:: +(Optional, integer) +Tokens whose frequency is more than `tokens_freq_ratio_threshold` times the average frequency of all tokens in the specified field are considered outliers and pruned. +This value must be between 1 and 100. +Default: `5`. + +`tokens_weight_threshold`:: +(Optional, float) +Tokens whose weight is less than `tokens_weight_threshold` are considered nonsignificant and pruned. +This value must be between 0 and 1. +Default: `0.4`. + +`only_score_pruned_tokens`:: +(Optional, boolean) +If `true`, we only input pruned tokens into scoring, and discard non-pruned tokens. +It is strongly recommended to set this to `false` for the main query, but this can be set to `true` for a rescore query to get more relevant results. +Default: `false`.
+ + NOTE: The default values for `tokens_freq_ratio_threshold` and `tokens_weight_threshold` were chosen based on tests using ELSER that provided the best results. +-- + +[discrete] +[[weighted-tokens-query-with-pruning-config-and-rescore-example]] +==== Example weighted tokens query with pruning configuration and rescore + +The following example adds a pruning configuration to the `weighted_tokens` query. +The pruning configuration identifies non-significant tokens to prune from the query in order to improve query performance. + +Token pruning happens at the shard level. +While this should result in the same tokens being labeled as insignificant across shards, this is not guaranteed based on the composition of each shard. +Therefore, if you are running `weighted_tokens` with a `pruning_config` on a multi-shard index, we strongly recommend adding a <> function with the tokens that were originally pruned from the query. +This will help mitigate any shard-level inconsistency with pruned tokens and provide better relevance overall. + +[source,console] +---- +GET my-index/_search +{ + "query":{ + "weighted_tokens": { + "query_expansion_field": { + "tokens": {"2161": 0.4679, "2621": 0.307, "2782": 0.1299, "2851": 0.1056, "3088": 0.3041, "3376": 0.1038, "3467": 0.4873, "3684": 0.8958, "4380": 0.334, "4542": 0.4636, "4633": 2.2805, "4785": 1.2628, "4860": 1.0655, "5133": 1.0709, "7139": 1.0016, "7224": 0.2486, "7387": 0.0985, "7394": 0.0542, "8915": 0.369, "9156": 2.8947, "10505": 0.2771, "11464": 0.3996, "13525": 0.0088, "14178": 0.8161, "16893": 0.1376, "17851": 1.5348, "19939": 0.6012}, + "pruning_config": { + "tokens_freq_ratio_threshold": 5, + "tokens_weight_threshold": 0.4, + "only_score_pruned_tokens": false + } + } + } + }, + "rescore": { + "window_size": 100, + "query": { + "rescore_query": { + "weighted_tokens": { + "query_expansion_field": { + "tokens": {"2161": 0.4679, "2621": 0.307, "2782": 0.1299, "2851": 0.1056, "3088": 0.3041, "3376": 0.1038, "3467": 0.4873, "3684": 0.8958, "4380": 0.334, "4542": 0.4636, "4633": 2.2805, "4785": 1.2628, "4860": 1.0655, "5133": 1.0709, "7139": 1.0016, "7224": 0.2486, "7387": 0.0985, "7394": 0.0542, "8915": 0.369, "9156": 2.8947, "10505": 0.2771, "11464": 0.3996, "13525": 0.0088, "14178": 0.8161, "16893": 0.1376, "17851": 1.5348, "19939": 0.6012}, + "pruning_config": { + "tokens_freq_ratio_threshold": 5, + "tokens_weight_threshold": 0.4, + "only_score_pruned_tokens": true + } + } + } + } + } + } +} +---- +//TEST[skip: TBD] diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 340ef3a5c57c4..068cb3d2f127b 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -8,6 +8,7 @@ This section summarizes the changes in each release. * <> * <> +* <> * <> * <> * <> @@ -59,6 +60,7 @@ This section summarizes the changes in each release. include::release-notes/8.13.0.asciidoc[] include::release-notes/8.12.0.asciidoc[] +include::release-notes/8.11.3.asciidoc[] include::release-notes/8.11.2.asciidoc[] include::release-notes/8.11.1.asciidoc[] include::release-notes/8.11.0.asciidoc[] diff --git a/docs/reference/release-notes/8.11.3.asciidoc b/docs/reference/release-notes/8.11.3.asciidoc index 48ab82d0d4391..ddeb50dad1f75 100644 --- a/docs/reference/release-notes/8.11.3.asciidoc +++ b/docs/reference/release-notes/8.11.3.asciidoc @@ -1,6 +1,8 @@ [[release-notes-8.11.3]] == {es} version 8.11.3 +coming[8.11.3] + Also see <>.
[[bug-8.11.3]] diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index 59d96d1a26904..7757e7c2f7926 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -24,7 +24,7 @@ not be included yet. * <> * <> * <> -* <> +* <> * <> * <> * <> @@ -72,7 +72,7 @@ include::{es-repo-dir}/data-streams/data-stream-apis.asciidoc[] include::{es-repo-dir}/docs.asciidoc[] include::{es-repo-dir}/ingest/apis/enrich/index.asciidoc[] include::{es-repo-dir}/eql/eql-apis.asciidoc[] -include::{es-repo-dir}/esql/esql-query-api.asciidoc[] +include::{es-repo-dir}/esql/esql-apis.asciidoc[] include::{es-repo-dir}/features/apis/features-apis.asciidoc[] include::{es-repo-dir}/fleet/index.asciidoc[] include::{es-repo-dir}/text-structure/apis/find-structure.asciidoc[leveloffset=+1] diff --git a/docs/reference/rest-api/security/bulk-update-api-keys.asciidoc b/docs/reference/rest-api/security/bulk-update-api-keys.asciidoc index 2e7034caaae89..faf87c67d1ccc 100644 --- a/docs/reference/rest-api/security/bulk-update-api-keys.asciidoc +++ b/docs/reference/rest-api/security/bulk-update-api-keys.asciidoc @@ -30,7 +30,7 @@ This operation can greatly improve performance over making individual updates. It's not possible to update expired or <> API keys. -This API supports updates to API key access scope and metadata. +This API supports updates to API key access scope, metadata and expiration. The access scope of each API key is derived from the <> you specify in the request, and a snapshot of the owner user's permissions at the time of the request. The snapshot of the owner's permissions is updated automatically on every call. @@ -63,6 +63,9 @@ The structure of a role descriptor is the same as the request for the <>. -This API supports updates to an API key's access scope and metadata. +This API supports updates to an API key's access scope, metadata and expiration. The access scope of an API key is derived from the <> you specify in the request, and a snapshot of the owner user's permissions at the time of the request. The snapshot of the owner's permissions is updated automatically on every call. @@ -67,6 +67,9 @@ It supports nested data structure. Within the `metadata` object, top-level keys beginning with `_` are reserved for system usage. When specified, this fully replaces metadata previously associated with the API key. +`expiration`:: +(Optional, string) Expiration time for the API key. By default, API keys never expire. Can be omitted to leave unchanged. + [[security-api-update-api-key-response-body]] ==== {api-response-body-title} diff --git a/docs/reference/rest-api/security/update-cross-cluster-api-key.asciidoc b/docs/reference/rest-api/security/update-cross-cluster-api-key.asciidoc index f0dfb11f1c98b..c22a1347c8262 100644 --- a/docs/reference/rest-api/security/update-cross-cluster-api-key.asciidoc +++ b/docs/reference/rest-api/security/update-cross-cluster-api-key.asciidoc @@ -34,7 +34,7 @@ Use this API to update cross-cluster API keys created by the <>. -This API supports updates to an API key's access scope and metadata. +This API supports updates to an API key's access scope, metadata and expiration. The owner user's information, e.g. `username`, `realm`, is also updated automatically on every call. NOTE: This API cannot update <>, which should be updated by @@ -66,6 +66,9 @@ It supports nested data structure. Within the `metadata` object, top-level keys beginning with `_` are reserved for system usage. 
When specified, this fully replaces metadata previously associated with the API key. +`expiration`:: +(Optional, string) Expiration time for the API key. By default, API keys never expire. Can be omitted to leave unchanged. + [[security-api-update-cross-cluster-api-key-response-body]] ==== {api-response-body-title} diff --git a/docs/reference/search/search-your-data/search-with-synonyms.asciidoc b/docs/reference/search/search-your-data/search-with-synonyms.asciidoc index fb6abd6d36099..16952f94890c7 100644 --- a/docs/reference/search/search-your-data/search-with-synonyms.asciidoc +++ b/docs/reference/search/search-your-data/search-with-synonyms.asciidoc @@ -75,6 +75,13 @@ A large number of inline synonyms increases cluster size unnecessarily and can l Once your synonyms sets are created, you can start configuring your token filters and analyzers to use them. +[WARNING] +====== +Synonyms sets must exist before they can be added to indices. +If an index is created referencing a nonexistent synonyms set, the index will remain in a partially created and inoperable state. +The only way to recover from this scenario is to ensure the synonyms set exists then either delete and re-create the index, or close and re-open the index. +====== + {es} uses synonyms as part of the <>. You can use two types of <> to include synonyms: diff --git a/docs/reference/security/authentication/configuring-pki-realm.asciidoc b/docs/reference/security/authentication/configuring-pki-realm.asciidoc index 54118027e59af..428e6f73fb239 100644 --- a/docs/reference/security/authentication/configuring-pki-realm.asciidoc +++ b/docs/reference/security/authentication/configuring-pki-realm.asciidoc @@ -6,11 +6,6 @@ the desired network layers (transport or http), and map the Distinguished Names (DNs) from the Subject field in the user certificates to roles. You create the mappings in a role mapping file or use the role mappings API. -TIP: You can use a combination of PKI and username/password authentication. For -example, you can enable SSL/TLS on the transport layer and define a PKI realm to -require transport clients to authenticate with X.509 certificates, while still -authenticating HTTP traffic using username and password credentials. - . Add a realm configuration for a `pki` realm to `elasticsearch.yml` under the `xpack.security.authc.realms.pki` namespace. You must explicitly set the `order` attribute. See <> for all of the options you can set for a @@ -42,7 +37,8 @@ realms you specify are used for authentication. If you also want to use the -- -. Optional: If you want to use something other than the CN of the Subject DN as +. Optional: The username is defined by the <>. +If you want to use something other than the CN of the Subject DN as the username, you can specify a regex to extract the desired username. The regex is applied on the Subject DN. + diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index f4875fd096b00..6a60a8a6703fe 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -71,6 +71,11 @@ the sensitive nature of the information. (<>) Enables fips mode of operation. Set this to `true` if you run this {es} instance in a FIPS 140-2 enabled JVM. For more information, see <>. Defaults to `false`. +`xpack.security.fips_mode.required_providers`:: +(<>) +Optionally enforce specific Java JCE/JSSE security providers. 
For example, set this to `["BCFIPS", "BCJSSE"]` (case-insensitive) to require +the Bouncy Castle FIPS JCE and JSSE security providers. Only applicable when `xpack.security.fips_mode.enabled` is set to `true`. + [discrete] [[password-hashing-settings]] ==== Password hashing settings @@ -1010,8 +1015,10 @@ the following settings: `username_pattern`:: (<>) The regular expression pattern used to extract the username from the -certificate DN. The first match group is the used as the username. -Defaults to `CN=(.*?)(?:,\|$)`. +certificate DN. The username is used for auditing and logging. The username can also be used +with the <> and <>. +The first match group is used as the username. +Defaults to `CN=(.*?)(?:,|$)`. `certificate_authorities`:: (<>)
diff --git a/docs/reference/setup/secure-settings.asciidoc b/docs/reference/setup/secure-settings.asciidoc index d51c0dd684871..22e828f96f5d2 100644 --- a/docs/reference/setup/secure-settings.asciidoc +++ b/docs/reference/setup/secure-settings.asciidoc @@ -6,11 +6,11 @@ their values is not sufficient. For this use case, {es} provides a keystore and the <> to manage the settings in the keystore. -IMPORTANT: Only some settings are designed to be read from the keystore. However, -the keystore has no validation to block unsupported settings. Adding unsupported -settings to the keystore causes {es} to fail to start. To see whether a setting -is supported in the keystore, look for a "Secure" qualifier in the setting -reference. +IMPORTANT: Only some settings are designed to be read from the keystore. +Adding unsupported settings to the keystore causes the validation in the +`_nodes/reload_secure_settings` API to fail and, if not addressed, will +cause {es} to fail to start. To see whether a setting is supported in the +keystore, look for a "Secure" qualifier in the setting reference. All the modifications to the keystore take effect only after restarting {es}. @@ -42,12 +42,12 @@ POST _nodes/reload_secure_settings <1> The password that the {es} keystore is encrypted with. -This API decrypts and re-reads the entire keystore, on every cluster node, -but only the *reloadable* secure settings are applied. Changes to other -settings do not go into effect until the next restart. Once the call returns, -the reload has been completed, meaning that all internal data structures -dependent on these settings have been changed. Everything should look as if the -settings had the new value from the start. +This API decrypts, re-reads the entire keystore and validates all settings on +every cluster node, but only the *reloadable* secure settings are applied. +Changes to other settings do not go into effect until the next restart. Once +the call returns, the reload has been completed, meaning that all internal data +structures dependent on these settings have been changed. Everything should +look as if the settings had the new value from the start. When changing multiple *reloadable* secure settings, modify all of them on each cluster node, then issue a <>
diff --git a/docs/reference/snapshot-restore/restore-snapshot.asciidoc b/docs/reference/snapshot-restore/restore-snapshot.asciidoc index 04f9f55ef13b4..632573de02b69 100644 --- a/docs/reference/snapshot-restore/restore-snapshot.asciidoc +++ b/docs/reference/snapshot-restore/restore-snapshot.asciidoc @@ -359,12 +359,7 @@ POST _watcher/_start ---- // TEST[continued] //// --- - -.
{blank} -+ --- * Universal Profiling + Check if Universal Profiling index template management is enabled: @@ -385,22 +380,25 @@ PUT _cluster/settings } } ---- +-- -[[restore-create-file-realm-user]] -If you use {es} security features, log in to a node host, navigate to the {es} -installation directory, and add a user with the `superuser` role to the file -realm using the <> tool. +. [[restore-create-file-realm-user]]If you use {es} security features, log in to +a node host, navigate to the {es} installation directory, and add a user with +the `superuser` role to the file realm using the +<> tool. ++ For example, the following command creates a user named `restore_user`. ++ [source,sh] ---- ./bin/elasticsearch-users useradd restore_user -p my_password -r superuser ---- ++ Use this file realm user to authenticate requests until the restore operation is complete. --- . Use the <> to set <> to diff --git a/docs/reference/synonyms/apis/delete-synonym-rule.asciidoc b/docs/reference/synonyms/apis/delete-synonym-rule.asciidoc index eaff47f5d7909..74cbab8c0b4a2 100644 --- a/docs/reference/synonyms/apis/delete-synonym-rule.asciidoc +++ b/docs/reference/synonyms/apis/delete-synonym-rule.asciidoc @@ -1,8 +1,6 @@ [[delete-synonym-rule]] === Delete synonym rule -beta::[] - ++++ Delete synonym rule ++++ diff --git a/docs/reference/synonyms/apis/delete-synonyms-set.asciidoc b/docs/reference/synonyms/apis/delete-synonyms-set.asciidoc index 6ba4dcdc8f7be..9ba33ff3a5c75 100644 --- a/docs/reference/synonyms/apis/delete-synonyms-set.asciidoc +++ b/docs/reference/synonyms/apis/delete-synonyms-set.asciidoc @@ -1,8 +1,6 @@ [[delete-synonyms-set]] === Delete synonyms set -beta::[] - ++++ Delete synonyms set ++++ diff --git a/docs/reference/synonyms/apis/get-synonym-rule.asciidoc b/docs/reference/synonyms/apis/get-synonym-rule.asciidoc index 6ce978ae68ac6..c6c35e0efecca 100644 --- a/docs/reference/synonyms/apis/get-synonym-rule.asciidoc +++ b/docs/reference/synonyms/apis/get-synonym-rule.asciidoc @@ -1,8 +1,6 @@ [[get-synonym-rule]] === Get synonym rule -beta::[] - ++++ Get synonym rule ++++ diff --git a/docs/reference/synonyms/apis/get-synonyms-set.asciidoc b/docs/reference/synonyms/apis/get-synonyms-set.asciidoc index ddd7d2079dbf5..70bb5fb69526d 100644 --- a/docs/reference/synonyms/apis/get-synonyms-set.asciidoc +++ b/docs/reference/synonyms/apis/get-synonyms-set.asciidoc @@ -1,8 +1,6 @@ [[get-synonyms-set]] === Get synonyms set -beta::[] - ++++ Get synonyms set ++++ diff --git a/docs/reference/synonyms/apis/list-synonyms-sets.asciidoc b/docs/reference/synonyms/apis/list-synonyms-sets.asciidoc index 2522542886d9e..705a24c809e99 100644 --- a/docs/reference/synonyms/apis/list-synonyms-sets.asciidoc +++ b/docs/reference/synonyms/apis/list-synonyms-sets.asciidoc @@ -1,8 +1,6 @@ [[list-synonyms-sets]] === List synonyms sets -beta::[] - ++++ List synonyms sets ++++ diff --git a/docs/reference/synonyms/apis/put-synonym-rule.asciidoc b/docs/reference/synonyms/apis/put-synonym-rule.asciidoc index 95492c95d36fe..de2865632d55e 100644 --- a/docs/reference/synonyms/apis/put-synonym-rule.asciidoc +++ b/docs/reference/synonyms/apis/put-synonym-rule.asciidoc @@ -1,8 +1,6 @@ [[put-synonym-rule]] === Create or update synonym rule -beta::[] - ++++ Create or update synonym rule ++++ diff --git a/docs/reference/synonyms/apis/put-synonyms-set.asciidoc b/docs/reference/synonyms/apis/put-synonyms-set.asciidoc index a3c06c70db17b..5651c4c99adcd 100644 --- a/docs/reference/synonyms/apis/put-synonyms-set.asciidoc +++ 
b/docs/reference/synonyms/apis/put-synonyms-set.asciidoc @@ -1,8 +1,6 @@ [[put-synonyms-set]] === Create or update synonyms set -beta::[] - ++++ Create or update synonyms set ++++
diff --git a/docs/reference/synonyms/apis/synonyms-apis.asciidoc b/docs/reference/synonyms/apis/synonyms-apis.asciidoc index 6849477177dcf..9b92ba8e8579d 100644 --- a/docs/reference/synonyms/apis/synonyms-apis.asciidoc +++ b/docs/reference/synonyms/apis/synonyms-apis.asciidoc @@ -1,8 +1,6 @@ [[synonyms-apis]] == Synonyms APIs -beta::[] - ++++ Synonyms APIs ++++
diff --git a/docs/reference/tab-widgets/esql/esql-getting-started-enrich-policy.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-enrich-policy.asciidoc index c51a46bdef3b3..3e9796f89e94d 100644 --- a/docs/reference/tab-widgets/esql/esql-getting-started-enrich-policy.asciidoc +++ b/docs/reference/tab-widgets/esql/esql-getting-started-enrich-policy.asciidoc @@ -58,9 +58,9 @@ DELETE /_enrich/policy/clientip_policy // tag::demo-env[] -On the demo environment at https://esql.demo.elastic.co/[esql.demo.elastic.co], +On the demo environment at https://ela.st/ql/[ela.st/ql], an enrich policy called `clientip_policy` has already been created and executed. The policy links an IP address to an environment ("Development", "QA", or -"Production") +"Production"). // end::demo-env[]
diff --git a/docs/reference/tab-widgets/esql/esql-getting-started-sample-data.asciidoc b/docs/reference/tab-widgets/esql/esql-getting-started-sample-data.asciidoc index 2a899a9f1ea33..d9b08b7281f77 100644 --- a/docs/reference/tab-widgets/esql/esql-getting-started-sample-data.asciidoc +++ b/docs/reference/tab-widgets/esql/esql-getting-started-sample-data.asciidoc @@ -43,6 +43,6 @@ PUT sample_data/_bulk The data set used in this guide has been preloaded into the Elastic {esql} public demo environment. Visit -https://esql.demo.elastic.co/[esql.demo.elastic.co] to start using it. +https://ela.st/ql[ela.st/ql] to start using it. // end::demo-env[]
diff --git a/docs/reference/troubleshooting/corruption-issues.asciidoc b/docs/reference/troubleshooting/corruption-issues.asciidoc index cb4cada6bddd0..4a245daba0904 100644 --- a/docs/reference/troubleshooting/corruption-issues.asciidoc +++ b/docs/reference/troubleshooting/corruption-issues.asciidoc @@ -38,13 +38,20 @@ well-tested, so you can be very confident that a checksum mismatch really does indicate that the data read from disk is different from the data that {es} previously wrote. +It is also possible that {es} reports a corruption if a file it needs is +entirely missing, with an exception such as: + +- `java.io.FileNotFoundException` +- `java.nio.file.NoSuchFileException` + The files that make up a Lucene index are written in full before they are used. If a file is needed to recover an index after a restart then your storage system previously confirmed to {es} that this file was durably synced to disk. On Linux this means that the `fsync()` system call returned successfully. {es} sometimes reports that an index is corrupt because a file needed for recovery -has been truncated or is missing its footer. This indicates that your storage -system acknowledges durable writes incorrectly. +is missing, or it exists but has been truncated or is missing its footer. This +indicates that your storage system acknowledges durable writes incorrectly or +that some external process has modified the data {es} previously wrote to disk. There are many possible explanations for {es} detecting corruption in your cluster.
Databases like {es} generate a challenging I/O workload that may find
diff --git a/docs/reference/watcher/actions/index.asciidoc b/docs/reference/watcher/actions/index.asciidoc index a0244778127d7..8fb6193206016 100644 --- a/docs/reference/watcher/actions/index.asciidoc +++ b/docs/reference/watcher/actions/index.asciidoc @@ -40,7 +40,7 @@ The following snippet shows a simple `index` action definition: |====== |Name |Required | Default | Description -| `index` | yes^*^ | - a| The index, alias, or data stream to index into. +| `index` | yes^*^ | - a| The index, alias, or data stream to index into. Date math expressions like `` are also supported. ^*^If you dynamically set an `_index` value, this parameter isn't required. See <>.
diff --git a/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java index 6df51189e918e..1d6df60df0f88 100644 --- a/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java +++ b/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java @@ -16,7 +16,6 @@ import org.apache.http.util.EntityUtils; import org.apache.lucene.tests.util.TimeUnits; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.settings.SecureString; @@ -48,7 +47,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.function.Predicate; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -98,20 +96,9 @@ protected boolean randomizeContentType() { protected ClientYamlTestClient initClientYamlTestClient( final ClientYamlSuiteRestSpec restSpec, final RestClient restClient, - final List<HttpHost> hosts, - final Version esVersion, - final Predicate<String> clusterFeaturesPredicate, - final String os + final List<HttpHost> hosts ) { - return new ClientYamlDocsTestClient( - restSpec, - restClient, - hosts, - esVersion, - clusterFeaturesPredicate, - os, - this::getClientBuilderWithSniffedHosts - ); + return new ClientYamlDocsTestClient(restSpec, restClient, hosts, this::getClientBuilderWithSniffedHosts); } @Before
diff --git a/gradle/build.versions.toml b/gradle/build.versions.toml index f1965fc5400ea..be69b2341d6e2 100644 --- a/gradle/build.versions.toml +++ b/gradle/build.versions.toml @@ -20,7 +20,7 @@ forbiddenApis = "de.thetaphi:forbiddenapis:3.6" gradle-enterprise = "com.gradle:gradle-enterprise-gradle-plugin:3.14.1" hamcrest = "org.hamcrest:hamcrest:2.1" httpcore = "org.apache.httpcomponents:httpcore:4.4.12" -httpclient = "org.apache.httpcomponents:httpclient:4.5.10" +httpclient = "org.apache.httpcomponents:httpclient:4.5.14" idea-ext = "gradle.plugin.org.jetbrains.gradle.plugin.idea-ext:gradle-idea-ext:1.1.4" json-schema-validator = "com.networknt:json-schema-validator:1.0.72" json-assert = "org.skyscreamer:jsonassert:1.5.0"
diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 263602c9841a8..24b81106dcea3 100644 [dependency-verification checksum entries updated here for this PR's dependency bumps; the XML element contents were lost in extraction and are not recoverable]
diff --git a/libs/cli/build.gradle b/libs/cli/build.gradle index c12ae87ee65fe..dc045ba09e531 100644 --- a/libs/cli/build.gradle +++ b/libs/cli/build.gradle @@ -11,9 +11,12 @@ apply plugin: 'elasticsearch.publish' dependencies { api 'net.sf.jopt-simple:jopt-simple:5.0.2' api project(':libs:elasticsearch-core') + + testImplementation(project(":test:framework")) { + exclude group: 'org.elasticsearch', module: 'elasticsearch-cli' + } } -tasks.named("test").configure { enabled = false } // Since CLI does not depend on :server, it cannot run the jarHell task tasks.named("jarHell").configure { enabled = false }
diff --git a/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java b/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java index 856dfc6a5a078..69cb76636a996 100644 --- a/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java +++ b/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java @@ -18,6 +18,8 @@ import java.io.OutputStream; import java.io.PrintWriter; import java.io.Reader; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; import java.nio.charset.Charset; import java.util.Arrays; import java.util.Locale; @@ -274,8 +276,8 @@ public boolean isHeadless() { } private static class ConsoleTerminal extends Terminal { - - private static final Console CONSOLE = System.console(); + private static final int JDK_VERSION_WITH_IS_TERMINAL = 22; + private static final Console CONSOLE = detectTerminal(); ConsoleTerminal() { super(CONSOLE.reader(), CONSOLE.writer(), ERROR_WRITER); @@ -285,6 +287,23 @@ static boolean isSupported() { return CONSOLE != null; } + static Console detectTerminal() { + // JDK >= 22 returns a console even if the terminal is redirected unless using -Djdk.console=java.base + // https://bugs.openjdk.org/browse/JDK-8308591 + Console console = System.console(); + if (console != null && Runtime.version().feature() >= JDK_VERSION_WITH_IS_TERMINAL) { + try { + // verify the console is a terminal using isTerminal() on JDK >= 22 + // TODO: Remove reflection once Java 22 sources are supported, e.g. using a MRJAR + Method isTerminal = Console.class.getMethod("isTerminal"); + return Boolean.TRUE.equals(isTerminal.invoke(console)) ? console : null; + } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) { + throw new AssertionError(e); + } + } + return console; + } + @Override public String readText(String prompt) { return CONSOLE.readLine("%s", prompt);
diff --git a/libs/cli/src/test/java/org/elasticsearch/cli/TerminalTests.java b/libs/cli/src/test/java/org/elasticsearch/cli/TerminalTests.java new file mode 100644 index 0000000000000..9c1faf911a829 --- /dev/null +++ b/libs/cli/src/test/java/org/elasticsearch/cli/TerminalTests.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V.
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cli; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.ESTestCase.WithoutSecurityManager; + +@WithoutSecurityManager +public class TerminalTests extends ESTestCase { + + public void testSystemTerminalIfRedirected() { + // Expect system terminal if redirected for tests. + // To force new behavior in JDK 22 this should run without security manager. + // Otherwise, JDK 22 doesn't provide a console if redirected. + assertEquals(Terminal.SystemTerminal.class, Terminal.DEFAULT.getClass()); + } +} diff --git a/libs/core/src/main/java/org/elasticsearch/core/RefCounted.java b/libs/core/src/main/java/org/elasticsearch/core/RefCounted.java index 1f725ac48a16f..865b1afb5d4fe 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/RefCounted.java +++ b/libs/core/src/main/java/org/elasticsearch/core/RefCounted.java @@ -74,4 +74,27 @@ default void mustIncRef() { assert false : AbstractRefCounted.ALREADY_CLOSED_MESSAGE; incRef(); // throws an ISE } + + /** + * A noop implementation that always behaves as if it is referenced and cannot be released. + */ + RefCounted ALWAYS_REFERENCED = new RefCounted() { + @Override + public void incRef() {} + + @Override + public boolean tryIncRef() { + return true; + } + + @Override + public boolean decRef() { + return false; + } + + @Override + public boolean hasReferences() { + return true; + } + }; } diff --git a/libs/core/src/main/java/org/elasticsearch/core/Releasables.java b/libs/core/src/main/java/org/elasticsearch/core/Releasables.java index c2b48c4706573..0840258f0a86d 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/Releasables.java +++ b/libs/core/src/main/java/org/elasticsearch/core/Releasables.java @@ -11,6 +11,7 @@ import java.io.IOException; import java.io.UncheckedIOException; import java.util.Arrays; +import java.util.Iterator; import java.util.concurrent.atomic.AtomicReference; /** Utility methods to work with {@link Releasable}s. */ @@ -103,6 +104,24 @@ public String toString() { }; } + /** + * Similar to {@link #wrap(Iterable)} except that it accepts an {@link Iterator} of releasables. The resulting resource must therefore + * only be released once. + */ + public static Releasable wrap(final Iterator releasables) { + return assertOnce(wrap(new Iterable<>() { + @Override + public Iterator iterator() { + return releasables; + } + + @Override + public String toString() { + return releasables.toString(); + } + })); + } + /** @see #wrap(Iterable) */ public static Releasable wrap(final Releasable... 
releasables) { return new Releasable() { diff --git a/libs/core/src/test/java/org/elasticsearch/core/ReleasablesTests.java b/libs/core/src/test/java/org/elasticsearch/core/ReleasablesTests.java index 1520b0224c116..d54c9b8104e8b 100644 --- a/libs/core/src/test/java/org/elasticsearch/core/ReleasablesTests.java +++ b/libs/core/src/test/java/org/elasticsearch/core/ReleasablesTests.java @@ -107,5 +107,27 @@ public String toString() { assertEquals("wrapped[list]", wrapIterable.toString()); wrapIterable.close(); assertEquals(5, count.get()); + + final var wrapIterator = Releasables.wrap(new Iterator<>() { + final Iterator innerIterator = List.of(releasable, releasable, releasable).iterator(); + + @Override + public boolean hasNext() { + return innerIterator.hasNext(); + } + + @Override + public Releasable next() { + return innerIterator.next(); + } + + @Override + public String toString() { + return "iterator"; + } + }); + assertEquals("wrapped[iterator]", wrapIterator.toString()); + wrapIterator.close(); + assertEquals(8, count.get()); } } diff --git a/libs/core/src/test/java/org/elasticsearch/core/internal/provider/EmbeddedModulePathTests.java b/libs/core/src/test/java/org/elasticsearch/core/internal/provider/EmbeddedModulePathTests.java index f4c83f1a77902..4571591bd6649 100644 --- a/libs/core/src/test/java/org/elasticsearch/core/internal/provider/EmbeddedModulePathTests.java +++ b/libs/core/src/test/java/org/elasticsearch/core/internal/provider/EmbeddedModulePathTests.java @@ -31,7 +31,7 @@ import static org.elasticsearch.test.hamcrest.ModuleDescriptorMatchers.exportsOf; import static org.elasticsearch.test.hamcrest.ModuleDescriptorMatchers.opensOf; import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty; -import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresent; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresentWith; import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -51,16 +51,13 @@ public void testVersion() { assertThat(over, isEmpty()); over = EmbeddedModulePath.version("foo-1.2.jar"); - assertThat(over, isPresent()); - assertThat(over.get(), is(Version.parse("1.2"))); + assertThat(over, isPresentWith(Version.parse("1.2"))); over = EmbeddedModulePath.version("foo-bar-1.2.3-SNAPSHOT.jar"); - assertThat(over, isPresent()); - assertThat(over.get(), is(Version.parse("1.2.3-SNAPSHOT"))); + assertThat(over, isPresentWith(Version.parse("1.2.3-SNAPSHOT"))); over = EmbeddedModulePath.version("elasticsearch-8.3.0-SNAPSHOT.jar"); - assertThat(over, isPresent()); - assertThat(over.get(), is(Version.parse("8.3.0-SNAPSHOT"))); + assertThat(over, isPresentWith(Version.parse("8.3.0-SNAPSHOT"))); expectThrows(IAE, () -> EmbeddedModulePath.version("")); expectThrows(IAE, () -> EmbeddedModulePath.version("foo")); diff --git a/libs/core/src/test/java/org/elasticsearch/core/internal/provider/InMemoryModuleFinderTests.java b/libs/core/src/test/java/org/elasticsearch/core/internal/provider/InMemoryModuleFinderTests.java index 5f7cc6374339f..361b9ea7fae0c 100644 --- a/libs/core/src/test/java/org/elasticsearch/core/internal/provider/InMemoryModuleFinderTests.java +++ b/libs/core/src/test/java/org/elasticsearch/core/internal/provider/InMemoryModuleFinderTests.java @@ -29,12 +29,14 @@ import static org.elasticsearch.test.hamcrest.ModuleDescriptorMatchers.providesOf; import static org.elasticsearch.test.hamcrest.ModuleDescriptorMatchers.requiresOf; import 
static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresent; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresentWith; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.iterableWithSize; import static org.hamcrest.Matchers.not; public class InMemoryModuleFinderTests extends ESTestCase { @@ -45,7 +47,7 @@ public void testOfModuleDescriptor() { ModuleDescriptor fooMd = ModuleDescriptor.newModule("foo").build(); ModuleDescriptor barMd = ModuleDescriptor.newModule("bar").build(); var finder = InMemoryModuleFinder.of(fooMd, barMd); - assertThat(finder.findAll().size(), is(2)); + assertThat(finder.findAll(), hasSize(2)); var fooMod = finder.find("foo"); var barMod = finder.find("bar"); assertThat(fooMod, isPresent()); @@ -79,7 +81,7 @@ public void testAutoModuleEmbeddedJar() throws Exception { // automatic module, and no filtering var finder = InMemoryModuleFinder.of(Set.of(), fooRoot); - assertThat(finder.findAll().size(), is(1)); + assertThat(finder.findAll(), hasSize(1)); var mod = finder.find("foo"); assertThat(mod, isPresent()); assertThat(mod.get().descriptor().isAutomatic(), is(true)); @@ -135,7 +137,7 @@ private void testExplicitModuleEmbeddedJarVersionSpecific(int version) throws Ex try (FileSystem fileSystem = FileSystems.newFileSystem(outerJar, Map.of(), InMemoryModuleFinderTests.class.getClassLoader())) { Path mRoot = fileSystem.getPath("/a/b/m.jar"); var finder = InMemoryModuleFinder.of(Set.of(), mRoot); - assertThat(finder.findAll().size(), is(1)); + assertThat(finder.findAll(), hasSize(1)); var mref = finder.find("m"); assertThat(mref, isPresent()); assertThat(mref.get().descriptor().isAutomatic(), is(false)); @@ -161,7 +163,7 @@ public void testAutoModuleExplodedPath() throws Exception { // automatic module, and no filtering var finder = InMemoryModuleFinder.of(Set.of(), fooRoot); - assertThat(finder.findAll().size(), is(1)); + assertThat(finder.findAll(), hasSize(1)); var mod = finder.find("foo"); assertThat(mod, isPresent()); assertThat(mod.get().descriptor().isAutomatic(), is(true)); @@ -218,8 +220,7 @@ public void testFilterRequiresBasic() { { // filter the bar module var md = InMemoryModuleFinder.filterRequires(initialMd, Set.of("bar")); assertThat(md.name(), is("foo")); - assertThat(md.version(), isPresent()); - assertThat(md.version().get(), is(Version.parse("1.0"))); + assertThat(md.version(), isPresentWith(Version.parse("1.0"))); assertThat(md.requires(), hasItem(requiresOf("baz"))); assertThat(md.requires(), not(hasItem(requiresOf("bar")))); assertThat(md.exports(), containsInAnyOrder(exportsOf("p"), exportsOf("q", Set.of("baz")))); @@ -240,8 +241,8 @@ public void testFilterRequiresOpenModule() { assertThat(md.isOpen(), is(true)); assertThat(md.name(), equalTo("openMod")); assertThat(md.requires(), not(hasItem(requiresOf("bar")))); - assertThat(md.exports(), iterableWithSize(0)); - assertThat(md.opens(), iterableWithSize(0)); + assertThat(md.exports(), empty()); + assertThat(md.opens(), empty()); } public void testFilterRequiresAutoModule() { diff --git a/libs/geo/src/main/java/org/elasticsearch/geometry/utils/WellKnownText.java b/libs/geo/src/main/java/org/elasticsearch/geometry/utils/WellKnownText.java index da7386d6bb00f..d233dcc81a3fc 
100644 --- a/libs/geo/src/main/java/org/elasticsearch/geometry/utils/WellKnownText.java +++ b/libs/geo/src/main/java/org/elasticsearch/geometry/utils/WellKnownText.java @@ -24,6 +24,8 @@ import java.io.IOException; import java.io.StreamTokenizer; import java.io.StringReader; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; import java.text.ParseException; import java.util.ArrayList; import java.util.Collections; @@ -215,6 +217,196 @@ public Void visit(Rectangle rectangle) { } } + public static String fromWKB(byte[] wkb, int offset, int length) { + final StringBuilder builder = new StringBuilder(); + final ByteBuffer byteBuffer = ByteBuffer.wrap(wkb, offset, length); + parseGeometry(byteBuffer, builder); + assert byteBuffer.remaining() == 0; + return builder.toString(); + } + + private static void parseGeometry(ByteBuffer byteBuffer, StringBuilder sb) { + byteBuffer.order(byteBuffer.get() == 0 ? ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN); + final int type = byteBuffer.getInt(); + switch (type) { + case 1 -> parsePoint(byteBuffer, false, sb); + case 1001 -> parsePoint(byteBuffer, true, sb); + case 2 -> parseLine(byteBuffer, false, sb); + case 1002 -> parseLine(byteBuffer, true, sb); + case 3 -> parsePolygon(byteBuffer, false, sb); + case 1003 -> parsePolygon(byteBuffer, true, sb); + case 4 -> parseMultiPoint(byteBuffer, false, sb); + case 1004 -> parseMultiPoint(byteBuffer, true, sb); + case 5 -> parseMultiLine(byteBuffer, false, sb); + case 1005 -> parseMultiLine(byteBuffer, true, sb); + case 6 -> parseMultiPolygon(byteBuffer, false, sb); + case 1006 -> parseMultiPolygon(byteBuffer, true, sb); + case 7, 1007 -> parseGeometryCollection(byteBuffer, sb); + case 17 -> parseCircle(byteBuffer, false, sb); + case 1017 -> parseCircle(byteBuffer, true, sb); + case 18 -> parseBBox(byteBuffer, false, sb); + case 1018 -> parseBBox(byteBuffer, true, sb); + default -> throw new IllegalArgumentException("Unknown geometry type: " + type); + } + ; + } + + private static void writeCoordinate(ByteBuffer byteBuffer, boolean hasZ, StringBuilder sb) { + sb.append(byteBuffer.getDouble()).append(SPACE).append(byteBuffer.getDouble()); + if (hasZ) { + sb.append(SPACE).append(byteBuffer.getDouble()); + } + } + + private static void parsePoint(ByteBuffer byteBuffer, boolean hasZ, StringBuilder sb) { + sb.append("POINT").append(SPACE); + sb.append(LPAREN); + writeCoordinate(byteBuffer, hasZ, sb); + sb.append(RPAREN); + } + + private static void parseMultiPoint(ByteBuffer byteBuffer, boolean hasZ, StringBuilder sb) { + sb.append("MULTIPOINT").append(SPACE); + final int numPoints = byteBuffer.getInt(); + if (numPoints == 0) { + sb.append(EMPTY); + return; + } + sb.append(LPAREN); + for (int i = 0; i < numPoints; i++) { + byteBuffer.order(byteBuffer.get() == 0 ? ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN); + byteBuffer.getInt(); + writeCoordinate(byteBuffer, hasZ, sb); + if (i != numPoints - 1) { + sb.append(COMMA); + sb.append(SPACE); + } + } + sb.append(RPAREN); + } + + private static void parseLine(ByteBuffer byteBuffer, boolean hasZ, StringBuilder sb) { + sb.append("LINESTRING").append(SPACE); + parseLineString(byteBuffer, hasZ, sb); + } + + private static void parseMultiLine(ByteBuffer byteBuffer, boolean hasZ, StringBuilder sb) { + sb.append("MULTILINESTRING").append(SPACE); + final int numLines = byteBuffer.getInt(); + if (numLines == 0) { + sb.append(EMPTY); + return; + } + sb.append(LPAREN); + for (int i = 0; i < numLines; i++) { + byteBuffer.order(byteBuffer.get() == 0 ? 
ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN); + byteBuffer.getInt(); + parseLineString(byteBuffer, hasZ, sb); + if (i != numLines - 1) { + sb.append(COMMA); + } + } + sb.append(RPAREN); + } + + private static void parsePolygon(ByteBuffer byteBuffer, boolean hasZ, StringBuilder sb) { + sb.append("POLYGON").append(SPACE); + parseRings(byteBuffer, hasZ, sb, byteBuffer.getInt()); + + } + + private static void parseRings(ByteBuffer byteBuffer, boolean hasZ, StringBuilder sb, int numRings) { + if (numRings == 0) { + sb.append(EMPTY); + return; + } + sb.append(LPAREN); + parseLineString(byteBuffer, hasZ, sb); + for (int i = 1; i < numRings; i++) { + sb.append(COMMA); + sb.append(SPACE); + parseLineString(byteBuffer, hasZ, sb); + } + sb.append(RPAREN); + } + + private static void parseMultiPolygon(ByteBuffer byteBuffer, boolean hasZ, StringBuilder sb) { + sb.append("MULTIPOLYGON").append(SPACE); + final int numPolygons = byteBuffer.getInt(); + if (numPolygons == 0) { + sb.append(EMPTY); + return; + } + sb.append(LPAREN); + for (int i = 0; i < numPolygons; i++) { + byteBuffer.order(byteBuffer.get() == 0 ? ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN); + byteBuffer.getInt(); + parseRings(byteBuffer, hasZ, sb, byteBuffer.getInt()); + if (i != numPolygons - 1) { + sb.append(COMMA); + } + } + sb.append(RPAREN); + } + + private static void parseLineString(ByteBuffer byteBuffer, boolean hasZ, StringBuilder sb) { + final int length = byteBuffer.getInt(); + if (length == 0) { + sb.append(EMPTY); + return; + } + sb.append(LPAREN); + for (int i = 0; i < length; i++) { + writeCoordinate(byteBuffer, hasZ, sb); + if (i != length - 1) { + sb.append(COMMA); + sb.append(SPACE); + } + } + sb.append(RPAREN); + } + + private static void parseGeometryCollection(ByteBuffer byteBuffer, StringBuilder sb) { + sb.append("GEOMETRYCOLLECTION").append(SPACE); + final int numGeometries = byteBuffer.getInt(); + if (numGeometries == 0) { + sb.append(EMPTY); + return; + } + sb.append(LPAREN); + for (int i = 0; i < numGeometries; i++) { + parseGeometry(byteBuffer, sb); + if (i != numGeometries - 1) { + sb.append(COMMA); + } + } + sb.append(RPAREN); + } + + private static void parseCircle(ByteBuffer byteBuffer, boolean hasZ, StringBuilder sb) { + sb.append("CIRCLE").append(SPACE); + sb.append(LPAREN); + sb.append(byteBuffer.getDouble()).append(SPACE).append(byteBuffer.getDouble()); + final double r = byteBuffer.getDouble(); + if (hasZ) { + sb.append(SPACE).append(byteBuffer.getDouble()).append(SPACE).append(r); + } else { + sb.append(SPACE).append(r); + } + sb.append(RPAREN); + } + + private static void parseBBox(ByteBuffer byteBuffer, boolean hasZ, StringBuilder sb) { + sb.append("BBOX").append(SPACE); + sb.append(LPAREN); + sb.append(byteBuffer.getDouble()).append(COMMA).append(SPACE).append(byteBuffer.getDouble()); + sb.append(COMMA).append(SPACE).append(byteBuffer.getDouble()).append(COMMA).append(SPACE).append(byteBuffer.getDouble()); + if (hasZ) { + sb.append(COMMA).append(SPACE).append(byteBuffer.getDouble()).append(COMMA).append(SPACE).append(byteBuffer.getDouble()); + } + sb.append(RPAREN); + } + public static Geometry fromWKT(GeometryValidator validator, boolean coerce, String wkt) throws IOException, ParseException { StringReader reader = new StringReader(wkt); try { diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/utils/WKBTests.java b/libs/geo/src/test/java/org/elasticsearch/geometry/utils/WKBTests.java index 5369475e4ed4f..3c5264d177db1 100644 --- 
a/libs/geo/src/test/java/org/elasticsearch/geometry/utils/WKBTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/utils/WKBTests.java @@ -149,9 +149,10 @@ private void assertWKB(Geometry geometry) { final int offset = randomInt(extraBytes); System.arraycopy(b, 0, oversizeB, offset, b.length); assertEquals(geometry, WellKnownBinary.fromWKB(StandardValidator.instance(hasZ), randomBoolean(), oversizeB, offset, b.length)); + assertEquals(WellKnownText.toWKT(geometry), WellKnownText.fromWKB(oversizeB, offset, b.length)); } else { assertEquals(geometry, WellKnownBinary.fromWKB(StandardValidator.instance(hasZ), randomBoolean(), b)); + assertEquals(WellKnownText.toWKT(geometry), WellKnownText.fromWKB(b, 0, b.length)); } } - }
diff --git a/libs/plugin-scanner/build.gradle b/libs/plugin-scanner/build.gradle index d7138363e09fa..fbe9c02092577 100644 --- a/libs/plugin-scanner/build.gradle +++ b/libs/plugin-scanner/build.gradle @@ -19,8 +19,8 @@ dependencies { api project(':libs:elasticsearch-plugin-api') api project(":libs:elasticsearch-x-content") - api 'org.ow2.asm:asm:9.5' - api 'org.ow2.asm:asm-tree:9.5' + api 'org.ow2.asm:asm:9.6' + api 'org.ow2.asm:asm-tree:9.6' testImplementation "junit:junit:${versions.junit}" testImplementation(project(":test:framework")) {
diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/XContentImplUtils.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/XContentImplUtils.java new file mode 100644 index 0000000000000..3e3fc12f9c16a --- /dev/null +++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/XContentImplUtils.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.xcontent.provider; + +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.StreamReadConstraints; +import com.fasterxml.jackson.core.TSFBuilder; + +public class XContentImplUtils { + public static <F extends JsonFactory, B extends TSFBuilder<F, B>> F configure(TSFBuilder<F, B> builder) { + // jackson 2.15 introduced a max string length. We have other limits in place to constrain max doc size, + // so here we set to max value (2GiB) so as not to constrain further than those existing limits.
+ return builder.streamReadConstraints(StreamReadConstraints.builder().maxStringLength(Integer.MAX_VALUE).build()).build(); + } +} diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/cbor/CborXContentImpl.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/cbor/CborXContentImpl.java index 1b2a6d02822ba..2a8e7a4dfa12c 100644 --- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/cbor/CborXContentImpl.java +++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/cbor/CborXContentImpl.java @@ -21,6 +21,7 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.provider.XContentImplUtils; import java.io.IOException; import java.io.InputStream; @@ -45,7 +46,7 @@ public static XContent cborXContent() { } static { - cborFactory = new CBORFactory(); + cborFactory = XContentImplUtils.configure(CBORFactory.builder()); cborFactory.configure(CBORFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now... // Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.dataformat.cbor.CBORGenerator#close() method cborFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentImpl.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentImpl.java index cbd3e7378b6df..2e4925b4a853e 100644 --- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentImpl.java +++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentImpl.java @@ -13,7 +13,6 @@ import com.fasterxml.jackson.core.JsonFactoryBuilder; import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.StreamReadConstraints; import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -21,6 +20,7 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.provider.XContentImplUtils; import java.io.IOException; import java.io.InputStream; @@ -46,12 +46,7 @@ public static final XContent jsonXContent() { } static { - var builder = new JsonFactoryBuilder(); - // jackson 2.15 introduced a max string length. We have other limits in place to constrain max doc size, - // so here we set to max value (2GiB) so as not to constrain further than those existing limits. - builder.streamReadConstraints(StreamReadConstraints.builder().maxStringLength(Integer.MAX_VALUE).build()); - - jsonFactory = builder.build(); + jsonFactory = XContentImplUtils.configure(new JsonFactoryBuilder()); jsonFactory.configure(JsonGenerator.Feature.QUOTE_FIELD_NAMES, true); jsonFactory.configure(JsonParser.Feature.ALLOW_COMMENTS, true); jsonFactory.configure(JsonFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now... 
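The JSON, CBOR, Smile and YAML factories now all funnel through `XContentImplUtils.configure`, so the constraint override is easy to exercise in isolation. Below is a minimal standalone sketch of the same pattern (the class name and sample input are illustrative, not from this PR), assuming Jackson 2.15+ on the classpath:

```java
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonFactoryBuilder;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.StreamReadConstraints;

public class StreamConstraintsDemo {
    public static void main(String[] args) throws Exception {
        // Mirror XContentImplUtils.configure: lift Jackson 2.15's default cap on
        // string values so only Elasticsearch's own document-size limits apply.
        JsonFactory factory = new JsonFactoryBuilder().streamReadConstraints(
            StreamReadConstraints.builder().maxStringLength(Integer.MAX_VALUE).build()
        ).build();

        // A string value this long exceeds Jackson's default maxStringLength.
        String json = "{\"field\":\"" + "x".repeat(25_000_000) + "\"}";
        try (JsonParser parser = factory.createParser(json)) {
            while (parser.nextToken() != null) {
                // consume all tokens; with default constraints this loop would
                // abort with a StreamConstraintsException on the long string
            }
        }
        System.out.println("parsed without hitting Jackson's string length limit");
    }
}
```

With a stock `new JsonFactory()` on Jackson 2.15+, the same input would instead fail with a `StreamConstraintsException`, which is the behavior change the shared helper guards against for large documents.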
diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/smile/SmileXContentImpl.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/smile/SmileXContentImpl.java index 46f8aeec20f29..3c774c582c638 100644 --- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/smile/SmileXContentImpl.java +++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/smile/SmileXContentImpl.java @@ -21,6 +21,7 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.provider.XContentImplUtils; import java.io.IOException; import java.io.InputStream; @@ -45,7 +46,7 @@ public static XContent smileXContent() { } static { - smileFactory = new SmileFactory(); + smileFactory = XContentImplUtils.configure(SmileFactory.builder()); // for now, this is an overhead, might make sense for web sockets smileFactory.configure(SmileGenerator.Feature.ENCODE_BINARY_AS_7BIT, false); smileFactory.configure(SmileFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now... diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/yaml/YamlXContentImpl.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/yaml/YamlXContentImpl.java index 2d59b9588ab38..6a22508ba51c6 100644 --- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/yaml/YamlXContentImpl.java +++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/yaml/YamlXContentImpl.java @@ -11,6 +11,7 @@ import com.fasterxml.jackson.core.JsonEncoding; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; +import com.fasterxml.jackson.dataformat.yaml.YAMLParser; import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -18,6 +19,7 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.provider.XContentImplUtils; import java.io.IOException; import java.io.InputStream; @@ -42,7 +44,10 @@ public static XContent yamlXContent() { } static { - yamlFactory = new YAMLFactory(); + yamlFactory = XContentImplUtils.configure(YAMLFactory.builder()); + // YAMLFactory.builder() differs from new YAMLFactory() in that builder() does not set the default yaml parser feature flags. + // So set the only default feature flag, EMPTY_STRING_AS_NULL, here. 
+ yamlFactory.configure(YAMLParser.Feature.EMPTY_STRING_AS_NULL, true); yamlFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); yamlFactory.configure(JsonParser.Feature.USE_FAST_DOUBLE_PARSER, true); yamlXContent = new YamlXContentImpl(); diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/FilterXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/FilterXContentParser.java index 4a166a03ecdf0..96d186dd612b0 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/FilterXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/FilterXContentParser.java @@ -236,7 +236,10 @@ public boolean isClosed() { @Override public void close() throws IOException { - delegate().close(); + var closeable = delegate(); + if (closeable != null) { + closeable.close(); + } } @Override diff --git a/libs/x-content/src/test/java/org/elasticsearch/xcontent/ConstructingObjectParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/ConstructingObjectParserTests.java index 446fb21471961..f0703c626c583 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/xcontent/ConstructingObjectParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/ConstructingObjectParserTests.java @@ -102,22 +102,24 @@ public void testRandomOrder() throws Exception { } public void testMissingAllConstructorArgs() throws IOException { - XContentParser parser = createParser(JsonXContent.jsonXContent, "{ \"mineral\": 1 }"); - ConstructingObjectParser objectParser = randomBoolean() - ? HasCtorArguments.PARSER - : HasCtorArguments.PARSER_VEGETABLE_OPTIONAL; - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> objectParser.apply(parser, null)); - if (objectParser == HasCtorArguments.PARSER) { - assertEquals("Required [animal, vegetable]", e.getMessage()); - } else { - assertEquals("Required [animal]", e.getMessage()); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{ \"mineral\": 1 }")) { + ConstructingObjectParser objectParser = randomBoolean() + ? 
HasCtorArguments.PARSER + : HasCtorArguments.PARSER_VEGETABLE_OPTIONAL; + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> objectParser.apply(parser, null)); + if (objectParser == HasCtorArguments.PARSER) { + assertEquals("Required [animal, vegetable]", e.getMessage()); + } else { + assertEquals("Required [animal]", e.getMessage()); + } } } public void testMissingAllConstructorArgsButNotRequired() throws IOException { - XContentParser parser = createParser(JsonXContent.jsonXContent, "{ \"mineral\": 1 }"); - HasCtorArguments parsed = HasCtorArguments.PARSER_ALL_OPTIONAL.apply(parser, null); - assertEquals(1, parsed.mineral); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{ \"mineral\": 1 }")) { + HasCtorArguments parsed = HasCtorArguments.PARSER_ALL_OPTIONAL.apply(parser, null); + assertEquals(1, parsed.mineral); + } } public void testMissingSecondConstructorArg() throws IOException { diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java index 44fa62fef3fa0..ba838e9b4046f 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java @@ -18,7 +18,6 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.IteratorAndCurrent; -import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -28,23 +27,8 @@ import java.util.Map; import java.util.Objects; -import static org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation.declareMultiBucketAggregationFields; - public class InternalTimeSeries extends InternalMultiBucketAggregation { - private static final ObjectParser PARSER = new ObjectParser<>( - ParsedTimeSeries.class.getSimpleName(), - true, - ParsedTimeSeries::new - ); - static { - declareMultiBucketAggregationFields( - PARSER, - parser -> ParsedTimeSeries.ParsedBucket.fromXContent(parser, false), - parser -> ParsedTimeSeries.ParsedBucket.fromXContent(parser, true) - ); - } - /** * A bucket associated with a specific time series (identified by its key) */ diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceAggregationBuilder.java index d578aeff28011..0f277ecd6c478 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceAggregationBuilder.java @@ -110,7 +110,7 @@ private void read(StreamInput in) throws IOException { fields = (ArrayList) in.readGenericValue(); userValueTypeHint = in.readOptionalWriteable(ValueType::readFromStream); format = in.readOptionalString(); - missingMap = in.readMap(); + missingMap = in.readGenericMap(); } @Override diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java 
b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java index 1132507d520f4..eadf3839ed479 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java @@ -191,25 +191,6 @@ protected XContentBuilder internalXContent(XContentBuilder builder, Params param return builder; } - /** - * Used for serialization testing, since pipeline aggs serialize themselves as a named object but are parsed - * as a regular object with the name passed in. - */ - static MovFnPipelineAggregationBuilder parse(XContentParser parser) throws IOException { - parser.nextToken(); - if (parser.currentToken().equals(XContentParser.Token.START_OBJECT)) { - parser.nextToken(); - if (parser.currentToken().equals(XContentParser.Token.FIELD_NAME)) { - String aggName = parser.currentName(); - parser.nextToken(); // "moving_fn" - parser.nextToken(); // start_object - return PARSER.apply(parser, aggName); - } - } - - throw new IllegalStateException("Expected aggregation name but none found"); - } - @Override protected boolean overrideBucketsPath() { return true; diff --git a/modules/apm/NAMING.md b/modules/apm/NAMING.md index d56a0a441e764..31cad34d0470a 100644 --- a/modules/apm/NAMING.md +++ b/modules/apm/NAMING.md @@ -17,13 +17,13 @@ The **hierarchy** should be built by putting "more common" elements at the begin Example: * prefer `es.indices.docs.deleted.total `to `es.indices.total.deleted.docs` -* This way you can later add` es.indices.docs.count, es.indices.docs.ingested.total`, etc.) +* This way you can later add` es.indices.docs.total, es.indices.docs.ingested.total`, etc.) Prefix metrics: * Always use `es` as our root application name: this will give us a separate namespace and avoid any possibility of clashes with other metrics, and quick identification of Elasticsearch metrics on a dashboard. * Follow the root prefix with a simple module name, team or area of code. E.g. `snapshot, repositories, indices, threadpool`. Notice the mix of singular and plural - here this is intentional, to reflect closely the existing names in the codebase (e.g. `reindex` and `indices`) -* In building a metric name, look for existing prefixes (e.g. module name and/or area of code, e.g. `blob_cache`) and for existing sub-elements as well (e.g. `error`) to build a good, consistent name. E.g. prefer the consistent use of `error.count` rather than introducing `failures`, `failed.count` or `errors`.` ` -* Avoid having sub-metrics under a name that is also a metric (e.g. do not create names like `es.repositories.elements`,` es.repositories.elements.utilization`; use` es.repositories.element.count` and` es.repositories.element.utilization `instead). Such metrics are hard to handle well in Elasticsearch, or in some internal structures (e.g. nested maps). +* In building a metric name, look for existing prefixes (e.g. module name and/or area of code, e.g. `blob_cache`) and for existing sub-elements as well (e.g. `error`) to build a good, consistent name. E.g. prefer the consistent use of `error.total` rather than introducing `failures`, `failed.total` or `errors`.` ` +* Avoid having sub-metrics under a name that is also a metric (e.g. do not create names like `es.repositories.elements`,` es.repositories.elements.utilization`; use` es.repositories.element.total` and` es.repositories.element.utilization `instead). 
Such metrics are hard to handle well in Elasticsearch, or in some internal structures (e.g. nested maps). Keep the hierarchy compact: do not add elements if you don’t need to. There is a description field when registering a metric, prefer using that as an explanation. \ For example, if emitting existing metrics from node stats, do not use the whole “object path”, but choose the most significant terms. @@ -35,7 +35,7 @@ The metric name can be generated but there should be no dynamic or variable cont * Rule of thumb: you should be able to do aggregations (e.g. sum, avg) across a dimension of a given metric (without the need to aggregate over different metric names); on the other hand, any aggregation across any dimension of a given metric should be meaningful. * There might be exceptions of course. For example: * When similar metrics have significantly different implementations/related metrics. \ - If we have only common metrics like `es.repositories.element.count, es.repositories.element.utilization, es.repositories.writes.total` for every blob storage implementation, then `s3,azure` should be an attribute. \ + If we have only common metrics like `es.repositories.element.total, es.repositories.element.utilization, es.repositories.writes.total` for every blob storage implementation, then `s3,azure` should be an attribute. \ If we have specific metrics, e.g. for s3 storage classes, prefer using prefixed metric names for the specific metrics: es.repositories.s3.deep_archive_access.total (but keep `es.repositories.elements`) * When you have a finite and fixed set of names it might be OK to have them in the name (e.g. "`young`" and "`old`" for GC generations). @@ -47,12 +47,19 @@ Examples : * es.indices.storage.write.io, instead of es.indices.storage.write.bytes_per_sec * These can all be composed with the suffixes below, e.g. es.process.jvm.collection.time.total, es.indices.storage.write.total to represent the monotonic sum of time spent in GC and the total number of bytes written to indices respectively. -**Pluralization** and **suffixes**: -* If the metric is unit-less, use plural: `es.threadpool.activethreads`, `es.indices.docs` -* Use `total` as a suffix for monotonic sums (e.g. es.indices.docs.deleted.total) -* Use `count` to represent the count of "things" in the metric name/namespace (e.g. if we have `es.process.jvm.classes.loaded`, we will express the number of classes currently loaded by the JVM as es.process.jvm.classes.loaded.count, and the total number of classes loaded since the JVM started as es.process.jvm.classes.loaded.total +**Suffixes**: +* Use `total` as a suffix for monotonic metrics (always increasing counter) (e.g. es.indices.docs.deleted.total) + * Note: even though async counter is reporting a total cumulative value, it is till monotonic. +* Use `current` to represent the non-monotonic metrics (like gauges, upDownCounters) + * e.g. `current` vs `total` We can have es.process.jvm.classes.loaded.current to express the number of classes currently loaded by the JVM, and the total number of classes loaded since the JVM started as es.process.jvm.classes.loaded.total * Use `ratio` to represent the ratio of two measures with identical unit (or unit-less) or measures that represent a fraction in the range [0, 1]. Examples: * Exception: consider using utilization when the ratio is between a usage and its limit, e.g. the ratio between es.process.jvm.heap.usage and es.process.jvm.heap.limit should be es.process.jvm.heap.utilization +* Use `status` to represent enum like gauges. 
es.health.overall.red.status has values 1/0 to represent true/false +* Use `usage` to represent the amount used out of the known resource size +* Use `size` to represent the overall size of the resource measured +* Use `utilization` to represent a fraction of usage out of the overall size of a resource measured +* Use `histogram` to represent instruments of type histogram +* Use `time` to represent the passage of time * If it has a unit of measure, then it should not be plural (and also not include the unit of measure, see above). Examples: es.process.jvm.collection.time, es.process.mem.virtual.usage, es.indices.storage.utilization ### Attributes @@ -64,3 +71,10 @@ Attribute names should follow the same rules. In particular, these rules apply t * pluralization (when an attribute represents a measurement) For **pluralization**, when an attribute represents an entity, the attribute name should be singular (e.g.` es.security.realm_type`, not` es.security.realms_type` or `es.security.realm_types`), unless it represents a collection (e.g.` es.rest.request_headers`) + + +### List of previously registered metric names +You can inspect all previously registered metric names with +`./gradlew run -Dtests.es.logger.org.elasticsearch.telemetry.apm=debug` +This should help you find an already registered group that your +metric might fit into. diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java index cd6d3d209b3ed..382fc9417eac0 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java @@ -10,6 +10,8 @@ import io.opentelemetry.api.metrics.Meter; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.telemetry.apm.internal.metrics.DoubleAsyncCounterAdapter; @@ -47,6 +49,7 @@ * {@link #setProvider(Meter)} is used to change the provider for all existing meterRegistrar.
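As a concrete companion to the naming guidelines above, here is a minimal sketch of registering a guideline-compliant instrument. The class, method, and metric name below are hypothetical, invented purely for illustration; only the `MeterRegistry` and `LongCounter` interfaces come from the codebase:

```java
import org.elasticsearch.telemetry.metric.LongCounter;
import org.elasticsearch.telemetry.metric.MeterRegistry;

// Hypothetical owner of a metric; not part of this change.
class SnapshotRestoreMetrics {
    private final LongCounter restoreErrors;

    SnapshotRestoreMetrics(MeterRegistry registry) {
        // "es." prefix, lowercase dot-separated elements, and the allow-listed
        // "total" suffix for a monotonic counter, per the guidelines above.
        this.restoreErrors = registry.registerLongCounter(
            "es.snapshot.restore.error.total",  // made-up name for illustration
            "Total number of failed snapshot restores",
            "count"
        );
    }

    void onRestoreFailure() {
        restoreErrors.increment();  // monotonic: only ever goes up
    }
}
```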
*/ public class APMMeterRegistry implements MeterRegistry { + private static final Logger logger = LogManager.getLogger(APMMeterRegistry.class); private final Registrar<DoubleCounterAdapter> doubleCounters = new Registrar<>(); private final Registrar<DoubleAsyncCounterAdapter> doubleAsynchronousCounters = new Registrar<>(); private final Registrar<DoubleUpDownCounterAdapter> doubleUpDownCounters = new Registrar<>(); @@ -207,6 +210,7 @@ public LongHistogram getLongHistogram(String name) { private <T extends AbstractInstrument<?>> T register(Registrar<T> registrar, T adapter) { assert registrars.contains(registrar) : "usage of unknown registrar"; + logger.debug("Registering an instrument with name: {}", adapter.getName()); return registrar.register(adapter); } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java index 72c6ccf905873..9329556ff0f3f 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java @@ -11,6 +11,7 @@ import io.opentelemetry.api.metrics.Meter; import org.elasticsearch.core.Nullable; +import org.elasticsearch.telemetry.apm.internal.MetricNameValidator; import org.elasticsearch.telemetry.metric.Instrument; import java.security.AccessController; @@ -23,6 +24,7 @@ * An instrument that contains the name, description and unit. The delegate may be replaced when * the provider is updated. * Subclasses should implement the builder, which is used on initialization and provider updates. + * * @param <T> delegated instrument */ public abstract class AbstractInstrument<T> implements Instrument { @@ -50,19 +52,13 @@ void setProvider(@Nullable Meter meter) { } protected abstract static class Builder { - private static final int MAX_NAME_LENGTH = 255; protected final String name; protected final String description; protected final String unit; public Builder(String name, String description, String unit) { - if (name.length() > MAX_NAME_LENGTH) { - throw new IllegalArgumentException( - "Instrument name [" + name + "] with length [" + name.length() + "] exceeds maximum length [" + MAX_NAME_LENGTH + "]" - ); - } - this.name = Objects.requireNonNull(name); + this.name = MetricNameValidator.validate(name); this.description = Objects.requireNonNull(description); this.unit = Objects.requireNonNull(unit); } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidator.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidator.java new file mode 100644 index 0000000000000..1a698b778687c --- /dev/null +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidator.java @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1.
+ */ + +package org.elasticsearch.telemetry.apm.internal; + +import java.util.Objects; +import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +public class MetricNameValidator { + private static final Pattern ALLOWED_CHARACTERS = Pattern.compile("[a-z][a-z0-9_]*"); + static final Set<String> ALLOWED_SUFFIXES = Set.of( + "total", + "current", + "ratio", + "status" /* a workaround for enums */, + "usage", + "size", + "utilization", + "histogram", + "time" + ); + static final int MAX_METRIC_NAME_LENGTH = 255; + + static final int MAX_ELEMENT_LENGTH = 30; + static final int MAX_NUMBER_OF_ELEMENTS = 10; + + private MetricNameValidator() {} + + /** + * Validates a metric name as per the guidelines in NAMING.md + * + * @param metricName metric name to be validated + * @throws IllegalArgumentException an exception indicating an incorrect metric name + */ + public static String validate(String metricName) { + Objects.requireNonNull(metricName); + validateMaxMetricNameLength(metricName); + + String[] elements = metricName.split("\\."); + hasESPrefix(elements, metricName); + hasAtLeast3Elements(elements, metricName); + hasNotBreachNumberOfElementsLimit(elements, metricName); + lastElementIsFromAllowList(elements, metricName); + perElementValidations(elements, metricName); + return metricName; + } + + private static void validateMaxMetricNameLength(String metricName) { + if (metricName.length() > MAX_METRIC_NAME_LENGTH) { + throw new IllegalArgumentException( + "Metric name length " + + metricName.length() + + " is longer than max metric name length: " + + MAX_METRIC_NAME_LENGTH + + ". Name was: " + + metricName + ); + } + } + + private static void lastElementIsFromAllowList(String[] elements, String name) { + String lastElement = elements[elements.length - 1]; + if (ALLOWED_SUFFIXES.contains(lastElement) == false) { + throw new IllegalArgumentException( + "Metric name should end with one of [" + + ALLOWED_SUFFIXES.stream().collect(Collectors.joining(",")) + + "] " + + "Last element was: " + + lastElement + + ". " + + "Name was: " + + name + ); + } + } + + private static void hasNotBreachNumberOfElementsLimit(String[] elements, String name) { + if (elements.length > MAX_NUMBER_OF_ELEMENTS) { + throw new IllegalArgumentException( + "Metric name should have at most " + MAX_NUMBER_OF_ELEMENTS + " elements. It had: " + elements.length + ". The name was: " + name + ); + } + } + + private static void hasAtLeast3Elements(String[] elements, String name) { + if (elements.length < 3) { + throw new IllegalArgumentException( + "Metric name must consist of at least 3 elements: an es. prefix, a group and a name. The name was: " + name + ); + } + } + + private static void hasESPrefix(String[] elements, String name) { + if (elements[0].equals("es") == false) { + throw new IllegalArgumentException( + "Metric name should start with \"es.\" prefix and use \".\" as a separator. Name was: " + name + ); + } + } + + private static void perElementValidations(String[] elements, String name) { + for (String element : elements) { + hasOnlyAllowedCharacters(element, name); + hasNotBreachLengthLimit(element, name); + } + } + + private static void hasNotBreachLengthLimit(String element, String name) { + if (element.length() > MAX_ELEMENT_LENGTH) { + throw new IllegalArgumentException( + "Metric name's element should not be longer than " + + MAX_ELEMENT_LENGTH + + " characters. Was: " + + element.length() + + ".
Name was: " + + name + ); + } + } + + private static void hasOnlyAllowedCharacters(String element, String name) { + Matcher matcher = ALLOWED_CHARACTERS.matcher(element); + if (matcher.matches() == false) { + throw new IllegalArgumentException( + "Metric name should only use [a-z][a-z0-9_]* characters. " + + "Element does not match: \"" + + element + + "\". " + + "Name was: " + + name + ); + } + } +} diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java index 49fdc44681aa3..f021eb61ca753 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java @@ -30,11 +30,11 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.tasks.Task; -import org.elasticsearch.telemetry.tracing.SpanId; +import org.elasticsearch.telemetry.tracing.TraceContext; +import org.elasticsearch.telemetry.tracing.Traceable; import java.security.AccessController; import java.security.PrivilegedAction; @@ -61,7 +61,7 @@ public class APMTracer extends AbstractLifecycleComponent implements org.elastic private static final Logger logger = LogManager.getLogger(APMTracer.class); /** Holds in-flight span information. */ - private final Map spans = ConcurrentCollections.newConcurrentMap(); + private final Map spans = ConcurrentCollections.newConcurrentMap(); private volatile boolean enabled; private volatile APMServices services; @@ -160,8 +160,9 @@ private void destroyApmServices() { } @Override - public void startTrace(ThreadContext threadContext, SpanId spanId, String spanName, @Nullable Map attributes) { - assert threadContext != null; + public void startTrace(TraceContext traceContext, Traceable traceable, String spanName, @Nullable Map attributes) { + assert traceContext != null; + String spanId = traceable.getSpanId(); assert spanId != null; assert spanName != null; @@ -182,21 +183,21 @@ public void startTrace(ThreadContext threadContext, SpanId spanId, String spanNa // A span can have a parent span, which here is modelled though a parent span context. // Setting this is important for seeing a complete trace in the APM UI. 
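To make the validator's contract concrete, here is a short sketch of names it accepts and rejects, assuming only the `MetricNameValidator` class added above (every example name is made up):

```java
import org.elasticsearch.telemetry.apm.internal.MetricNameValidator;

public class MetricNameValidatorDemo {
    public static void main(String[] args) {
        // Accepted: "es." prefix, 3+ dot-separated lowercase elements, allow-listed suffix.
        MetricNameValidator.validate("es.indices.docs.deleted.total");

        // Each of these violates one rule and throws IllegalArgumentException:
        reject("indices.docs.total");       // missing the "es." prefix
        reject("es.total");                 // fewer than 3 elements
        reject("es.indices.docs.count");    // "count" is not an allowed suffix
        reject("es.myIndices.docs.total");  // uppercase characters are not allowed
    }

    private static void reject(String name) {
        try {
            MetricNameValidator.validate(name);
            throw new AssertionError("expected rejection of: " + name);
        } catch (IllegalArgumentException expected) {
            System.out.println("rejected " + name + ": " + expected.getMessage());
        }
    }
}
```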
- final Context parentContext = getParentContext(threadContext); + final Context parentContext = getParentContext(traceContext); if (parentContext != null) { spanBuilder.setParent(parentContext); } - setSpanAttributes(threadContext, attributes, spanBuilder); + setSpanAttributes(traceContext, attributes, spanBuilder); - Instant startTime = threadContext.getTransient(Task.TRACE_START_TIME); + Instant startTime = traceContext.getTransient(Task.TRACE_START_TIME); if (startTime != null) { spanBuilder.setStartTimestamp(startTime); } final Span span = spanBuilder.startSpan(); final Context contextForNewSpan = Context.current().with(span); - updateThreadContext(threadContext, services, contextForNewSpan); + updateThreadContext(traceContext, services, contextForNewSpan); return contextForNewSpan; })); @@ -221,29 +222,29 @@ public void startTrace(String name, Map attributes) { spanBuilder.startSpan(); } - private static void updateThreadContext(ThreadContext threadContext, APMServices services, Context context) { + private static void updateThreadContext(TraceContext traceContext, APMServices services, Context context) { // The new span context can be used as the parent context directly within the same Java process... - threadContext.putTransient(Task.APM_TRACE_CONTEXT, context); + traceContext.putTransient(Task.APM_TRACE_CONTEXT, context); - // ...whereas for tasks sent to other ES nodes, we need to put trace HTTP headers into the threadContext so + // ...whereas for tasks sent to other ES nodes, we need to put trace HTTP headers into the traceContext so // that they can be propagated. - services.openTelemetry.getPropagators().getTextMapPropagator().inject(context, threadContext, (tc, key, value) -> { + services.openTelemetry.getPropagators().getTextMapPropagator().inject(context, traceContext, (tc, key, value) -> { if (isSupportedContextKey(key)) { tc.putHeader(key, value); } }); } - private Context getParentContext(ThreadContext threadContext) { + private Context getParentContext(TraceContext traceContext) { // https://github.com/open-telemetry/opentelemetry-java/discussions/2884#discussioncomment-381870 // If you just want to propagate across threads within the same process, you don't need context propagators (extract/inject). // You can just pass the Context object directly to another thread (it is immutable and thus thread-safe). // Attempt to fetch a local parent context first, otherwise look for a remote parent - Context parentContext = threadContext.getTransient("parent_" + Task.APM_TRACE_CONTEXT); + Context parentContext = traceContext.getTransient("parent_" + Task.APM_TRACE_CONTEXT); if (parentContext == null) { - final String traceParentHeader = threadContext.getTransient("parent_" + Task.TRACE_PARENT_HTTP_HEADER); - final String traceStateHeader = threadContext.getTransient("parent_" + Task.TRACE_STATE); + final String traceParentHeader = traceContext.getTransient("parent_" + Task.TRACE_PARENT_HTTP_HEADER); + final String traceStateHeader = traceContext.getTransient("parent_" + Task.TRACE_STATE); if (traceParentHeader != null) { final Map traceContextMap = Maps.newMapWithExpectedSize(2); @@ -276,12 +277,12 @@ private Context getParentContext(ThreadContext threadContext) { * However, if a scope is active, then the APM agent can capture additional information, so this method * exists to make it possible to use scopes in the few situation where it makes sense. * - * @param spanId the ID of a currently-open span for which to open a scope. 
+ * @param traceable provides the ID of a currently-open span for which to open a scope. * @return a method to close the scope when you are finished with it. */ @Override - public Releasable withScope(SpanId spanId) { - final Context context = spans.get(spanId); + public Releasable withScope(Traceable traceable) { + final Context context = spans.get(traceable.getSpanId()); if (context != null) { var scope = AccessController.doPrivileged((PrivilegedAction) context::makeCurrent); return scope::close; @@ -327,60 +328,60 @@ private void setSpanAttributes(@Nullable Map spanAttributes, Spa spanBuilder.setAttribute(org.elasticsearch.telemetry.tracing.Tracer.AttributeKeys.CLUSTER_NAME, clusterName); } - private void setSpanAttributes(ThreadContext threadContext, @Nullable Map spanAttributes, SpanBuilder spanBuilder) { + private void setSpanAttributes(TraceContext traceContext, @Nullable Map spanAttributes, SpanBuilder spanBuilder) { setSpanAttributes(spanAttributes, spanBuilder); - final String xOpaqueId = threadContext.getHeader(Task.X_OPAQUE_ID_HTTP_HEADER); + final String xOpaqueId = traceContext.getHeader(Task.X_OPAQUE_ID_HTTP_HEADER); if (xOpaqueId != null) { spanBuilder.setAttribute("es.x-opaque-id", xOpaqueId); } } @Override - public void addError(SpanId spanId, Throwable throwable) { - final var span = Span.fromContextOrNull(spans.get(spanId)); + public void addError(Traceable traceable, Throwable throwable) { + final var span = Span.fromContextOrNull(spans.get(traceable.getSpanId())); if (span != null) { span.recordException(throwable); } } @Override - public void setAttribute(SpanId spanId, String key, boolean value) { - final var span = Span.fromContextOrNull(spans.get(spanId)); + public void setAttribute(Traceable traceable, String key, boolean value) { + final var span = Span.fromContextOrNull(spans.get(traceable.getSpanId())); if (span != null) { span.setAttribute(key, value); } } @Override - public void setAttribute(SpanId spanId, String key, double value) { - final var span = Span.fromContextOrNull(spans.get(spanId)); + public void setAttribute(Traceable traceable, String key, double value) { + final var span = Span.fromContextOrNull(spans.get(traceable.getSpanId())); if (span != null) { span.setAttribute(key, value); } } @Override - public void setAttribute(SpanId spanId, String key, long value) { - final var span = Span.fromContextOrNull(spans.get(spanId)); + public void setAttribute(Traceable traceable, String key, long value) { + final var span = Span.fromContextOrNull(spans.get(traceable.getSpanId())); if (span != null) { span.setAttribute(key, value); } } @Override - public void setAttribute(SpanId spanId, String key, String value) { - final var span = Span.fromContextOrNull(spans.get(spanId)); + public void setAttribute(Traceable traceable, String key, String value) { + final var span = Span.fromContextOrNull(spans.get(traceable.getSpanId())); if (span != null) { span.setAttribute(key, value); } } @Override - public void stopTrace(SpanId spanId) { - final var span = Span.fromContextOrNull(spans.remove(spanId)); + public void stopTrace(Traceable traceable) { + final var span = Span.fromContextOrNull(spans.remove(traceable.getSpanId())); if (span != null) { - logger.trace("Finishing trace [{}]", spanId); + logger.trace("Finishing trace [{}]", traceable); AccessController.doPrivileged((PrivilegedAction) () -> { span.end(); return null; @@ -400,8 +401,8 @@ public void stopTrace() { } @Override - public void addEvent(SpanId spanId, String eventName) { - final var span = 
Span.fromContextOrNull(spans.get(spanId)); + public void addEvent(Traceable traceable, String eventName) { + final var span = Span.fromContextOrNull(spans.get(traceable.getSpanId())); if (span != null) { span.addEvent(eventName); } @@ -425,7 +426,7 @@ private static boolean isSupportedContextKey(String key) { } // VisibleForTesting - Map getSpans() { + Map getSpans() { return spans; } diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java index 778ca108dc5fe..8144b8f9a33b4 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java @@ -35,10 +35,8 @@ import java.util.List; import java.util.function.Supplier; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.sameInstance; public class APMMeterRegistryTests extends ESTestCase { @@ -84,8 +82,8 @@ public void testMeterIsOverridden() { public void testLookupByName() { var apmMeter = new APMMeterService(TELEMETRY_ENABLED, () -> testOtel, () -> noopOtel).getMeterRegistry(); - DoubleCounter registeredCounter = apmMeter.registerDoubleCounter("name", "desc", "unit"); - DoubleCounter lookedUpCounter = apmMeter.getDoubleCounter("name"); + DoubleCounter registeredCounter = apmMeter.registerDoubleCounter("es.test.name.total", "desc", "unit"); + DoubleCounter lookedUpCounter = apmMeter.getDoubleCounter("es.test.name.total"); assertThat(lookedUpCounter, sameInstance(registeredCounter)); } @@ -103,19 +101,6 @@ public void testNoopIsSetOnStop() { assertThat(meter, sameInstance(noopOtel)); } - public void testMaxNameLength() { - APMMeterService apmMeter = new APMMeterService(TELEMETRY_ENABLED, () -> testOtel, () -> noopOtel); - apmMeter.start(); - int max_length = 255; - var counter = apmMeter.getMeterRegistry().registerLongCounter("a".repeat(max_length), "desc", "count"); - assertThat(counter, instanceOf(LongCounter.class)); - IllegalArgumentException iae = expectThrows( - IllegalArgumentException.class, - () -> apmMeter.getMeterRegistry().registerLongCounter("a".repeat(max_length + 1), "desc", "count") - ); - assertThat(iae.getMessage(), containsString("exceeds maximum length [255]")); - } - public void testAllInstrumentsSwitchProviders() { TestAPMMeterService apmMeter = new TestAPMMeterService( Settings.builder().put(APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING.getKey(), false).build(), @@ -125,18 +110,18 @@ public void testAllInstrumentsSwitchProviders() { APMMeterRegistry registry = apmMeter.getMeterRegistry(); Supplier doubleObserver = () -> new DoubleWithAttributes(1.5, Collections.emptyMap()); - DoubleCounter dc = registry.registerDoubleCounter("dc", "", ""); - DoubleUpDownCounter dudc = registry.registerDoubleUpDownCounter("dudc", "", ""); - DoubleHistogram dh = registry.registerDoubleHistogram("dh", "", ""); - DoubleAsyncCounter dac = registry.registerDoubleAsyncCounter("dac", "", "", doubleObserver); - DoubleGauge dg = registry.registerDoubleGauge("dg", "", "", doubleObserver); + DoubleCounter dc = registry.registerDoubleCounter("es.test.dc.total", "", ""); + DoubleUpDownCounter dudc = registry.registerDoubleUpDownCounter("es.test.dudc.current", "", ""); + DoubleHistogram dh = 
registry.registerDoubleHistogram("es.test.dh.histogram", "", ""); + DoubleAsyncCounter dac = registry.registerDoubleAsyncCounter("es.test.dac.total", "", "", doubleObserver); + DoubleGauge dg = registry.registerDoubleGauge("es.test.dg.current", "", "", doubleObserver); Supplier longObserver = () -> new LongWithAttributes(100, Collections.emptyMap()); - LongCounter lc = registry.registerLongCounter("lc", "", ""); - LongUpDownCounter ludc = registry.registerLongUpDownCounter("ludc", "", ""); - LongHistogram lh = registry.registerLongHistogram("lh", "", ""); - LongAsyncCounter lac = registry.registerLongAsyncCounter("lac", "", "", longObserver); - LongGauge lg = registry.registerLongGauge("lg", "", "", longObserver); + LongCounter lc = registry.registerLongCounter("es.test.lc.total", "", ""); + LongUpDownCounter ludc = registry.registerLongUpDownCounter("es.test.ludc.total", "", ""); + LongHistogram lh = registry.registerLongHistogram("es.test.lh.histogram", "", ""); + LongAsyncCounter lac = registry.registerLongAsyncCounter("es.test.lac.total", "", "", longObserver); + LongGauge lg = registry.registerLongGauge("es.test.lg.current", "", "", longObserver); apmMeter.setEnabled(true); diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/MeterRegistryConcurrencyTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/MeterRegistryConcurrencyTests.java index e8dafd996f5b0..11951a9bf1072 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/MeterRegistryConcurrencyTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/MeterRegistryConcurrencyTests.java @@ -28,7 +28,7 @@ import static org.hamcrest.Matchers.sameInstance; public class MeterRegistryConcurrencyTests extends ESTestCase { - private final String name = "name"; + private final String name = "es.test.name.total"; private final String description = "desc"; private final String unit = "kg"; private final Meter noopMeter = OpenTelemetry.noop().getMeter("noop"); diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidatorTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidatorTests.java new file mode 100644 index 0000000000000..64f78d0af494c --- /dev/null +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidatorTests.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.telemetry.apm.internal; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class MetricNameValidatorTests extends ESTestCase { + public void testMetricNameNotNull() { + String metricName = "es.somemodule.somemetric.total"; + assertThat(MetricNameValidator.validate(metricName), equalTo(metricName)); + + expectThrows(NullPointerException.class, () -> MetricNameValidator.validate(null)); + } + + public void testMaxMetricNameLength() { + MetricNameValidator.validate(metricNameWithLength(255)); + + expectThrows(IllegalArgumentException.class, () -> MetricNameValidator.validate(metricNameWithLength(256))); + } + + public void testESPrefixAndDotSeparator() { + MetricNameValidator.validate("es.somemodule.somemetric.total"); + + expectThrows(IllegalArgumentException.class, () -> MetricNameValidator.validate("somemodule.somemetric.total")); + // verify . is a separator + expectThrows(IllegalArgumentException.class, () -> MetricNameValidator.validate("es_somemodule_somemetric_total")); + expectThrows(IllegalArgumentException.class, () -> MetricNameValidator.validate("es_somemodule.somemetric.total")); + } + + public void testNameElementRegex() { + MetricNameValidator.validate("es.somemodulename0.somemetric.total"); + MetricNameValidator.validate("es.some_module_name0.somemetric.total"); + MetricNameValidator.validate("es.s.somemetric.total"); + + expectThrows(IllegalArgumentException.class, () -> MetricNameValidator.validate("es.someModuleName0.somemetric.total")); + expectThrows(IllegalArgumentException.class, () -> MetricNameValidator.validate("es.SomeModuleName.somemetric.total")); + expectThrows(IllegalArgumentException.class, () -> MetricNameValidator.validate("es.0some_module_name0.somemetric.total")); + expectThrows(IllegalArgumentException.class, () -> MetricNameValidator.validate("es.some_#_name0.somemetric.total")); + expectThrows(IllegalArgumentException.class, () -> MetricNameValidator.validate("es.some-name0.somemetric.total")); + } + + public void testNameHas3Elements() { + MetricNameValidator.validate("es.group.total"); + MetricNameValidator.validate("es.group.subgroup.total"); + + expectThrows(IllegalArgumentException.class, () -> MetricNameValidator.validate("es")); + expectThrows(IllegalArgumentException.class, () -> MetricNameValidator.validate("es.")); + expectThrows(IllegalArgumentException.class, () -> MetricNameValidator.validate("es.sth")); + } + + public void testNumberOfElementsLimit() { + MetricNameValidator.validate("es.a2.a3.a4.a5.a6.a7.a8.a9.total"); + + expectThrows(IllegalArgumentException.class, () -> MetricNameValidator.validate("es.a2.a3.a4.a5.a6.a7.a8.a9.a10.total")); + } + + public void testElementLengthLimit() { + MetricNameValidator.validate("es." + "a".repeat(MetricNameValidator.MAX_ELEMENT_LENGTH) + ".total"); + + expectThrows( + IllegalArgumentException.class, + () -> MetricNameValidator.validate("es." + "a".repeat(MetricNameValidator.MAX_ELEMENT_LENGTH + 1) + ".total") + ); + } + + public void testLastElementAllowList() { + for (String suffix : MetricNameValidator.ALLOWED_SUFFIXES) { + MetricNameValidator.validate("es.somemodule.somemetric." 
+ suffix); + } + expectThrows(IllegalArgumentException.class, () -> MetricNameValidator.validate("es.somemodule.somemetric.some_other_suffix")); + } + + public static String metricNameWithLength(int length) { + int prefixAndSuffix = "es.".length() + ".utilization".length(); + assert length > prefixAndSuffix : "length too short"; + + var remainingChars = length - prefixAndSuffix; + StringBuilder metricName = new StringBuilder("es."); + var i = 0; + while (i < remainingChars) { + metricName.append("a"); + i++; + for (int j = 0; j < MetricNameValidator.MAX_ELEMENT_LENGTH - 1 && i < remainingChars; j++) { + metricName.append("x"); + i++; + } + metricName.append("."); + i++; + + } + metricName.append("utilization"); + return metricName.toString(); + } +} diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/AsyncCountersAdapterTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/AsyncCountersAdapterTests.java index 3e23b741e01e5..24b40063cd636 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/AsyncCountersAdapterTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/AsyncCountersAdapterTests.java @@ -38,7 +38,7 @@ public void init() { // testing that a value reported is then used in a callback public void testLongAsyncCounter() throws Exception { AtomicReference attrs = new AtomicReference<>(); - LongAsyncCounter longAsyncCounter = registry.registerLongAsyncCounter("name", "desc", "unit", attrs::get); + LongAsyncCounter longAsyncCounter = registry.registerLongAsyncCounter("es.test.name.total", "desc", "unit", attrs::get); attrs.set(new LongWithAttributes(1L, Map.of("k", 1L))); @@ -70,7 +70,7 @@ public void testLongAsyncCounter() throws Exception { public void testDoubleAsyncAdapter() throws Exception { AtomicReference attrs = new AtomicReference<>(); - DoubleAsyncCounter doubleAsyncCounter = registry.registerDoubleAsyncCounter("name", "desc", "unit", attrs::get); + DoubleAsyncCounter doubleAsyncCounter = registry.registerDoubleAsyncCounter("es.test.name.total", "desc", "unit", attrs::get); attrs.set(new DoubleWithAttributes(1.0, Map.of("k", 1.0))); @@ -102,7 +102,7 @@ public void testDoubleAsyncAdapter() throws Exception { public void testNullGaugeRecord() throws Exception { DoubleAsyncCounter dcounter = registry.registerDoubleAsyncCounter( - "name", + "es.test.name.total", "desc", "unit", new AtomicReference()::get @@ -112,7 +112,7 @@ public void testNullGaugeRecord() throws Exception { assertThat(metrics, hasSize(0)); LongAsyncCounter lcounter = registry.registerLongAsyncCounter( - "name", + "es.test.name.total", "desc", "unit", new AtomicReference()::get diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/GaugeAdapterTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/GaugeAdapterTests.java index 10f2d58768d48..d5e605df1d096 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/GaugeAdapterTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/GaugeAdapterTests.java @@ -38,7 +38,7 @@ public void init() { // testing that a value reported is then used in a callback public void testLongGaugeRecord() throws Exception { AtomicReference attrs = new AtomicReference<>(); - LongGauge gauge = registry.registerLongGauge("name", "desc", "unit", attrs::get); + LongGauge gauge = 
registry.registerLongGauge("es.test.name.total", "desc", "unit", attrs::get); attrs.set(new LongWithAttributes(1L, Map.of("k", 1L))); @@ -71,7 +71,7 @@ public void testLongGaugeRecord() throws Exception { // testing that a value reported is then used in a callback public void testDoubleGaugeRecord() throws Exception { AtomicReference attrs = new AtomicReference<>(); - DoubleGauge gauge = registry.registerDoubleGauge("name", "desc", "unit", attrs::get); + DoubleGauge gauge = registry.registerDoubleGauge("es.test.name.total", "desc", "unit", attrs::get); attrs.set(new DoubleWithAttributes(1.0d, Map.of("k", 1L))); @@ -102,12 +102,17 @@ public void testDoubleGaugeRecord() throws Exception { } public void testNullGaugeRecord() throws Exception { - DoubleGauge dgauge = registry.registerDoubleGauge("name", "desc", "unit", new AtomicReference()::get); + DoubleGauge dgauge = registry.registerDoubleGauge( + "es.test.name.total", + "desc", + "unit", + new AtomicReference()::get + ); otelMeter.collectMetrics(); List metrics = otelMeter.getRecorder().getMeasurements(dgauge); assertThat(metrics, hasSize(0)); - LongGauge lgauge = registry.registerLongGauge("name", "desc", "unit", new AtomicReference()::get); + LongGauge lgauge = registry.registerLongGauge("es.test.name.total", "desc", "unit", new AtomicReference()::get); otelMeter.collectMetrics(); metrics = otelMeter.getRecorder().getMeasurements(lgauge); assertThat(metrics, hasSize(0)); diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracerTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracerTests.java index 8cb94b782756d..04a4e1b3f3a34 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracerTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracerTests.java @@ -22,13 +22,14 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.tasks.Task; import org.elasticsearch.telemetry.apm.internal.APMAgentSettings; -import org.elasticsearch.telemetry.tracing.SpanId; +import org.elasticsearch.telemetry.tracing.Traceable; import org.elasticsearch.test.ESTestCase; import java.time.Instant; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.concurrent.TimeUnit; import java.util.stream.Stream; @@ -44,9 +45,9 @@ public class APMTracerTests extends ESTestCase { - private static final SpanId SPAN_ID1 = SpanId.forBareString("id1"); - private static final SpanId SPAN_ID2 = SpanId.forBareString("id2"); - private static final SpanId SPAN_ID3 = SpanId.forBareString("id3"); + private static final Traceable TRACEABLE1 = new TestTraceable("id1"); + private static final Traceable TRACEABLE2 = new TestTraceable("id2"); + private static final Traceable TRACEABLE3 = new TestTraceable("id3"); /** * Check that the tracer doesn't create spans when tracing is disabled. 
@@ -55,7 +56,7 @@ public void test_onTraceStarted_withTracingDisabled_doesNotStartTrace() { Settings settings = Settings.builder().put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), false).build(); APMTracer apmTracer = buildTracer(settings); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID1, "name1", null); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE1, "name1", null); assertThat(apmTracer.getSpans(), anEmptyMap()); } @@ -70,7 +71,7 @@ public void test_onTraceStarted_withSpanNameOmitted_doesNotStartTrace() { .build(); APMTracer apmTracer = buildTracer(settings); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID1, "name1", null); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE1, "name1", null); assertThat(apmTracer.getSpans(), anEmptyMap()); } @@ -82,10 +83,10 @@ public void test_onTraceStarted_startsTrace() { Settings settings = Settings.builder().put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), true).build(); APMTracer apmTracer = buildTracer(settings); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID1, "name1", null); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE1, "name1", null); assertThat(apmTracer.getSpans(), aMapWithSize(1)); - assertThat(apmTracer.getSpans(), hasKey(SPAN_ID1)); + assertThat(apmTracer.getSpans(), hasKey(TRACEABLE1.getSpanId())); } /** @@ -99,10 +100,10 @@ public void test_onTraceStartedWithStartTime_startsTrace() { // 1_000_000L because of "toNanos" conversions that overflow for large long millis Instant spanStartTime = Instant.ofEpochMilli(randomLongBetween(0, Long.MAX_VALUE / 1_000_000L)); threadContext.putTransient(Task.TRACE_START_TIME, spanStartTime); - apmTracer.startTrace(threadContext, SPAN_ID1, "name1", null); + apmTracer.startTrace(threadContext, TRACEABLE1, "name1", null); assertThat(apmTracer.getSpans(), aMapWithSize(1)); - assertThat(apmTracer.getSpans(), hasKey(SPAN_ID1)); + assertThat(apmTracer.getSpans(), hasKey(TRACEABLE1.getSpanId())); assertThat(((SpyAPMTracer) apmTracer).getSpanStartTime("name1"), is(spanStartTime)); } @@ -113,8 +114,8 @@ public void test_onTraceStopped_stopsTrace() { Settings settings = Settings.builder().put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), true).build(); APMTracer apmTracer = buildTracer(settings); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID1, "name1", null); - apmTracer.stopTrace(SPAN_ID1); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE1, "name1", null); + apmTracer.stopTrace(TRACEABLE1); assertThat(apmTracer.getSpans(), anEmptyMap()); } @@ -131,7 +132,7 @@ public void test_whenTraceStarted_threadContextIsPopulated() { APMTracer apmTracer = buildTracer(settings); ThreadContext threadContext = new ThreadContext(settings); - apmTracer.startTrace(threadContext, SPAN_ID1, "name1", null); + apmTracer.startTrace(threadContext, TRACEABLE1, "name1", null); assertThat(threadContext.getTransient(Task.APM_TRACE_CONTEXT), notNullValue()); } @@ -152,13 +153,13 @@ public void test_whenTraceStarted_andSpanNameIncluded_thenSpanIsStarted() { .build(); APMTracer apmTracer = buildTracer(settings); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID1, "name-aaa", null); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID2, "name-bbb", null); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID3, "name-ccc", null); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE1, "name-aaa", null); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE2, "name-bbb", null); + 
apmTracer.startTrace(new ThreadContext(settings), TRACEABLE3, "name-ccc", null); - assertThat(apmTracer.getSpans(), hasKey(SPAN_ID1)); - assertThat(apmTracer.getSpans(), hasKey(SPAN_ID2)); - assertThat(apmTracer.getSpans(), not(hasKey(SPAN_ID3))); + assertThat(apmTracer.getSpans(), hasKey(TRACEABLE1.getSpanId())); + assertThat(apmTracer.getSpans(), hasKey(TRACEABLE2.getSpanId())); + assertThat(apmTracer.getSpans(), not(hasKey(TRACEABLE3.getSpanId()))); } /** @@ -175,7 +176,7 @@ public void test_whenTraceStarted_andSpanNameIncludedAndExcluded_thenSpanIsNotSt .build(); APMTracer apmTracer = buildTracer(settings); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID1, "name-aaa", null); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE1, "name-aaa", null); assertThat(apmTracer.getSpans(), not(hasKey("id1"))); } @@ -197,13 +198,13 @@ public void test_whenTraceStarted_andSpanNameExcluded_thenSpanIsNotStarted() { .build(); APMTracer apmTracer = buildTracer(settings); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID1, "name-aaa", null); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID2, "name-bbb", null); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID3, "name-ccc", null); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE1, "name-aaa", null); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE2, "name-bbb", null); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE3, "name-ccc", null); - assertThat(apmTracer.getSpans(), not(hasKey(SPAN_ID1))); - assertThat(apmTracer.getSpans(), not(hasKey(SPAN_ID2))); - assertThat(apmTracer.getSpans(), hasKey(SPAN_ID3)); + assertThat(apmTracer.getSpans(), not(hasKey(TRACEABLE1.getSpanId()))); + assertThat(apmTracer.getSpans(), not(hasKey(TRACEABLE2.getSpanId()))); + assertThat(apmTracer.getSpans(), hasKey(TRACEABLE3.getSpanId())); } /** @@ -360,4 +361,17 @@ public Span startSpan() { } } } + + private static class TestTraceable implements Traceable { + private final String spanId; + + TestTraceable(String spanId) { + this.spanId = Objects.requireNonNull(spanId); + } + + @Override + public String getSpanId() { + return spanId; + } + } } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java index 61b53ea10a786..cf974abf4fda9 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.core.util.Throwables; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; @@ -28,15 +29,17 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; +import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.stats.IndexShardStats; import 
org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; -import org.elasticsearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.get.GetComposableIndexTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; @@ -107,6 +110,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -277,17 +281,17 @@ public void testOtherWriteOps() throws Exception { } { IndexRequest indexRequest = new IndexRequest(dataStreamName).source("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON); - Exception e = expectThrows(IllegalArgumentException.class, () -> client().index(indexRequest).actionGet()); + Exception e = expectThrows(IllegalArgumentException.class, client().index(indexRequest)); assertThat(e.getMessage(), equalTo("only write ops with an op_type of create are allowed in data streams")); } { UpdateRequest updateRequest = new UpdateRequest(dataStreamName, "_id").doc("{}", XContentType.JSON); - Exception e = expectThrows(IllegalArgumentException.class, () -> client().update(updateRequest).actionGet()); + Exception e = expectThrows(IllegalArgumentException.class, client().update(updateRequest)); assertThat(e.getMessage(), equalTo("only write ops with an op_type of create are allowed in data streams")); } { DeleteRequest deleteRequest = new DeleteRequest(dataStreamName, "_id"); - Exception e = expectThrows(IllegalArgumentException.class, () -> client().delete(deleteRequest).actionGet()); + Exception e = expectThrows(IllegalArgumentException.class, client().delete(deleteRequest)); assertThat(e.getMessage(), equalTo("only write ops with an op_type of create are allowed in data streams")); } { @@ -432,7 +436,7 @@ public void testComposableTemplateOnlyMatchingWithDataStreamName() throws Except } } }"""; - PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("id_1"); + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request("id_1"); request.indexTemplate( ComposableIndexTemplate.builder() // use no wildcard, so that backing indices don't match just by name @@ -441,7 +445,7 @@ public void testComposableTemplateOnlyMatchingWithDataStreamName() throws Except .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build() ); - client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); int numDocs = 
randomIntBetween(2, 16); indexDocs(dataStreamName, numDocs); @@ -510,7 +514,9 @@ public void testTimeStampValidationInvalidFieldMapping() throws Exception { } } }"""; - PutComposableIndexTemplateAction.Request createTemplateRequest = new PutComposableIndexTemplateAction.Request("logs-foo"); + TransportPutComposableIndexTemplateAction.Request createTemplateRequest = new TransportPutComposableIndexTemplateAction.Request( + "logs-foo" + ); createTemplateRequest.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(List.of("logs-*")) @@ -521,7 +527,7 @@ public void testTimeStampValidationInvalidFieldMapping() throws Exception { Exception e = expectThrows( IllegalArgumentException.class, - () -> client().execute(PutComposableIndexTemplateAction.INSTANCE, createTemplateRequest).actionGet() + client().execute(TransportPutComposableIndexTemplateAction.TYPE, createTemplateRequest) ); assertThat( e.getCause().getCause().getMessage(), @@ -578,7 +584,7 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception { verifyResolvability(dataStreamName, indicesAdmin().prepareClose(dataStreamName), true); verifyResolvability(aliasToDataStream, indicesAdmin().prepareClose(aliasToDataStream), true); verifyResolvability(dataStreamName, clusterAdmin().prepareSearchShards(dataStreamName), false); - verifyResolvability(dataStreamName, indicesAdmin().prepareShardStores(dataStreamName), false); + verifyResolvability(client().execute(TransportIndicesShardStoresAction.TYPE, new IndicesShardStoresRequest(dataStreamName))); request = new CreateDataStreamAction.Request("logs-barbaz"); client().execute(CreateDataStreamAction.INSTANCE, request).actionGet(); @@ -622,7 +628,7 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception { verifyResolvability(wildcardExpression, indicesAdmin().prepareOpen(wildcardExpression), false); verifyResolvability(wildcardExpression, indicesAdmin().prepareClose(wildcardExpression), false); verifyResolvability(wildcardExpression, clusterAdmin().prepareSearchShards(wildcardExpression), false); - verifyResolvability(wildcardExpression, indicesAdmin().prepareShardStores(wildcardExpression), false); + verifyResolvability(client().execute(TransportIndicesShardStoresAction.TYPE, new IndicesShardStoresRequest(wildcardExpression))); } public void testCannotDeleteComposableTemplateUsedByDataStream() throws Exception { @@ -633,8 +639,8 @@ public void testCannotDeleteComposableTemplateUsedByDataStream() throws Exceptio createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName + "-eggplant"); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); - DeleteComposableIndexTemplateAction.Request req = new DeleteComposableIndexTemplateAction.Request("id"); - Exception e = expectThrows(Exception.class, () -> client().execute(DeleteComposableIndexTemplateAction.INSTANCE, req).get()); + TransportDeleteComposableIndexTemplateAction.Request req = new TransportDeleteComposableIndexTemplateAction.Request("id"); + Exception e = expectThrows(Exception.class, client().execute(TransportDeleteComposableIndexTemplateAction.TYPE, req)); Optional maybeE = ExceptionsHelper.unwrapCausesAndSuppressed( e, err -> err.getMessage() @@ -645,8 +651,8 @@ public void testCannotDeleteComposableTemplateUsedByDataStream() throws Exceptio ); assertTrue(maybeE.isPresent()); - DeleteComposableIndexTemplateAction.Request req2 = new DeleteComposableIndexTemplateAction.Request("i*"); - Exception e2 = expectThrows(Exception.class, () -> 
client().execute(DeleteComposableIndexTemplateAction.INSTANCE, req2).get()); + TransportDeleteComposableIndexTemplateAction.Request req2 = new TransportDeleteComposableIndexTemplateAction.Request("i*"); + Exception e2 = expectThrows(Exception.class, client().execute(TransportDeleteComposableIndexTemplateAction.TYPE, req2)); maybeE = ExceptionsHelper.unwrapCausesAndSuppressed( e2, err -> err.getMessage() @@ -658,7 +664,7 @@ public void testCannotDeleteComposableTemplateUsedByDataStream() throws Exceptio assertTrue(maybeE.isPresent()); // Now replace it with a higher-priority template and delete the old one - PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("id2"); + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request("id2"); request.indexTemplate( ComposableIndexTemplate.builder() // Match the other data stream with a slightly different pattern @@ -669,13 +675,13 @@ public void testCannotDeleteComposableTemplateUsedByDataStream() throws Exceptio .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build() ); - client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); - DeleteComposableIndexTemplateAction.Request deleteRequest = new DeleteComposableIndexTemplateAction.Request("id"); - client().execute(DeleteComposableIndexTemplateAction.INSTANCE, deleteRequest).get(); + TransportDeleteComposableIndexTemplateAction.Request deleteRequest = new TransportDeleteComposableIndexTemplateAction.Request("id"); + client().execute(TransportDeleteComposableIndexTemplateAction.TYPE, deleteRequest).get(); GetComposableIndexTemplateAction.Request getReq = new GetComposableIndexTemplateAction.Request("id"); - Exception e3 = expectThrows(Exception.class, () -> client().execute(GetComposableIndexTemplateAction.INSTANCE, getReq).get()); + Exception e3 = expectThrows(Exception.class, client().execute(GetComposableIndexTemplateAction.INSTANCE, getReq)); maybeE = ExceptionsHelper.unwrapCausesAndSuppressed(e3, err -> err.getMessage().contains("index template matching [id] not found")); assertTrue(maybeE.isPresent()); } @@ -874,7 +880,7 @@ public void testDataSteamAliasWithMalformedFilter() throws Exception { } Exception e = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().aliases(new IndicesAliasesRequest().addAliasAction(addAction)).actionGet() + indicesAdmin().aliases(new IndicesAliasesRequest().addAliasAction(addAction)) ); assertThat(e.getMessage(), equalTo("failed to parse filter for alias [" + alias + "]")); GetAliasesResponse response = indicesAdmin().getAliases(new GetAliasesRequest()).actionGet(); @@ -891,7 +897,7 @@ public void testAliasActionsFailOnDataStreamBackingIndices() throws Exception { AliasActions addAction = new AliasActions(AliasActions.Type.ADD).index(backingIndex).aliases("first_gen"); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(addAction); - Exception e = expectThrows(IllegalArgumentException.class, () -> indicesAdmin().aliases(aliasesAddRequest).actionGet()); + Exception e = expectThrows(IllegalArgumentException.class, indicesAdmin().aliases(aliasesAddRequest)); assertThat( e.getMessage(), equalTo( @@ -915,7 +921,7 @@ public void testAddDataStreamAliasesMixedExpressionValidation() throws Exception AliasActions addAction = new 
AliasActions(AliasActions.Type.ADD).index("metrics-*").aliases("my-alias"); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(addAction); - Exception e = expectThrows(IllegalArgumentException.class, () -> indicesAdmin().aliases(aliasesAddRequest).actionGet()); + Exception e = expectThrows(IllegalArgumentException.class, indicesAdmin().aliases(aliasesAddRequest)); assertThat(e.getMessage(), equalTo("expressions [metrics-*] that match with both data streams and regular indices are disallowed")); } @@ -977,7 +983,7 @@ public void testUpdateDataStreamsWithWildcards() throws Exception { { IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("metrics-foo").aliases("my-alias*")); - expectThrows(InvalidAliasNameException.class, () -> indicesAdmin().aliases(aliasesAddRequest).actionGet()); + expectThrows(InvalidAliasNameException.class, indicesAdmin().aliases(aliasesAddRequest)); } // REMOVE does resolve wildcards: { @@ -1003,7 +1009,7 @@ public void testDataStreamAliasesUnsupportedParametersValidation() throws Except AliasActions addAction = new AliasActions(AliasActions.Type.ADD).index("metrics-*").aliases("my-alias").routing("[routing]"); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(addAction); - Exception e = expectThrows(IllegalArgumentException.class, () -> indicesAdmin().aliases(aliasesAddRequest).actionGet()); + Exception e = expectThrows(IllegalArgumentException.class, indicesAdmin().aliases(aliasesAddRequest)); assertThat(e.getMessage(), equalTo("aliases that point to data streams don't support routing")); } { @@ -1012,7 +1018,7 @@ public void testDataStreamAliasesUnsupportedParametersValidation() throws Except .indexRouting("[index_routing]"); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(addAction); - Exception e = expectThrows(IllegalArgumentException.class, () -> indicesAdmin().aliases(aliasesAddRequest).actionGet()); + Exception e = expectThrows(IllegalArgumentException.class, indicesAdmin().aliases(aliasesAddRequest)); assertThat(e.getMessage(), equalTo("aliases that point to data streams don't support index_routing")); } { @@ -1021,7 +1027,7 @@ public void testDataStreamAliasesUnsupportedParametersValidation() throws Except .searchRouting("[search_routing]"); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(addAction); - Exception e = expectThrows(IllegalArgumentException.class, () -> indicesAdmin().aliases(aliasesAddRequest).actionGet()); + Exception e = expectThrows(IllegalArgumentException.class, indicesAdmin().aliases(aliasesAddRequest)); assertThat(e.getMessage(), equalTo("aliases that point to data streams don't support search_routing")); } { @@ -1030,7 +1036,7 @@ public void testDataStreamAliasesUnsupportedParametersValidation() throws Except .isHidden(randomBoolean()); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(addAction); - Exception e = expectThrows(IllegalArgumentException.class, () -> indicesAdmin().aliases(aliasesAddRequest).actionGet()); + Exception e = expectThrows(IllegalArgumentException.class, indicesAdmin().aliases(aliasesAddRequest)); assertThat(e.getMessage(), equalTo("aliases that point to data streams don't support is_hidden")); } } @@ -1156,10 +1162,7 @@ public void 
testIndexDocsWithCustomRoutingTargetingDataStreamIsNotAllowed() thro IndexRequest indexRequestWithRouting = new IndexRequest(dataStream).source("@timestamp", System.currentTimeMillis()) .opType(DocWriteRequest.OpType.CREATE) .routing("custom"); - IllegalArgumentException exception = expectThrows( - IllegalArgumentException.class, - () -> client().index(indexRequestWithRouting).actionGet() - ); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, client().index(indexRequestWithRouting)); assertThat( exception.getMessage(), is( @@ -1199,8 +1202,8 @@ public void testIndexDocsWithCustomRoutingAllowed() throws Exception { .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, true)) .build(); client().execute( - PutComposableIndexTemplateAction.INSTANCE, - new PutComposableIndexTemplateAction.Request("id1").indexTemplate(template) + TransportPutComposableIndexTemplateAction.TYPE, + new TransportPutComposableIndexTemplateAction.Request("id1").indexTemplate(template) ).actionGet(); // Index doc that triggers creation of a data stream String dataStream = "logs-foobar"; @@ -1316,7 +1319,7 @@ public void testNoTimestampInDocument() throws Exception { client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); IndexRequest indexRequest = new IndexRequest(dataStreamName).opType("create").source("{}", XContentType.JSON); - Exception e = expectThrows(Exception.class, () -> client().index(indexRequest).actionGet()); + Exception e = expectThrows(Exception.class, client().index(indexRequest)); assertThat(e.getCause().getMessage(), equalTo("data stream timestamp field [@timestamp] is missing")); } @@ -1328,12 +1331,14 @@ public void testMultipleTimestampValuesInDocument() throws Exception { IndexRequest indexRequest = new IndexRequest(dataStreamName).opType("create") .source("{\"@timestamp\": [\"2020-12-12\",\"2022-12-12\"]}", XContentType.JSON); - Exception e = expectThrows(Exception.class, () -> client().index(indexRequest).actionGet()); + Exception e = expectThrows(Exception.class, client().index(indexRequest)); assertThat(e.getCause().getMessage(), equalTo("data stream timestamp field [@timestamp] encountered multiple values")); } public void testMixedAutoCreate() throws Exception { - PutComposableIndexTemplateAction.Request createTemplateRequest = new PutComposableIndexTemplateAction.Request("logs-foo"); + TransportPutComposableIndexTemplateAction.Request createTemplateRequest = new TransportPutComposableIndexTemplateAction.Request( + "logs-foo" + ); createTemplateRequest.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(List.of("logs-foo*")) @@ -1341,7 +1346,7 @@ public void testMixedAutoCreate() throws Exception { .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build() ); - client().execute(PutComposableIndexTemplateAction.INSTANCE, createTemplateRequest).actionGet(); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, createTemplateRequest).actionGet(); BulkRequest bulkRequest = new BulkRequest(); bulkRequest.add(new IndexRequest("logs-foobar").opType(CREATE).source("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON)); @@ -1388,8 +1393,9 @@ public void testMixedAutoCreate() throws Exception { DeleteDataStreamAction.Request deleteDSReq = new DeleteDataStreamAction.Request(new String[] { "*" }); client().execute(DeleteDataStreamAction.INSTANCE, deleteDSReq).actionGet(); - DeleteComposableIndexTemplateAction.Request deleteTemplateRequest = new 
DeleteComposableIndexTemplateAction.Request("*"); - client().execute(DeleteComposableIndexTemplateAction.INSTANCE, deleteTemplateRequest).actionGet(); + TransportDeleteComposableIndexTemplateAction.Request deleteTemplateRequest = + new TransportDeleteComposableIndexTemplateAction.Request("*"); + client().execute(TransportDeleteComposableIndexTemplateAction.TYPE, deleteTemplateRequest).actionGet(); } public void testAutoCreateV1TemplateNoDataStream() { @@ -1427,7 +1433,7 @@ public void testCreatingDataStreamAndFirstBackingIndexExistsFails() throws Excep CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName, now); Exception e = expectThrows( ElasticsearchStatusException.class, - () -> client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet() + client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest) ); assertThat(e.getMessage(), equalTo("data stream could not be created because backing index [" + backingIndex + "] already exists")); } @@ -1597,7 +1603,7 @@ public void testCreateDataStreamWithSameNameAsIndexAlias() throws Exception { DataStreamIT.putComposableIndexTemplate("my-template", List.of("my-*")); var request = new CreateDataStreamAction.Request("my-alias"); - var e = expectThrows(IllegalStateException.class, () -> client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); + var e = expectThrows(IllegalStateException.class, client().execute(CreateDataStreamAction.INSTANCE, request)); assertThat(e.getMessage(), containsString("[my-alias (alias of [")); assertThat(e.getMessage(), containsString("]) conflicts with data stream")); } @@ -1610,7 +1616,7 @@ public void testCreateDataStreamWithSameNameAsIndex() throws Exception { DataStreamIT.putComposableIndexTemplate("my-template", List.of("my-*")); var request = new CreateDataStreamAction.Request("my-index"); - var e = expectThrows(IllegalStateException.class, () -> client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); + var e = expectThrows(IllegalStateException.class, client().execute(CreateDataStreamAction.INSTANCE, request)); assertThat(e.getMessage(), containsString("data stream [my-index] conflicts with index")); } @@ -1624,10 +1630,7 @@ public void testCreateDataStreamWithSameNameAsDataStreamAlias() throws Exception assertAcked(indicesAdmin().aliases(aliasesAddRequest).actionGet()); var request2 = new CreateDataStreamAction.Request("my-alias"); - var e = expectThrows( - IllegalStateException.class, - () -> client().execute(CreateDataStreamAction.INSTANCE, request2).actionGet() - ); + var e = expectThrows(IllegalStateException.class, client().execute(CreateDataStreamAction.INSTANCE, request2)); assertThat(e.getMessage(), containsString("data stream alias and data stream have the same name (my-alias)")); } { @@ -1645,10 +1648,7 @@ public void testCreateDataStreamWithSameNameAsDataStreamAlias() throws Exception assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); var request2 = new CreateDataStreamAction.Request("my-alias"); - var e = expectThrows( - IllegalStateException.class, - () -> client().execute(CreateDataStreamAction.INSTANCE, request2).actionGet() - ); + var e = expectThrows(IllegalStateException.class, client().execute(CreateDataStreamAction.INSTANCE, request2)); assertThat(e.getMessage(), containsString("data stream alias and data stream have the same name (my-alias)")); } } @@ -1663,7 +1663,7 @@ public void testCreateDataStreamAliasWithSameNameAsIndexAlias() 
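For reviewers skimming the many template hunks: the mechanical change is that the standalone `PutComposableIndexTemplateAction`/`DeleteComposableIndexTemplateAction` classes are replaced by the `ActionType` and `Request` hosted on the corresponding transport actions. In sketch form (names taken from the diff, template body elided):

```java
// Old style: a dedicated action class exposing a static INSTANCE.
// client().execute(PutComposableIndexTemplateAction.INSTANCE,
//     new PutComposableIndexTemplateAction.Request("id").indexTemplate(template));

// New style: the transport action carries both the ActionType and the Request.
client().execute(
    TransportPutComposableIndexTemplateAction.TYPE,
    new TransportPutComposableIndexTemplateAction.Request("id").indexTemplate(template)
).actionGet();
```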
throws Exception assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("logs-es").aliases("logs")); - var e = expectThrows(IllegalStateException.class, () -> indicesAdmin().aliases(aliasesAddRequest).actionGet()); + var e = expectThrows(IllegalStateException.class, indicesAdmin().aliases(aliasesAddRequest)); assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (logs)")); } { @@ -1679,7 +1679,7 @@ public void testCreateDataStreamAliasWithSameNameAsIndexAlias() throws Exception ); var request = new CreateDataStreamAction.Request("logs-es"); - var e = expectThrows(IllegalStateException.class, () -> client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); + var e = expectThrows(IllegalStateException.class, client().execute(CreateDataStreamAction.INSTANCE, request)); assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (logs)")); } } @@ -1695,7 +1695,7 @@ public void testCreateDataStreamAliasWithSameNameAsIndex() throws Exception { assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("logs-es").aliases("logs")); - var e = expectThrows(InvalidAliasNameException.class, () -> indicesAdmin().aliases(aliasesAddRequest).actionGet()); + var e = expectThrows(InvalidAliasNameException.class, indicesAdmin().aliases(aliasesAddRequest)); assertThat( e.getMessage(), equalTo("Invalid alias name [logs]: an index or data stream exists with the same name as the alias") @@ -1732,7 +1732,7 @@ public void testCreateIndexWithSameNameAsDataStreamAlias() throws Exception { assertAcked(indicesAdmin().aliases(aliasesAddRequest).actionGet()); CreateIndexRequest createIndexRequest = new CreateIndexRequest("logs"); - var e = expectThrows(InvalidIndexNameException.class, () -> indicesAdmin().create(createIndexRequest).actionGet()); + var e = expectThrows(InvalidIndexNameException.class, indicesAdmin().create(createIndexRequest)); assertThat(e.getMessage(), equalTo("Invalid index name [logs], already exists as alias")); } @@ -1747,7 +1747,7 @@ public void testCreateIndexAliasWithSameNameAsDataStreamAlias() throws Exception { CreateIndexRequest createIndexRequest = new CreateIndexRequest("my-index").alias(new Alias("logs")); - var e = expectThrows(IllegalStateException.class, () -> indicesAdmin().create(createIndexRequest).actionGet()); + var e = expectThrows(IllegalStateException.class, indicesAdmin().create(createIndexRequest)); assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (logs)")); } { @@ -1755,7 +1755,7 @@ public void testCreateIndexAliasWithSameNameAsDataStreamAlias() throws Exception assertAcked(indicesAdmin().create(createIndexRequest).actionGet()); IndicesAliasesRequest addAliasRequest = new IndicesAliasesRequest(); addAliasRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("my-index").aliases("logs")); - var e = expectThrows(IllegalStateException.class, () -> indicesAdmin().aliases(addAliasRequest).actionGet()); + var e = expectThrows(IllegalStateException.class, indicesAdmin().aliases(addAliasRequest)); assertThat(e.getMessage(), containsString("data stream alias and indices alias 
have the same name (logs)")); } } @@ -1813,8 +1813,8 @@ public void onFailure(Exception e) { var ghostReference = brokenDataStreamHolder.get().getIndices().get(0); // Many APIs fail with NPE, because of broken data stream: - expectThrows(NullPointerException.class, () -> indicesAdmin().stats(new IndicesStatsRequest()).actionGet()); - expectThrows(NullPointerException.class, () -> client().search(new SearchRequest()).actionGet()); + expectThrows(NullPointerException.class, indicesAdmin().stats(new IndicesStatsRequest())); + expectThrows(NullPointerException.class, client().search(new SearchRequest())); assertAcked( client().execute( @@ -1851,10 +1851,10 @@ private static void verifyResolvability( assertThat(multiSearchResponse.getResponses()[0].getFailure().getMessage(), equalTo(expectedErrorMessage)); }); } else if (requestBuilder instanceof ValidateQueryRequestBuilder) { - Exception e = expectThrows(IndexNotFoundException.class, requestBuilder::get); + Exception e = expectThrows(IndexNotFoundException.class, requestBuilder); assertThat(e.getMessage(), equalTo(expectedErrorMessage)); } else { - Exception e = expectThrows(IndexNotFoundException.class, requestBuilder::get); + Exception e = expectThrows(IndexNotFoundException.class, requestBuilder); assertThat(e.getMessage(), equalTo(expectedErrorMessage)); } } else { @@ -1866,11 +1866,15 @@ private static void verifyResolvability( multiSearchResponse -> assertThat(multiSearchResponse.getResponses()[0].isFailure(), is(false)) ); } else { - requestBuilder.get(); + verifyResolvability(requestBuilder.execute()); } } } + private static void verifyResolvability(ActionFuture future) { + future.actionGet(10, TimeUnit.SECONDS); + } + static void indexDocs(String dataStream, int numDocs) { BulkRequest bulkRequest = new BulkRequest(); for (int i = 0; i < numDocs; i++) { @@ -1927,8 +1931,8 @@ public void testPartitionedTemplate() throws IOException { .build(); ComposableIndexTemplate finalTemplate = template; client().execute( - PutComposableIndexTemplateAction.INSTANCE, - new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(finalTemplate) + TransportPutComposableIndexTemplateAction.TYPE, + new TransportPutComposableIndexTemplateAction.Request("my-it").indexTemplate(finalTemplate) ).actionGet(); /** * partition size with routing required @@ -1950,8 +1954,8 @@ public void testPartitionedTemplate() throws IOException { .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, true)) .build(); client().execute( - PutComposableIndexTemplateAction.INSTANCE, - new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(template) + TransportPutComposableIndexTemplateAction.TYPE, + new TransportPutComposableIndexTemplateAction.Request("my-it").indexTemplate(template) ).actionGet(); /** @@ -1971,10 +1975,10 @@ public void testPartitionedTemplate() throws IOException { ComposableIndexTemplate finalTemplate1 = template; Exception e = expectThrows( IllegalArgumentException.class, - () -> client().execute( - PutComposableIndexTemplateAction.INSTANCE, - new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(finalTemplate1) - ).actionGet() + client().execute( + TransportPutComposableIndexTemplateAction.TYPE, + new TransportPutComposableIndexTemplateAction.Request("my-it").indexTemplate(finalTemplate1) + ) ); Exception actualException = (Exception) e.getCause(); assertTrue( @@ -2003,10 +2007,10 @@ public void testRoutingEnabledInMappingDisabledInDataStreamTemplate() throws IOE .build(); Exception e = expectThrows( 
IllegalArgumentException.class, - () -> client().execute( - PutComposableIndexTemplateAction.INSTANCE, - new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(template) - ).actionGet() + client().execute( + TransportPutComposableIndexTemplateAction.TYPE, + new TransportPutComposableIndexTemplateAction.Request("my-it").indexTemplate(template) + ) ); Exception actualException = (Exception) e.getCause(); assertTrue(Throwables.getRootCause(actualException).getMessage().contains("contradicting `_routing.required` settings")); @@ -2037,8 +2041,8 @@ public void testSearchWithRouting() throws IOException, ExecutionException, Inte .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, true)) .build(); client().execute( - PutComposableIndexTemplateAction.INSTANCE, - new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(template) + TransportPutComposableIndexTemplateAction.TYPE, + new TransportPutComposableIndexTemplateAction.Request("my-it").indexTemplate(template) ).actionGet(); CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("my-logs"); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); @@ -2294,7 +2298,7 @@ static void putComposableIndexTemplate( @Nullable Map aliases, @Nullable DataStreamLifecycle lifecycle ) throws IOException { - PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(id); request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(patterns) @@ -2303,7 +2307,7 @@ static void putComposableIndexTemplate( .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build() ); - client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); } } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java index c3e59be54cc7f..da782cfd86ce2 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java @@ -24,7 +24,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; -import org.elasticsearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComposableIndexTemplateAction; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.datastreams.DeleteDataStreamAction; import org.elasticsearch.action.datastreams.GetDataStreamAction; @@ -59,6 +59,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -161,9 +162,11 @@ public void 
testSnapshotAndRestore() throws Exception { assertEquals(1, restoreSnapshotResponse.getRestoreInfo().successfulShards()); assertEquals(DOCUMENT_SOURCE, client.prepareGet(dsBackingIndexName, id).get().getSourceAsMap()); - SearchHit[] hits = client.prepareSearch("ds").get().getHits().getHits(); - assertEquals(1, hits.length); - assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + assertResponse(client.prepareSearch("ds"), response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(1, hits.length); + assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + }); GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, @@ -219,9 +222,11 @@ public void testSnapshotAndRestoreAllDataStreamsInPlace() throws Exception { assertEquals(1, restoreSnapshotResponse.getRestoreInfo().successfulShards()); assertEquals(DOCUMENT_SOURCE, client.prepareGet(dsBackingIndexName, id).get().getSourceAsMap()); - SearchHit[] hits = client.prepareSearch("ds").get().getHits().getHits(); - assertEquals(1, hits.length); - assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + assertResponse(client.prepareSearch("ds"), response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(1, hits.length); + assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + }); GetDataStreamAction.Request getDataSteamRequest = new GetDataStreamAction.Request(new String[] { "*" }); GetDataStreamAction.Response ds = client.execute(GetDataStreamAction.INSTANCE, getDataSteamRequest).get(); @@ -271,9 +276,12 @@ public void testSnapshotAndRestoreInPlace() { assertEquals(1, restoreSnapshotResponse.getRestoreInfo().successfulShards()); assertEquals(DOCUMENT_SOURCE, client.prepareGet(dsBackingIndexName, id).get().getSourceAsMap()); - SearchHit[] hits = client.prepareSearch("ds").get().getHits().getHits(); - assertEquals(1, hits.length); - assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + + assertResponse(client.prepareSearch("ds"), response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(1, hits.length); + assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + }); GetDataStreamAction.Request getDataSteamRequest = new GetDataStreamAction.Request(new String[] { "ds" }); GetDataStreamAction.Response ds = client.execute(GetDataStreamAction.INSTANCE, getDataSteamRequest).actionGet(); @@ -347,9 +355,11 @@ public void testSnapshotAndRestoreAllIncludeSpecificDataStream() throws Exceptio assertEquals(1, restoreSnapshotResponse.getRestoreInfo().successfulShards()); assertEquals(DOCUMENT_SOURCE, client.prepareGet(backingIndexName, idToGet).get().getSourceAsMap()); - SearchHit[] hits = client.prepareSearch(backingIndexName).get().getHits().getHits(); - assertEquals(1, hits.length); - assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + assertResponse(client.prepareSearch(backingIndexName), response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(1, hits.length); + assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + }); GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, @@ -396,9 +406,11 @@ public void testSnapshotAndRestoreReplaceAll() throws Exception { assertEquals(2, restoreSnapshotResponse.getRestoreInfo().successfulShards()); assertEquals(DOCUMENT_SOURCE, client.prepareGet(dsBackingIndexName, id).get().getSourceAsMap()); - SearchHit[] hits = client.prepareSearch("ds").get().getHits().getHits(); - assertEquals(1, hits.length); - assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + 
assertResponse(client.prepareSearch("ds"), response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(1, hits.length); + assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + }); GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, @@ -449,9 +461,11 @@ public void testSnapshotAndRestoreAll() throws Exception { assertEquals(2, restoreSnapshotResponse.getRestoreInfo().successfulShards()); assertEquals(DOCUMENT_SOURCE, client.prepareGet(dsBackingIndexName, id).get().getSourceAsMap()); - SearchHit[] hits = client.prepareSearch("ds").get().getHits().getHits(); - assertEquals(1, hits.length); - assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + assertResponse(client.prepareSearch("ds"), response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(1, hits.length); + assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + }); GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, @@ -505,9 +519,11 @@ public void testSnapshotAndRestoreIncludeAliasesFalse() throws Exception { assertEquals(2, restoreSnapshotResponse.getRestoreInfo().successfulShards()); assertEquals(DOCUMENT_SOURCE, client.prepareGet(dsBackingIndexName, id).get().getSourceAsMap()); - SearchHit[] hits = client.prepareSearch("ds").get().getHits().getHits(); - assertEquals(1, hits.length); - assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + assertResponse(client.prepareSearch("ds"), response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(1, hits.length); + assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + }); GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, @@ -538,7 +554,7 @@ public void testRename() throws Exception { expectThrows( SnapshotRestoreException.class, - () -> client.admin().cluster().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIndices("ds").get() + client.admin().cluster().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIndices("ds") ); client.admin() @@ -557,7 +573,10 @@ public void testRename() throws Exception { assertEquals(1, ds.getDataStreams().size()); assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); assertEquals(ds2BackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); - assertEquals(DOCUMENT_SOURCE, client.prepareSearch("ds2").get().getHits().getHits()[0].getSourceAsMap()); + assertResponse( + client.prepareSearch("ds2"), + response -> assertEquals(DOCUMENT_SOURCE, response.getHits().getHits()[0].getSourceAsMap()) + ); assertEquals(DOCUMENT_SOURCE, client.prepareGet(ds2BackingIndexName, id).get().getSourceAsMap()); GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(new GetAliasesRequest("my-alias")).actionGet(); @@ -652,7 +671,7 @@ public void testBackingIndexIsNotRenamedWhenRestoringDataStream() { expectThrows( SnapshotRestoreException.class, - () -> client.admin().cluster().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIndices("ds").get() + client.admin().cluster().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIndices("ds") ); // delete data stream @@ -689,7 +708,7 @@ public void testDataStreamAndBackingIndicesAreRenamedUsingRegex() { expectThrows( SnapshotRestoreException.class, - () -> client.admin().cluster().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIndices("ds").get() + 
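The repeated `assertResponse(client.prepareSearch(...), response -> {...})` rewrite replaces direct `.get()` calls with a consumer-based helper. A minimal sketch of what `ElasticsearchAssertions.assertResponse` (imported earlier in this diff) is assumed to do; the body here is an assumption, shown to explain why the lambda form matters:

```java
import java.util.function.Consumer;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;

// Assumed shape of the helper: fetch the response, run the caller's assertions,
// and always release the ref-counted response afterwards, a step the old bare
// `prepareSearch(...).get()` pattern had no hook for.
static <R extends ActionResponse> void assertResponse(ActionRequestBuilder<?, R> builder, Consumer<R> consumer) {
    R response = builder.get();
    try {
        consumer.accept(response);
    } finally {
        response.decRef(); // balance the reference taken when the response was handed out
    }
}
```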
client.admin().cluster().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIndices("ds") ); // restore data stream attempting to rename the backing index @@ -768,7 +787,7 @@ public void testDataStreamNotStoredWhenIndexRequested() { assertEquals(RestStatus.OK, status); expectThrows( Exception.class, - () -> client.admin().cluster().prepareRestoreSnapshot(REPO, "snap2").setWaitForCompletion(true).setIndices("ds").get() + client.admin().cluster().prepareRestoreSnapshot(REPO, "snap2").setWaitForCompletion(true).setIndices("ds") ); } @@ -796,7 +815,7 @@ public void testDataStreamNotRestoredWhenIndexRequested() throws Exception { assertEquals(RestStatus.OK, restoreSnapshotResponse.status()); GetDataStreamAction.Request getRequest = new GetDataStreamAction.Request(new String[] { "ds" }); - expectThrows(ResourceNotFoundException.class, () -> client.execute(GetDataStreamAction.INSTANCE, getRequest).actionGet()); + expectThrows(ResourceNotFoundException.class, client.execute(GetDataStreamAction.INSTANCE, getRequest)); } public void testDataStreamNotIncludedInLimitedSnapshot() throws ExecutionException, InterruptedException { @@ -938,7 +957,7 @@ public void testSnapshotDSDuringRollover() throws Exception { if (partial) { assertTrue(rolloverResponse.get().isRolledOver()); } else { - SnapshotInProgressException e = expectThrows(SnapshotInProgressException.class, rolloverResponse::actionGet); + SnapshotInProgressException e = expectThrows(SnapshotInProgressException.class, rolloverResponse); assertThat(e.getMessage(), containsString("Cannot roll over data stream that is being snapshotted:")); } unblockAllDataNodes(repoName); @@ -1046,15 +1065,17 @@ public void testRestoreDataStreamAliasWithConflictingDataStream() throws Excepti assertAcked(client.execute(CreateDataStreamAction.INSTANCE, request).actionGet()); var e = expectThrows( IllegalStateException.class, - () -> client.admin().cluster().prepareRestoreSnapshot(REPO, snapshotName).setWaitForCompletion(true).get() + client.admin().cluster().prepareRestoreSnapshot(REPO, snapshotName).setWaitForCompletion(true) ); assertThat(e.getMessage(), containsString("data stream alias and data stream have the same name (my-alias)")); } finally { // Need to remove data streams in order to remove template client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).actionGet(); // Need to remove template, because base class doesn't remove composable index templates after each test (only legacy templates) - client.execute(DeleteComposableIndexTemplateAction.INSTANCE, new DeleteComposableIndexTemplateAction.Request("my-template")) - .actionGet(); + client.execute( + TransportDeleteComposableIndexTemplateAction.TYPE, + new TransportDeleteComposableIndexTemplateAction.Request("my-template") + ).actionGet(); } } @@ -1067,7 +1088,7 @@ public void testRestoreDataStreamAliasWithConflictingIndicesAlias() throws Excep var e = expectThrows( IllegalStateException.class, - () -> client.admin().cluster().prepareRestoreSnapshot(REPO, snapshotName).setWaitForCompletion(true).get() + client.admin().cluster().prepareRestoreSnapshot(REPO, snapshotName).setWaitForCompletion(true) ); assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (my-alias)")); } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java index 
672d2d21d73a5..13b186d9da509 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.datastreams; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.routing.allocation.DataTier; import org.elasticsearch.common.settings.Settings; @@ -39,8 +39,8 @@ public void testDefaultDataStreamAllocateToHot() { .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build(); client().execute( - PutComposableIndexTemplateAction.INSTANCE, - new PutComposableIndexTemplateAction.Request("template").indexTemplate(template) + TransportPutComposableIndexTemplateAction.TYPE, + new TransportPutComposableIndexTemplateAction.Request("template").indexTemplate(template) ).actionGet(); var dsIndexName = prepareIndex(index).setCreate(true) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java index ab42d831c6545..08b09bbc78348 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java @@ -17,7 +17,7 @@ import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; @@ -49,6 +49,7 @@ import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -113,7 +114,7 @@ public void testTimeRanges() throws Exception { var mapping = new CompressedXContent(randomBoolean() ? 
MAPPING_TEMPLATE : MAPPING_TEMPLATE.replace("date", "date_nanos")); if (randomBoolean()) { - var request = new PutComposableIndexTemplateAction.Request("id"); + var request = new TransportPutComposableIndexTemplateAction.Request("id"); request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(List.of("k8s*")) @@ -121,13 +122,13 @@ public void testTimeRanges() throws Exception { .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) .build() ); - client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); } else { var putComponentTemplateRequest = new PutComponentTemplateAction.Request("1"); putComponentTemplateRequest.componentTemplate(new ComponentTemplate(new Template(null, mapping, null), null, null)); client().execute(PutComponentTemplateAction.INSTANCE, putComponentTemplateRequest).actionGet(); - var putTemplateRequest = new PutComposableIndexTemplateAction.Request("id"); + var putTemplateRequest = new TransportPutComposableIndexTemplateAction.Request("id"); putTemplateRequest.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(List.of("k8s*")) @@ -136,7 +137,7 @@ public void testTimeRanges() throws Exception { .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) .build() ); - client().execute(PutComposableIndexTemplateAction.INSTANCE, putTemplateRequest).actionGet(); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, putTemplateRequest).actionGet(); } // index doc @@ -238,7 +239,7 @@ public void testInvalidTsdbTemplatesNoTimeSeriesDimensionAttribute() throws Exce } }"""; { - var request = new PutComposableIndexTemplateAction.Request("id"); + var request = new TransportPutComposableIndexTemplateAction.Request("id"); request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(List.of("k8s*")) @@ -254,7 +255,7 @@ public void testInvalidTsdbTemplatesNoTimeSeriesDimensionAttribute() throws Exce ); var e = expectThrows( IllegalArgumentException.class, - () -> client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet() + () -> client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet() ); assertThat( e.getCause().getCause().getMessage(), @@ -266,7 +267,7 @@ public void testInvalidTsdbTemplatesNoTimeSeriesDimensionAttribute() throws Exce ); } { - var request = new PutComposableIndexTemplateAction.Request("id"); + var request = new TransportPutComposableIndexTemplateAction.Request("id"); request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(List.of("k8s*")) @@ -282,7 +283,7 @@ public void testInvalidTsdbTemplatesNoTimeSeriesDimensionAttribute() throws Exce ); var e = expectThrows( InvalidIndexTemplateException.class, - () -> client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet() + () -> client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet() ); assertThat(e.getMessage(), containsString("[index.mode=time_series] requires a non-empty [index.routing_path]")); } @@ -300,7 +301,7 @@ public void testInvalidTsdbTemplatesNoKeywordFieldType() throws Exception { } } }"""; - var request = new PutComposableIndexTemplateAction.Request("id"); + var request = new TransportPutComposableIndexTemplateAction.Request("id"); request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(List.of("k8s*")) @@ -316,7 +317,7 @@ public void 
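Context for the TSDB template hunks here: the negative tests each trip a different time-series validation rule (no dimension attribute, non-keyword routing field, routing path without time-series mode). The passing configuration these tests contrast against needs only two settings, shown below with the same values the file itself uses:

```java
// Minimal valid settings for a time-series (TSDB) index template: time_series
// mode plus a routing path naming a keyword field flagged as a dimension.
Settings templateSettings = Settings.builder()
    .put("index.mode", "time_series")
    .put("index.routing_path", "metricset")
    .build();
```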
testInvalidTsdbTemplatesNoKeywordFieldType() throws Exception { ); Exception e = expectThrows( IllegalArgumentException.class, - () -> client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet() + () -> client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet() ); assertThat( e.getCause().getCause().getMessage(), @@ -340,7 +341,7 @@ public void testInvalidTsdbTemplatesMissingSettings() throws Exception { } } }"""; - var request = new PutComposableIndexTemplateAction.Request("id"); + var request = new TransportPutComposableIndexTemplateAction.Request("id"); request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(List.of("k8s*")) @@ -356,7 +357,7 @@ public void testInvalidTsdbTemplatesMissingSettings() throws Exception { ); var e = expectThrows( IllegalArgumentException.class, - () -> client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet() + () -> client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet() ); assertThat(e.getCause().getMessage(), equalTo("[index.routing_path] requires [index.mode=time_series]")); } @@ -366,7 +367,7 @@ public void testSkippingShards() throws Exception { var mapping = new CompressedXContent(randomBoolean() ? MAPPING_TEMPLATE : MAPPING_TEMPLATE.replace("date", "date_nanos")); { var templateSettings = Settings.builder().put("index.mode", "time_series").put("index.routing_path", "metricset").build(); - var request = new PutComposableIndexTemplateAction.Request("id1"); + var request = new TransportPutComposableIndexTemplateAction.Request("id1"); request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(List.of("pattern-1")) @@ -374,13 +375,13 @@ public void testSkippingShards() throws Exception { .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) .build() ); - client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); var indexRequest = new IndexRequest("pattern-1").opType(DocWriteRequest.OpType.CREATE).setRefreshPolicy("true"); indexRequest.source(DOC.replace("$time", formatInstant(time)), XContentType.JSON); client().index(indexRequest).actionGet(); } { - var request = new PutComposableIndexTemplateAction.Request("id2"); + var request = new TransportPutComposableIndexTemplateAction.Request("id2"); request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(List.of("pattern-2")) @@ -388,7 +389,7 @@ public void testSkippingShards() throws Exception { .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) .build() ); - client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); var indexRequest = new IndexRequest("pattern-2").opType(DocWriteRequest.OpType.CREATE).setRefreshPolicy("true"); indexRequest.source(DOC.replace("$time", formatInstant(time)), XContentType.JSON); client().index(indexRequest).actionGet(); @@ -400,11 +401,12 @@ public void testSkippingShards() throws Exception { var searchRequest = new SearchRequest("pattern-*"); searchRequest.setPreFilterShardSize(1); searchRequest.source(matchingRange); - var searchResponse = client().search(searchRequest).actionGet(); - ElasticsearchAssertions.assertHitCount(searchResponse, 2); - assertThat(searchResponse.getTotalShards(), equalTo(2)); - assertThat(searchResponse.getSkippedShards(), 
equalTo(0)); - assertThat(searchResponse.getSuccessfulShards(), equalTo(2)); + assertResponse(client().search(searchRequest), searchResponse -> { + ElasticsearchAssertions.assertHitCount(searchResponse, 2); + assertThat(searchResponse.getTotalShards(), equalTo(2)); + assertThat(searchResponse.getSkippedShards(), equalTo(0)); + assertThat(searchResponse.getSuccessfulShards(), equalTo(2)); + }); } { var nonMatchingRange = new SearchSourceBuilder().query( @@ -414,17 +416,18 @@ public void testSkippingShards() throws Exception { var searchRequest = new SearchRequest("pattern-*"); searchRequest.setPreFilterShardSize(1); searchRequest.source(nonMatchingRange); - var searchResponse = client().search(searchRequest).actionGet(); - ElasticsearchAssertions.assertNoSearchHits(searchResponse); - assertThat(searchResponse.getTotalShards(), equalTo(2)); - assertThat(searchResponse.getSkippedShards(), equalTo(1)); - assertThat(searchResponse.getSuccessfulShards(), equalTo(2)); + assertResponse(client().search(searchRequest), searchResponse -> { + ElasticsearchAssertions.assertNoSearchHits(searchResponse); + assertThat(searchResponse.getTotalShards(), equalTo(2)); + assertThat(searchResponse.getSkippedShards(), equalTo(1)); + assertThat(searchResponse.getSuccessfulShards(), equalTo(2)); + }); } } public void testTrimId() throws Exception { String dataStreamName = "k8s"; - var putTemplateRequest = new PutComposableIndexTemplateAction.Request("id"); + var putTemplateRequest = new TransportPutComposableIndexTemplateAction.Request("id"); putTemplateRequest.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(List.of(dataStreamName + "*")) @@ -444,7 +447,7 @@ public void testTrimId() throws Exception { .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) .build() ); - client().execute(PutComposableIndexTemplateAction.INSTANCE, putTemplateRequest).actionGet(); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, putTemplateRequest).actionGet(); // index some data int numBulkRequests = 32; @@ -536,17 +539,19 @@ public void testTrimId() throws Exception { ); // Check the search api can synthesize _id + final String idxName = indexName; var searchRequest = new SearchRequest(dataStreamName); searchRequest.source().trackTotalHits(true); - var searchResponse = client().search(searchRequest).actionGet(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) numBulkRequests * numDocsPerBulk)); - String id = searchResponse.getHits().getHits()[0].getId(); - assertThat(id, notNullValue()); - - // Check that the _id is gettable: - var getResponse = client().get(new GetRequest(indexName).id(id)).actionGet(); - assertThat(getResponse.isExists(), is(true)); - assertThat(getResponse.getId(), equalTo(id)); + assertResponse(client().search(searchRequest), searchResponse -> { + assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) numBulkRequests * numDocsPerBulk)); + String id = searchResponse.getHits().getHits()[0].getId(); + assertThat(id, notNullValue()); + + // Check that the _id is gettable: + var getResponse = client().get(new GetRequest(idxName).id(id)).actionGet(); + assertThat(getResponse.isExists(), is(true)); + assertThat(getResponse.getId(), equalTo(id)); + }); } static String formatInstant(Instant instant) { diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java 
b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java index 03bd753e29068..f34bb96b3eb81 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java @@ -20,7 +20,7 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; @@ -197,14 +197,14 @@ public void testOriginationDate() throws Exception { } } }"""; - PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("id2"); + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request("id2"); request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(List.of("index_*")) .template(new Template(null, CompressedXContent.fromJSON(mapping), null, null)) .build() ); - client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); String indexWithOldOriginationDate = "index_old"; long originTimeMillis = System.currentTimeMillis() - TimeValue.timeValueDays(365).millis(); @@ -861,7 +861,7 @@ static void putComposableIndexTemplate( @Nullable Map metadata, @Nullable DataStreamLifecycle lifecycle ) throws IOException { - PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(id); request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(patterns) @@ -870,7 +870,7 @@ static void putComposableIndexTemplate( .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build() ); - client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); } static void updateLifecycle(String dataStreamName, TimeValue dataRetention) { diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java index 57febae28bb4d..471622489d9b2 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.admin.indices.rollover.Condition; import org.elasticsearch.action.admin.indices.rollover.RolloverConditions; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; -import 
org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; @@ -349,7 +349,7 @@ static void putComposableIndexTemplate( @Nullable Map metadata, @Nullable DataStreamLifecycle lifecycle ) throws IOException { - PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(id); request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(patterns) @@ -358,7 +358,7 @@ static void putComposableIndexTemplate( .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build() ); - client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); } } diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LazyRolloverDataStreamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LazyRolloverDataStreamIT.java new file mode 100644 index 0000000000000..d89dbc346c7e0 --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LazyRolloverDataStreamIT.java @@ -0,0 +1,183 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.datastreams; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; + +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.startsWith; + +public class LazyRolloverDataStreamIT extends DisabledSecurityDataStreamTestCase { + + @SuppressWarnings("unchecked") + public void testLazyRollover() throws Exception { + Request putComposableIndexTemplateRequest = new Request("POST", "/_index_template/lazy-ds-template"); + putComposableIndexTemplateRequest.setJsonEntity(""" + { + "index_patterns": ["lazy-ds*"], + "data_stream": {} + } + """); + assertOK(client().performRequest(putComposableIndexTemplateRequest)); + + String dataStreamName = "lazy-ds"; + + Request createDocRequest = new Request("POST", "/" + dataStreamName + "/_doc?refresh=true"); + createDocRequest.setJsonEntity("{ \"@timestamp\": \"2020-10-22\", \"a\": 1 }"); + assertOK(client().performRequest(createDocRequest)); + + final Response rolloverResponse = client().performRequest(new Request("POST", "/" + dataStreamName + "/_rollover?lazy")); + Map rolloverResponseMap = entityAsMap(rolloverResponse); + assertThat((String) rolloverResponseMap.get("old_index"), startsWith(".ds-lazy-ds-")); + assertThat((String) rolloverResponseMap.get("old_index"), endsWith("-000001")); + assertThat((String) rolloverResponseMap.get("new_index"), startsWith(".ds-lazy-ds-")); + assertThat((String) rolloverResponseMap.get("new_index"), endsWith("-000002")); + assertThat(rolloverResponseMap.get("lazy"), equalTo(true)); + assertThat(rolloverResponseMap.get("dry_run"), equalTo(false)); + assertThat(rolloverResponseMap.get("acknowledged"), equalTo(true)); + assertThat(rolloverResponseMap.get("rolled_over"), equalTo(false)); + assertThat(rolloverResponseMap.get("conditions"), equalTo(Map.of())); + + { + final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + dataStreamName)); + List dataStreams = (List) entityAsMap(dataStreamResponse).get("data_streams"); + assertThat(dataStreams.size(), is(1)); + Map dataStream = (Map) dataStreams.get(0); + assertThat(dataStream.get("name"), equalTo(dataStreamName)); + assertThat(dataStream.get("rollover_on_write"), is(true)); + assertThat(((List) dataStream.get("indices")).size(), is(1)); + } + + createDocRequest = new Request("POST", "/" + dataStreamName + "/_doc?refresh=true"); + createDocRequest.setJsonEntity("{ \"@timestamp\": \"2020-10-23\", \"a\": 2 }"); + assertOK(client().performRequest(createDocRequest)); + + { + final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + dataStreamName)); + List dataStreams = (List) entityAsMap(dataStreamResponse).get("data_streams"); + assertThat(dataStreams.size(), is(1)); + Map dataStream = (Map) dataStreams.get(0); + assertThat(dataStream.get("name"), equalTo(dataStreamName)); + assertThat(dataStream.get("rollover_on_write"), is(false)); + assertThat(((List) dataStream.get("indices")).size(), is(2)); + } + } + + @SuppressWarnings("unchecked") + public void testLazyRolloverFailsIndexing() throws Exception { + Request putComposableIndexTemplateRequest = new Request("POST", "/_index_template/lazy-ds-template"); + putComposableIndexTemplateRequest.setJsonEntity(""" + { + 
"index_patterns": ["lazy-ds*"], + "data_stream": {} + } + """); + assertOK(client().performRequest(putComposableIndexTemplateRequest)); + + String dataStreamName = "lazy-ds"; + + Request createDocRequest = new Request("POST", "/" + dataStreamName + "/_doc?refresh=true"); + createDocRequest.setJsonEntity("{ \"@timestamp\": \"2020-10-22\", \"a\": 1 }"); + assertOK(client().performRequest(createDocRequest)); + + Request updateClusterSettingsRequest = new Request("PUT", "_cluster/settings"); + updateClusterSettingsRequest.setJsonEntity(""" + { + "persistent": { + "cluster.max_shards_per_node": 1 + } + }"""); + assertAcknowledged(client().performRequest(updateClusterSettingsRequest)); + + final Response rolloverResponse = client().performRequest(new Request("POST", "/" + dataStreamName + "/_rollover?lazy")); + Map rolloverResponseMap = entityAsMap(rolloverResponse); + assertThat((String) rolloverResponseMap.get("old_index"), startsWith(".ds-lazy-ds-")); + assertThat((String) rolloverResponseMap.get("old_index"), endsWith("-000001")); + assertThat((String) rolloverResponseMap.get("new_index"), startsWith(".ds-lazy-ds-")); + assertThat((String) rolloverResponseMap.get("new_index"), endsWith("-000002")); + assertThat(rolloverResponseMap.get("lazy"), equalTo(true)); + assertThat(rolloverResponseMap.get("dry_run"), equalTo(false)); + assertThat(rolloverResponseMap.get("acknowledged"), equalTo(true)); + assertThat(rolloverResponseMap.get("rolled_over"), equalTo(false)); + assertThat(rolloverResponseMap.get("conditions"), equalTo(Map.of())); + + { + final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + dataStreamName)); + List dataStreams = (List) entityAsMap(dataStreamResponse).get("data_streams"); + assertThat(dataStreams.size(), is(1)); + Map dataStream = (Map) dataStreams.get(0); + assertThat(dataStream.get("name"), equalTo(dataStreamName)); + assertThat(dataStream.get("rollover_on_write"), is(true)); + assertThat(((List) dataStream.get("indices")).size(), is(1)); + } + + try { + createDocRequest = new Request("POST", "/" + dataStreamName + "/_doc?refresh=true"); + createDocRequest.setJsonEntity("{ \"@timestamp\": \"2020-10-23\", \"a\": 2 }"); + client().performRequest(createDocRequest); + fail("Indexing should have failed."); + } catch (ResponseException responseException) { + assertThat(responseException.getMessage(), containsString("this action would add [2] shards")); + } + + updateClusterSettingsRequest = new Request("PUT", "_cluster/settings"); + updateClusterSettingsRequest.setJsonEntity(""" + { + "persistent": { + "cluster.max_shards_per_node": null + } + }"""); + assertAcknowledged(client().performRequest(updateClusterSettingsRequest)); + createDocRequest = new Request("POST", "/" + dataStreamName + "/_doc?refresh=true"); + createDocRequest.setJsonEntity("{ \"@timestamp\": \"2020-10-23\", \"a\": 2 }"); + assertOK(client().performRequest(createDocRequest)); + { + final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + dataStreamName)); + List dataStreams = (List) entityAsMap(dataStreamResponse).get("data_streams"); + assertThat(dataStreams.size(), is(1)); + Map dataStream = (Map) dataStreams.get(0); + assertThat(dataStream.get("name"), equalTo(dataStreamName)); + assertThat(dataStream.get("rollover_on_write"), is(false)); + assertThat(((List) dataStream.get("indices")).size(), is(2)); + } + } + + @SuppressWarnings("unchecked") + public void testLazyRolloverWithConditions() throws Exception { + Request 
putComposableIndexTemplateRequest = new Request("POST", "/_index_template/lazy-ds-template"); + putComposableIndexTemplateRequest.setJsonEntity(""" + { + "index_patterns": ["lazy-ds*"], + "data_stream": {} + } + """); + assertOK(client().performRequest(putComposableIndexTemplateRequest)); + + String dataStreamName = "lazy-ds"; + + Request createDocRequest = new Request("POST", "/" + dataStreamName + "/_doc?refresh=true"); + createDocRequest.setJsonEntity("{ \"@timestamp\": \"2020-10-22\", \"a\": 1 }"); + + assertOK(client().performRequest(createDocRequest)); + + Request rolloverRequest = new Request("POST", "/" + dataStreamName + "/_rollover?lazy"); + rolloverRequest.setJsonEntity("{\"conditions\": {\"max_docs\": 1}}"); + ResponseException responseError = expectThrows(ResponseException.class, () -> client().performRequest(rolloverRequest)); + assertThat(responseError.getResponse().getStatusLine().getStatusCode(), is(400)); + assertThat(responseError.getMessage(), containsString("only without any conditions")); + } +} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java index 7ec2d32851ea5..519499addd77e 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java @@ -83,7 +83,7 @@ public Settings getAdditionalIndexSettings( if (indexMode != null) { if (indexMode == IndexMode.TIME_SERIES) { Settings.Builder builder = Settings.builder(); - TimeValue lookAheadTime = DataStreamsPlugin.LOOK_AHEAD_TIME.get(allSettings); + TimeValue lookAheadTime = DataStreamsPlugin.getLookAheadTime(allSettings); TimeValue lookBackTime = DataStreamsPlugin.LOOK_BACK_TIME.get(allSettings); final Instant start; final Instant end; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index fb93b7d688a74..63920ed73bf4a 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -90,15 +90,28 @@ public class DataStreamsPlugin extends Plugin implements ActionPlugin, HealthPlu Setting.Property.Dynamic ); + private static final TimeValue MAX_LOOK_AHEAD_TIME = TimeValue.timeValueHours(2); public static final Setting LOOK_AHEAD_TIME = Setting.timeSetting( "index.look_ahead_time", - TimeValue.timeValueHours(2), + TimeValue.timeValueMinutes(30), TimeValue.timeValueMinutes(1), - TimeValue.timeValueDays(7), + TimeValue.timeValueDays(7), // effectively capped at 2h now, see getLookAheadTime() Setting.Property.IndexScope, Setting.Property.Dynamic, Setting.Property.ServerlessPublic ); + + /** + * Returns the look ahead time, lowering it to 2 hours if it is configured to more than 2 hours.
+ */ + public static TimeValue getLookAheadTime(Settings settings) { + TimeValue lookAheadTime = DataStreamsPlugin.LOOK_AHEAD_TIME.get(settings); + if (lookAheadTime.compareTo(DataStreamsPlugin.MAX_LOOK_AHEAD_TIME) > 0) { + lookAheadTime = DataStreamsPlugin.MAX_LOOK_AHEAD_TIME; + } + return lookAheadTime; + } + public static final String LIFECYCLE_CUSTOM_INDEX_METADATA_KEY = "data_stream_lifecycle"; public static final Setting LOOK_BACK_TIME = Setting.timeSetting( "index.look_back_time", diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java index f973eb95b39ce..3bbc37cd87ad5 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java @@ -108,7 +108,7 @@ ClusterState updateTimeSeriesTemporalRange(ClusterState current, Instant now) { Index head = dataStream.getWriteIndex(); IndexMetadata im = current.metadata().getIndexSafe(head); Instant currentEnd = IndexSettings.TIME_SERIES_END_TIME.get(im.getSettings()); - TimeValue lookAheadTime = DataStreamsPlugin.LOOK_AHEAD_TIME.get(im.getSettings()); + TimeValue lookAheadTime = DataStreamsPlugin.getLookAheadTime(im.getSettings()); Instant newEnd = DataStream.getCanonicalTimestampBound( now.plus(lookAheadTime.getMillis(), ChronoUnit.MILLIS).plus(pollInterval.getMillis(), ChronoUnit.MILLIS) ); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 21b1316e5685b..eff40cb1dbe62 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; +import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.datastreams.lifecycle.ErrorEntry; import org.elasticsearch.action.downsample.DownsampleAction; @@ -895,7 +895,7 @@ private Set maybeExecuteForceMerge(ClusterState state, List indice transportActionsDeduplicator.executeOnce( updateMergePolicySettingsRequest, new ErrorRecordingActionListener( - UpdateSettingsAction.NAME, + TransportUpdateSettingsAction.TYPE.name(), indexName, errorStore, Strings.format( diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java index 67bfae0740fb5..2b23b76670af2 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java @@ -26,9 
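A worked example of the clamp introduced above, using only values visible in this hunk:

```java
// index.look_ahead_time configured above the cap is lowered to 2 hours...
Settings legacy = Settings.builder().put("index.look_ahead_time", "7d").build();
assert DataStreamsPlugin.getLookAheadTime(legacy).equals(TimeValue.timeValueHours(2));

// ...while the unset default is now 30 minutes rather than the old 2 hours.
assert DataStreamsPlugin.LOOK_AHEAD_TIME.get(Settings.EMPTY).equals(TimeValue.timeValueMinutes(30));
```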
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java index 67bfae0740fb5..2b23b76670af2 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java @@ -26,9 +26,7 @@ */ public class DeleteDataStreamLifecycleAction { - public static final ActionType<AcknowledgedResponse> INSTANCE = ActionType.acknowledgedResponse( - "indices:admin/data_stream/lifecycle/delete" - ); + public static final ActionType<AcknowledgedResponse> INSTANCE = ActionType.localOnly("indices:admin/data_stream/lifecycle/delete"); private DeleteDataStreamLifecycleAction() {/* no instances */} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamLifecycleAction.java index f01d06fda8101..1a1af76315cc5 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamLifecycleAction.java @@ -40,9 +40,7 @@ */ public class PutDataStreamLifecycleAction { - public static final ActionType<AcknowledgedResponse> INSTANCE = ActionType.acknowledgedResponse( - "indices:admin/data_stream/lifecycle/put" - ); + public static final ActionType<AcknowledgedResponse> INSTANCE = ActionType.localOnly("indices:admin/data_stream/lifecycle/put"); private PutDataStreamLifecycleAction() {/* no instances */} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java index 2daff2a05940c..ece16042706a7 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java @@ -14,7 +14,7 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.util.List; @@ -40,6 +40,10 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient if (masterNodeTimeout != null) { request.masterNodeTimeout(masterNodeTimeout); } - return channel -> client.execute(GetDataStreamLifecycleStatsAction.INSTANCE, request, new RestChunkedToXContentListener<>(channel)); + return channel -> client.execute( + GetDataStreamLifecycleStatsAction.INSTANCE, + request, + new RestRefCountedChunkedToXContentListener<>(channel) + ); } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java index b3c39bc17f134..09f4b6efce633 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java @@ -16,7 +16,7 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.util.List; @@ -48,7 +48,7 @@ protected RestChannelConsumer prepareRequest(RestRequest
restRequest, NodeClient return channel -> client.execute( ExplainDataStreamLifecycleAction.INSTANCE, explainRequest, - new RestChunkedToXContentListener<>(channel) + new RestRefCountedChunkedToXContentListener<>(channel) ); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java index f65266f1b5e34..f2c514c794b32 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java @@ -15,7 +15,7 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.util.List; @@ -44,7 +44,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli return channel -> client.execute( GetDataStreamLifecycleAction.INSTANCE, getDataLifecycleRequest, - new RestChunkedToXContentListener<>(channel) + new RestRefCountedChunkedToXContentListener<>(channel) ); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java index 07a80683b24fa..d0b41c847a61d 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java @@ -87,24 +87,24 @@ public void testPickingBackingIndicesPredefinedDates() throws Exception { assertThat(backingIndex, notNullValue()); // Ensure truncate to seconds: assertThat(backingIndex.getSettings().get("index.time_series.start_time"), equalTo("2022-03-15T06:29:36.000Z")); - assertThat(backingIndex.getSettings().get("index.time_series.end_time"), equalTo("2022-03-15T10:29:36.000Z")); + assertThat(backingIndex.getSettings().get("index.time_series.end_time"), equalTo("2022-03-15T08:59:36.000Z")); // advance time and rollover: - time = time.plusSeconds(80 * 60); + time = time.plusSeconds(20 * 60); var result = rolloverOver(state, "logs-myapp", time); state = result.clusterState(); DataStream dataStream = state.getMetadata().dataStreams().get("logs-myapp"); backingIndex = state.getMetadata().index(dataStream.getIndices().get(1)); assertThat(backingIndex, notNullValue()); - assertThat(backingIndex.getSettings().get("index.time_series.start_time"), equalTo("2022-03-15T10:29:36.000Z")); - assertThat(backingIndex.getSettings().get("index.time_series.end_time"), equalTo("2022-03-15T12:29:36.000Z")); + assertThat(backingIndex.getSettings().get("index.time_series.start_time"), equalTo("2022-03-15T08:59:36.000Z")); + assertThat(backingIndex.getSettings().get("index.time_series.end_time"), equalTo("2022-03-15T09:29:36.000Z")); String secondBackingIndex = backingIndex.getIndex().getName(); // first backing index: { long start = MILLIS_FORMATTER.parseMillis("2022-03-15T06:29:36.000Z"); - long end = MILLIS_FORMATTER.parseMillis("2022-03-15T10:29:36.000Z") - 1; + long end = MILLIS_FORMATTER.parseMillis("2022-03-15T08:59:36.000Z") - 1; for (int i = 0; i < 256; i++) { String timestamp 
= MILLIS_FORMATTER.formatMillis(randomLongBetween(start, end)); var writeIndex = getWriteIndex(state, "logs-myapp", timestamp); @@ -114,14 +114,14 @@ public void testPickingBackingIndicesPredefinedDates() throws Exception { // Borderline: { - var writeIndex = getWriteIndex(state, "logs-myapp", "2022-03-15T10:29:35.999Z"); + var writeIndex = getWriteIndex(state, "logs-myapp", "2022-03-15T08:59:35.999Z"); assertThat(writeIndex.getName(), equalTo(".ds-logs-myapp-2022.03.15-000001")); } // Second backing index: { - long start = MILLIS_FORMATTER.parseMillis("2022-03-15T10:29:36.000Z"); - long end = MILLIS_FORMATTER.parseMillis("2022-03-15T12:29:36.000Z") - 1; + long start = MILLIS_FORMATTER.parseMillis("2022-03-15T08:59:36.000Z"); + long end = MILLIS_FORMATTER.parseMillis("2022-03-15T09:29:36.000Z") - 1; for (int i = 0; i < 256; i++) { String timestamp = MILLIS_FORMATTER.formatMillis(randomLongBetween(start, end)); var writeIndex = getWriteIndex(state, "logs-myapp", timestamp); @@ -131,19 +131,19 @@ public void testPickingBackingIndicesPredefinedDates() throws Exception { // Borderline (again): { - var writeIndex = getWriteIndex(state, "logs-myapp", "2022-03-15T12:29:35.999Z"); + var writeIndex = getWriteIndex(state, "logs-myapp", "2022-03-15T09:29:35.999Z"); assertThat(writeIndex.getName(), equalTo(secondBackingIndex)); } // Outside the valid temporal ranges: { var finalState = state; - var e = expectThrows(IllegalArgumentException.class, () -> getWriteIndex(finalState, "logs-myapp", "2022-03-15T12:29:36.000Z")); + var e = expectThrows(IllegalArgumentException.class, () -> getWriteIndex(finalState, "logs-myapp", "2022-03-15T09:29:36.000Z")); assertThat( e.getMessage(), equalTo( - "the document timestamp [2022-03-15T12:29:36.000Z] is outside of ranges of currently writable indices [" - + "[2022-03-15T06:29:36.000Z,2022-03-15T10:29:36.000Z][2022-03-15T10:29:36.000Z,2022-03-15T12:29:36.000Z]]" + "the document timestamp [2022-03-15T09:29:36.000Z] is outside of ranges of currently writable indices [" + + "[2022-03-15T06:29:36.000Z,2022-03-15T08:59:36.000Z][2022-03-15T08:59:36.000Z,2022-03-15T09:29:36.000Z]]" ) ); } @@ -158,24 +158,24 @@ public void testPickingBackingIndicesNanoTimestamp() throws Exception { assertThat(backingIndex, notNullValue()); // Ensure truncate to seconds and millis format: assertThat(backingIndex.getSettings().get("index.time_series.start_time"), equalTo("2022-03-15T06:29:36.000Z")); - assertThat(backingIndex.getSettings().get("index.time_series.end_time"), equalTo("2022-03-15T10:29:36.000Z")); + assertThat(backingIndex.getSettings().get("index.time_series.end_time"), equalTo("2022-03-15T08:59:36.000Z")); // advance time and rollover: - time = time.plusSeconds(80 * 60); + time = time.plusSeconds(20 * 60); var result = rolloverOver(state, "logs-myapp", time); state = result.clusterState(); DataStream dataStream = state.getMetadata().dataStreams().get("logs-myapp"); backingIndex = state.getMetadata().index(dataStream.getIndices().get(1)); assertThat(backingIndex, notNullValue()); - assertThat(backingIndex.getSettings().get("index.time_series.start_time"), equalTo("2022-03-15T10:29:36.000Z")); - assertThat(backingIndex.getSettings().get("index.time_series.end_time"), equalTo("2022-03-15T12:29:36.000Z")); + assertThat(backingIndex.getSettings().get("index.time_series.start_time"), equalTo("2022-03-15T08:59:36.000Z")); + assertThat(backingIndex.getSettings().get("index.time_series.end_time"), equalTo("2022-03-15T09:29:36.000Z")); String secondBackingIndex = 
backingIndex.getIndex().getName(); // first backing index: { long start = NANOS_FORMATTER.parseMillis("2022-03-15T06:29:36.000000000Z"); - long end = NANOS_FORMATTER.parseMillis("2022-03-15T10:29:36.000000000Z") - 1; + long end = NANOS_FORMATTER.parseMillis("2022-03-15T08:59:36.000000000Z") - 1; for (int i = 0; i < 256; i++) { String timestamp = NANOS_FORMATTER.formatMillis(randomLongBetween(start, end)); var writeIndex = getWriteIndex(state, "logs-myapp", timestamp); @@ -185,14 +185,14 @@ public void testPickingBackingIndicesNanoTimestamp() throws Exception { // Borderline: { - var writeIndex = getWriteIndex(state, "logs-myapp", "2022-03-15T10:29:35.999999999Z"); + var writeIndex = getWriteIndex(state, "logs-myapp", "2022-03-15T08:59:35.999999999Z"); assertThat(writeIndex.getName(), equalTo(".ds-logs-myapp-2022.03.15-000001")); } // Second backing index: { - long start = NANOS_FORMATTER.parseMillis("2022-03-15T10:29:36.000000000Z"); - long end = NANOS_FORMATTER.parseMillis("2022-03-15T12:29:36.000000000Z") - 1; + long start = NANOS_FORMATTER.parseMillis("2022-03-15T08:59:36.000000000Z"); + long end = NANOS_FORMATTER.parseMillis("2022-03-15T09:29:36.000000000Z") - 1; for (int i = 0; i < 256; i++) { String timestamp = NANOS_FORMATTER.formatMillis(randomLongBetween(start, end)); var writeIndex = getWriteIndex(state, "logs-myapp", timestamp); @@ -202,7 +202,7 @@ public void testPickingBackingIndicesNanoTimestamp() throws Exception { // Borderline (again): { - var writeIndex = getWriteIndex(state, "logs-myapp", "2022-03-15T12:29:35.999999999Z"); + var writeIndex = getWriteIndex(state, "logs-myapp", "2022-03-15T09:29:35.999999999Z"); assertThat(writeIndex.getName(), equalTo(secondBackingIndex)); } } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java index e622d16b5d4c9..62d07467d5086 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java @@ -39,7 +39,7 @@ public class DataStreamIndexSettingsProviderTests extends ESTestCase { private static final TimeValue DEFAULT_LOOK_BACK_TIME = TimeValue.timeValueHours(2); // default - private static final TimeValue DEFAULT_LOOK_AHEAD_TIME = TimeValue.timeValueHours(2); // default + private static final TimeValue DEFAULT_LOOK_AHEAD_TIME = TimeValue.timeValueMinutes(30); // default DataStreamIndexSettingsProvider provider; @@ -94,7 +94,6 @@ public void testGetAdditionalIndexSettingsIndexRoutingPathAlreadyDefined() throw String dataStreamName = "logs-app1"; Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS); - TimeValue lookAheadTime = TimeValue.timeValueHours(2); // default Settings settings = builder().putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "field2").build(); String mapping = """ { @@ -126,8 +125,8 @@ public void testGetAdditionalIndexSettingsIndexRoutingPathAlreadyDefined() throw List.of(new CompressedXContent(mapping)) ); assertThat(result.size(), equalTo(2)); - assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(lookAheadTime.getMillis()))); - assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(lookAheadTime.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), 
equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); } public void testGetAdditionalIndexSettingsMappingsMerging() throws Exception { @@ -135,7 +134,6 @@ public void testGetAdditionalIndexSettingsMappingsMerging() throws Exception { String dataStreamName = "logs-app1"; Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS); - TimeValue lookAheadTime = TimeValue.timeValueHours(2); // default Settings settings = Settings.EMPTY; String mapping1 = """ { @@ -193,8 +191,8 @@ public void testGetAdditionalIndexSettingsMappingsMerging() throws Exception { List.of(new CompressedXContent(mapping1), new CompressedXContent(mapping2), new CompressedXContent(mapping3)) ); assertThat(result.size(), equalTo(3)); - assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(lookAheadTime.getMillis()))); - assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(lookAheadTime.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); assertThat(IndexMetadata.INDEX_ROUTING_PATH.get(result), containsInAnyOrder("field1", "field3")); } @@ -203,7 +201,6 @@ public void testGetAdditionalIndexSettingsNoMappings() { String dataStreamName = "logs-app1"; Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS); - TimeValue lookAheadTime = TimeValue.timeValueHours(2); // default Settings settings = Settings.EMPTY; Settings result = provider.getAdditionalIndexSettings( DataStream.getDefaultBackingIndexName(dataStreamName, 1), @@ -215,8 +212,8 @@ public void testGetAdditionalIndexSettingsNoMappings() { List.of() ); assertThat(result.size(), equalTo(2)); - assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(lookAheadTime.getMillis()))); - assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(lookAheadTime.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); } public void testGetAdditionalIndexSettingsLookAheadTime() throws Exception { @@ -263,7 +260,7 @@ public void testGetAdditionalIndexSettingsLookBackTime() throws Exception { public void testGetAdditionalIndexSettingsDataStreamAlreadyCreated() throws Exception { String dataStreamName = "logs-app1"; - TimeValue lookAheadTime = TimeValue.timeValueHours(2); + TimeValue lookAheadTime = TimeValue.timeValueMinutes(30); Instant sixHoursAgo = Instant.now().minus(6, ChronoUnit.HOURS).truncatedTo(ChronoUnit.SECONDS); Instant currentEnd = sixHoursAgo.plusMillis(lookAheadTime.getMillis()); @@ -415,7 +412,6 @@ public void testGetAdditionalIndexSettingsDowngradeFromTsdb() { public void testGenerateRoutingPathFromDynamicTemplate() throws Exception { Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS); - TimeValue lookAheadTime = TimeValue.timeValueHours(2); // default String mapping = """ { "_doc": { @@ -448,14 +444,13 @@ public void testGenerateRoutingPathFromDynamicTemplate() throws Exception { """; Settings result = generateTsdbSettings(mapping, now); assertThat(result.size(), equalTo(3)); - 
assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(lookAheadTime.getMillis()))); - assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(lookAheadTime.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); assertThat(IndexMetadata.INDEX_ROUTING_PATH.get(result), containsInAnyOrder("host.id", "prometheus.labels.*")); } public void testGenerateRoutingPathFromDynamicTemplateWithMultiplePathMatchEntries() throws Exception { Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS); - TimeValue lookAheadTime = TimeValue.timeValueHours(2); // default String mapping = """ { "_doc": { @@ -488,8 +483,8 @@ public void testGenerateRoutingPathFromDynamicTemplateWithMultiplePathMatchEntri """; Settings result = generateTsdbSettings(mapping, now); assertThat(result.size(), equalTo(3)); - assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(lookAheadTime.getMillis()))); - assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(lookAheadTime.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); assertThat( IndexMetadata.INDEX_ROUTING_PATH.get(result), containsInAnyOrder("host.id", "xprometheus.labels.*", "yprometheus.labels.*") @@ -500,7 +495,6 @@ public void testGenerateRoutingPathFromDynamicTemplateWithMultiplePathMatchEntri public void testGenerateRoutingPathFromDynamicTemplate_templateWithNoPathMatch() throws Exception { Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS); - TimeValue lookAheadTime = TimeValue.timeValueHours(2); // default String mapping = """ { "_doc": { @@ -542,14 +536,13 @@ public void testGenerateRoutingPathFromDynamicTemplate_templateWithNoPathMatch() """; Settings result = generateTsdbSettings(mapping, now); assertThat(result.size(), equalTo(3)); - assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(lookAheadTime.getMillis()))); - assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(lookAheadTime.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); assertThat(IndexMetadata.INDEX_ROUTING_PATH.get(result), containsInAnyOrder("host.id", "prometheus.labels.*")); } public void testGenerateRoutingPathFromDynamicTemplate_nonKeywordTemplate() throws Exception { Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS); - TimeValue lookAheadTime = TimeValue.timeValueHours(2); // default String mapping = """ { "_doc": { @@ -590,8 +583,8 @@ public void testGenerateRoutingPathFromDynamicTemplate_nonKeywordTemplate() thro } """; Settings result = generateTsdbSettings(mapping, now); - assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(lookAheadTime.getMillis()))); - assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(lookAheadTime.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), 
equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); assertThat(IndexMetadata.INDEX_ROUTING_PATH.get(result), containsInAnyOrder("host.id", "prometheus.labels.*")); assertEquals(2, IndexMetadata.INDEX_ROUTING_PATH.get(result).size()); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java index 928512f659039..0ef4ea5ef451a 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java @@ -12,8 +12,8 @@ import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; -import org.elasticsearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.datastreams.DataStreamsStatsAction; import org.elasticsearch.action.datastreams.DeleteDataStreamAction; @@ -233,8 +233,8 @@ private String createDataStream(boolean hidden) throws Exception { .build(); assertAcked( client().execute( - PutComposableIndexTemplateAction.INSTANCE, - new PutComposableIndexTemplateAction.Request(dataStreamName + "_template").indexTemplate(template) + TransportPutComposableIndexTemplateAction.TYPE, + new TransportPutComposableIndexTemplateAction.Request(dataStreamName + "_template").indexTemplate(template) ) ); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, new CreateDataStreamAction.Request(dataStreamName))); @@ -279,8 +279,8 @@ private void deleteDataStream(String dataStreamName) { assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { dataStreamName }))); assertAcked( client().execute( - DeleteComposableIndexTemplateAction.INSTANCE, - new DeleteComposableIndexTemplateAction.Request(dataStreamName + "_template") + TransportDeleteComposableIndexTemplateAction.TYPE, + new TransportDeleteComposableIndexTemplateAction.Request(dataStreamName + "_template") ) ); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java index 4c333c3f0ab8d..4f2df2c690bc8 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java @@ -244,7 +244,7 @@ public void testRolloverAndMigrateDataStream() throws Exception { Instant endTime = IndexSettings.TIME_SERIES_END_TIME.get(im.getSettings()); assertThat(startTime.isBefore(endTime), is(true)); assertThat(startTime, equalTo(now.minus(2, ChronoUnit.HOURS))); - assertThat(endTime, equalTo(now.plus(2, 
ChronoUnit.HOURS))); + assertThat(endTime, equalTo(now.plus(30, ChronoUnit.MINUTES))); } finally { testThreadPool.shutdown(); } @@ -339,7 +339,7 @@ public void testChangingIndexModeFromTimeSeriesToSomethingElseNoEffectOnExisting endTime = IndexSettings.TIME_SERIES_END_TIME.get(im.getSettings()); assertThat(startTime.isBefore(endTime), is(true)); assertThat(startTime, equalTo(now.minus(2, ChronoUnit.HOURS))); - assertThat(endTime, equalTo(now.plus(2, ChronoUnit.HOURS))); + assertThat(endTime, equalTo(now.plus(30, ChronoUnit.MINUTES))); } finally { testThreadPool.shutdown(); } @@ -416,7 +416,7 @@ public void testRolloverClusterStateWithBrokenOlderTsdbDataStream() throws Excep var lastStartTime = IndexSettings.TIME_SERIES_START_TIME.get(im.getSettings()); var kastEndTime = IndexSettings.TIME_SERIES_END_TIME.get(im.getSettings()); assertThat(lastStartTime, equalTo(now.minus(2, ChronoUnit.HOURS).truncatedTo(ChronoUnit.SECONDS))); - assertThat(kastEndTime, equalTo(now.plus(2, ChronoUnit.HOURS).truncatedTo(ChronoUnit.SECONDS))); + assertThat(kastEndTime, equalTo(now.plus(30, ChronoUnit.MINUTES).truncatedTo(ChronoUnit.SECONDS))); assertThat(im.getIndexMode(), equalTo(IndexMode.TIME_SERIES)); } } finally { diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java index 1a9287c1d5ee8..97959fa385241 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.Template; @@ -94,7 +94,7 @@ private void createTemplate(boolean tsdb) throws IOException { } }"""; var templateSettings = Settings.builder().put("index.mode", tsdb ? 
"time_series" : "standard"); - var request = new PutComposableIndexTemplateAction.Request("id"); + var request = new TransportPutComposableIndexTemplateAction.Request("id"); request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(List.of("k8s*")) @@ -102,7 +102,7 @@ private void createTemplate(boolean tsdb) throws IOException { .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) .build() ); - client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); } private static String formatInstant(Instant instant) { diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java index c383991dba19c..dbb48ea3ddc26 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java @@ -62,7 +62,7 @@ public void testUpdateTimeSeriesTemporalRange() { String dataStreamName = "logs-app1"; Instant now = Instant.now().truncatedTo(ChronoUnit.MILLIS); Instant start = now.minus(2, ChronoUnit.HOURS); - Instant end = now.plus(3, ChronoUnit.HOURS); + Instant end = now.plus(40, ChronoUnit.MINUTES); Metadata metadata = DataStreamTestHelper.getClusterStateWithDataStream( dataStreamName, List.of(new Tuple<>(start.minus(4, ChronoUnit.HOURS), start), new Tuple<>(start, end)) @@ -89,12 +89,12 @@ public void testUpdateTimeSeriesTemporalRange() { assertThat(getEndTime(result, dataStreamName, 1), not(equalTo(previousEndTime2))); assertThat( getEndTime(result, dataStreamName, 1), - equalTo(now.plus(2, ChronoUnit.HOURS).plus(5, ChronoUnit.MINUTES).truncatedTo(ChronoUnit.SECONDS)) + equalTo(now.plus(30, ChronoUnit.MINUTES).plus(5, ChronoUnit.MINUTES).truncatedTo(ChronoUnit.SECONDS)) ); } public void testUpdateTimeSeriesTemporalRange_customLookAHeadTime() { - int lookAHeadTimeMinutes = randomIntBetween(30, 180); + int lookAHeadTimeMinutes = randomIntBetween(30, 120); TemporalAmount lookAHeadTime = Duration.ofMinutes(lookAHeadTimeMinutes); int timeSeriesPollIntervalMinutes = randomIntBetween(1, 10); TemporalAmount timeSeriesPollInterval = Duration.ofMinutes(timeSeriesPollIntervalMinutes); @@ -133,7 +133,7 @@ public void testUpdateTimeSeriesTemporalRange_NoUpdateBecauseReplicated() { String dataStreamName = "logs-app1"; Instant now = Instant.now().truncatedTo(ChronoUnit.MILLIS); Instant start = now.minus(2, ChronoUnit.HOURS); - Instant end = now.plus(3, ChronoUnit.HOURS); + Instant end = now.plus(31, ChronoUnit.MINUTES); Metadata metadata = DataStreamTestHelper.getClusterStateWithDataStream( dataStreamName, List.of(new Tuple<>(start.minus(4, ChronoUnit.HOURS), start), new Tuple<>(start, end)) @@ -182,25 +182,25 @@ public void testUpdateTimeSeriesTemporalRangeMultipleDataStream() { String dataStreamName3 = "logs-app3"; Instant now = Instant.now().truncatedTo(ChronoUnit.MILLIS); - Instant start = now.minus(6, ChronoUnit.HOURS); + Instant start = now.minus(90, ChronoUnit.MINUTES); Metadata.Builder mbBuilder = new Metadata.Builder(); for (String dataStreamName : List.of(dataStreamName1, dataStreamName2, dataStreamName3)) { - Instant end = start.plus(2, ChronoUnit.HOURS); + Instant end = start.plus(30, ChronoUnit.MINUTES); 
DataStreamTestHelper.getClusterStateWithDataStream(mbBuilder, dataStreamName, List.of(new Tuple<>(start, end))); start = end; } - now = now.minus(3, ChronoUnit.HOURS); + now = now.minus(45, ChronoUnit.MINUTES); ClusterState before = ClusterState.builder(ClusterState.EMPTY_STATE).metadata(mbBuilder).build(); ClusterState result = instance.updateTimeSeriesTemporalRange(before, now); assertThat(result, not(sameInstance(before))); assertThat( getEndTime(result, dataStreamName1, 0), - equalTo(now.plus(2, ChronoUnit.HOURS).plus(5, ChronoUnit.MINUTES).truncatedTo(ChronoUnit.SECONDS)) + equalTo(now.plus(30, ChronoUnit.MINUTES).plus(5, ChronoUnit.MINUTES).truncatedTo(ChronoUnit.SECONDS)) ); assertThat( getEndTime(result, dataStreamName2, 0), - equalTo(now.plus(2, ChronoUnit.HOURS).plus(5, ChronoUnit.MINUTES).truncatedTo(ChronoUnit.SECONDS)) + equalTo(now.plus(30, ChronoUnit.MINUTES).plus(5, ChronoUnit.MINUTES).truncatedTo(ChronoUnit.SECONDS)) ); assertThat(getEndTime(result, dataStreamName3, 0), equalTo(start)); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleFixtures.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleFixtures.java index 6833f2222b585..426905aecde4e 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleFixtures.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleFixtures.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStream; @@ -81,7 +81,7 @@ static void putComposableIndexTemplate( @Nullable Map<String, Object> metadata, @Nullable DataStreamLifecycle lifecycle ) throws IOException { - PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(id); request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(patterns) @@ -90,7 +90,7 @@ static void putComposableIndexTemplate( .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build() ); - assertTrue(client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet().isAcknowledged()); + assertTrue(client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet().isAcknowledged()); } static DataStreamLifecycle randomLifecycle() { diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java index 273b76955060b..4f28c9bb14f80 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java @@ -48,9 +48,10 @@ public ValueSource getValue() { } @Override - public IngestDocument execute(IngestDocument ingestDocument) throws Exception { - ingestDocument.appendFieldValue(field, value,
allowDuplicates); - return ingestDocument; + public IngestDocument execute(IngestDocument document) throws Exception { + String path = document.renderTemplate(field); + document.appendFieldValue(path, value, allowDuplicates); + return document; } @Override diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java index 7b20cfbf0b398..3bf82be24330e 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java @@ -8,8 +8,6 @@ package org.elasticsearch.ingest.common; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.core.Strings; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.ConfigurationUtils; @@ -20,7 +18,6 @@ import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; -import java.io.InputStream; import java.util.Locale; import java.util.Map; @@ -90,10 +87,11 @@ public ConflictStrategy getAddToRootConflictStrategy() { } public static Object apply(Object fieldValue, boolean allowDuplicateKeys, boolean strictJsonParsing) { - BytesReference bytesRef = fieldValue == null ? new BytesArray("null") : new BytesArray(fieldValue.toString()); try ( - InputStream stream = bytesRef.streamInput(); - XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, stream) + XContentParser parser = JsonXContent.jsonXContent.createParser( + XContentParserConfiguration.EMPTY, + fieldValue == null ? "null" : fieldValue.toString() + ) ) { parser.allowDuplicateKeys(allowDuplicateKeys); XContentParser.Token token = parser.nextToken(); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java index c27bc4de85ec4..fa86bcda5047b 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java @@ -64,7 +64,7 @@ private void fieldsToRemoveProcessor(IngestDocument document) { } } else { for (TemplateScript.Factory field : fieldsToRemove) { - document.removeField(field); + document.removeField(document.renderTemplate(field)); } } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java index 84e66a3134b69..b629f00545aec 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.IngestDocument; @@ -25,7 +26,6 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; -import java.io.InputStream; import java.util.Arrays; import java.util.Map; @@ -108,9 +108,11 @@ public ScriptProcessor create( ) throws 
Exception { try ( XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent).map(config); - InputStream stream = BytesReference.bytes(builder).streamInput(); - XContentParser parser = XContentType.JSON.xContent() - .createParser(XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), + BytesReference.bytes(builder), + XContentType.JSON + ) ) { Script script = Script.parse(parser); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java index 229b796b89c75..2d7db39f3738e 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java @@ -78,12 +78,13 @@ public boolean isIgnoreEmptyValue() { @Override public IngestDocument execute(IngestDocument document) { - if (overrideEnabled || document.hasField(field) == false || document.getFieldValue(field, Object.class) == null) { + String path = document.renderTemplate(field); + if (overrideEnabled || document.hasField(path) == false || document.getFieldValue(path, Object.class) == null) { if (copyFrom != null) { Object fieldValue = document.getFieldValue(copyFrom, Object.class, ignoreEmptyValue); - document.setFieldValue(field, IngestDocument.deepCopy(fieldValue), ignoreEmptyValue); + document.setFieldValue(path, IngestDocument.deepCopy(fieldValue), ignoreEmptyValue); } else { - document.setFieldValue(field, value, ignoreEmptyValue); + document.setFieldValue(path, value, ignoreEmptyValue); } } return document; diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java index f1b7433e10a0f..28083aa0551ee 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java @@ -309,27 +309,39 @@ public void testConvertBooleanError() throws Exception { } public void testConvertIpV4() throws Exception { - // valid ipv4 address - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); - String fieldName = RandomDocumentPicks.randomFieldName(random()); - String targetField = randomValueOtherThan(fieldName, () -> RandomDocumentPicks.randomFieldName(random())); - String validIpV4 = "192.168.1.1"; - ingestDocument.setFieldValue(fieldName, validIpV4); - - Processor processor = new ConvertProcessor(randomAlphaOfLength(10), null, fieldName, targetField, Type.IP, false); - processor.execute(ingestDocument); - assertThat(ingestDocument.getFieldValue(targetField, String.class), equalTo(validIpV4)); - - // invalid ipv4 address - IngestDocument ingestDocument2 = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); - fieldName = RandomDocumentPicks.randomFieldName(random()); - targetField = randomValueOtherThan(fieldName, () -> RandomDocumentPicks.randomFieldName(random())); - String invalidIpV4 = "192.168.1.256"; - ingestDocument2.setFieldValue(fieldName, invalidIpV4); + { + // valid ipv4 address + IngestDocument ingestDocument = 
RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); + String fieldName = RandomDocumentPicks.randomFieldName(random()); + // We can't have targetField be a nested field under fieldName since we're going to set a top-level value for fieldName: + String targetField = randomValueOtherThanMany( + targetFieldName -> fieldName.equals(targetFieldName) || targetFieldName.startsWith(fieldName + "."), + () -> RandomDocumentPicks.randomFieldName(random()) + ); + String validIpV4 = "192.168.1.1"; + ingestDocument.setFieldValue(fieldName, validIpV4); + + Processor processor = new ConvertProcessor(randomAlphaOfLength(10), null, fieldName, targetField, Type.IP, false); + processor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue(targetField, String.class), equalTo(validIpV4)); + } - Processor processor2 = new ConvertProcessor(randomAlphaOfLength(10), null, fieldName, targetField, Type.IP, false); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor2.execute(ingestDocument2)); - assertThat(e.getMessage(), containsString("'" + invalidIpV4 + "' is not an IP string literal.")); + { + // invalid ipv4 address + IngestDocument ingestDocument2 = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); + String fieldName = RandomDocumentPicks.randomFieldName(random()); + // We can't have targetField be a nested field under fieldName since we're going to set a top-level value for fieldName: + String targetField = randomValueOtherThanMany( + targetFieldName -> fieldName.equals(targetFieldName) || targetFieldName.startsWith(fieldName + "."), + () -> RandomDocumentPicks.randomFieldName(random()) + ); + String invalidIpV4 = "192.168.1.256"; + ingestDocument2.setFieldValue(fieldName, invalidIpV4); + + Processor processor2 = new ConvertProcessor(randomAlphaOfLength(10), null, fieldName, targetField, Type.IP, false); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor2.execute(ingestDocument2)); + assertThat(e.getMessage(), containsString("'" + invalidIpV4 + "' is not an IP string literal.")); + } } public void testConvertIpV6() throws Exception { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java index 1e40345208a1b..0b20fbc22e1cc 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java @@ -120,7 +120,7 @@ public void testMatchWithoutCaptures() throws Exception { public void testNullField() { String fieldName = RandomDocumentPicks.randomFieldName(random()); IngestDocument doc = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); - doc.setFieldValue(fieldName, null); + doc.setFieldValue(fieldName, (Object) null); GrokProcessor processor = new GrokProcessor( randomAlphaOfLength(10), null, @@ -138,7 +138,7 @@ public void testNullField() { public void testNullFieldWithIgnoreMissing() throws Exception { String fieldName = RandomDocumentPicks.randomFieldName(random()); IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); - originalIngestDocument.setFieldValue(fieldName, null); + originalIngestDocument.setFieldValue(fieldName, (Object) null); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); GrokProcessor processor = 
new GrokProcessor( randomAlphaOfLength(10), diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java index 1d10c30909906..f472e9d9bacd4 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java @@ -123,7 +123,7 @@ public void testRenameNewFieldAlreadyExists() throws Exception { public void testRenameExistingFieldNullValue() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); String fieldName = RandomDocumentPicks.randomFieldName(random()); - ingestDocument.setFieldValue(fieldName, null); + ingestDocument.setFieldValue(fieldName, (Object) null); String newFieldName = randomValueOtherThanMany(ingestDocument::hasField, () -> RandomDocumentPicks.randomFieldName(random())); Processor processor = createRenameProcessor(fieldName, newFieldName, false); processor.execute(ingestDocument); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorFactoryTests.java index bd6a29181c657..09c5c58e3664d 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorFactoryTests.java @@ -167,4 +167,17 @@ public void testMediaType() throws Exception { ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> factory.create(null, processorTag, null, config2)); assertThat(e.getMessage(), containsString("property does not contain a supported media type [" + expectedMediaType + "]")); } + + public void testCreateWithEmptyField() throws Exception { + // edge case: it's valid (according to the current validation) to *create* a set processor that has an empty string as its 'field'. + // it will fail at ingest execution time, but we don't reject it at pipeline creation time. 
+ Map<String, Object> config = new HashMap<>(); + config.put("field", ""); + config.put("value", "value1"); + String processorTag = randomAlphaOfLength(10); + SetProcessor setProcessor = factory.create(null, processorTag, null, config); + assertThat(setProcessor.getTag(), equalTo(processorTag)); + assertThat(setProcessor.getField().newInstance(Map.of()).execute(), equalTo("")); + assertThat(setProcessor.getValue().copyAndResolve(Map.of()), equalTo("value1")); + } }
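The factory test above and the YAML test further below exercise the same behavior change: `set`, `append`, and `remove` now render the configured `field` as a template before using it as a path, so `"field": "{{value_bar}}"` writes to whatever field `value_bar` names. A self-contained mimic of that resolution step (all class and method names here are made up for illustration; this is not Elasticsearch's mustache engine):

```java
import java.util.HashMap;
import java.util.Map;

class TemplatedFieldSketch {
    // Resolve a single "{{name}}" template against the document source.
    static String render(String template, Map<String, Object> source) {
        if (template.startsWith("{{") && template.endsWith("}}")) {
            Object resolved = source.get(template.substring(2, template.length() - 2));
            return resolved == null ? "" : resolved.toString();
        }
        return template; // plain field names pass through unchanged
    }

    public static void main(String[] args) {
        Map<String, Object> doc = new HashMap<>(Map.of("bar", 2, "value_bar", "bar"));
        String path = render("{{value_bar}}", doc); // -> "bar"
        doc.put(path, 3); // the effect of: set { "field": "{{value_bar}}", "value": 3 }
        System.out.println(doc); // e.g. {bar=3, value_bar=bar}
    }
}
```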
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java index 5973e4fe5741c..6cef9d3ecde8a 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java @@ -61,15 +61,11 @@ public void testSetFieldsTypeMismatch() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); ingestDocument.setFieldValue("field", "value"); Processor processor = createSetProcessor("field.inner", "value", null, true, false); - try { - processor.execute(ingestDocument); - fail("processor execute should have failed"); - } catch (IllegalArgumentException e) { - assertThat( - e.getMessage(), - equalTo("cannot set [inner] with parent object of type [java.lang.String] as " + "part of path [field.inner]") - ); - } + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); + assertThat( + exception.getMessage(), + equalTo("cannot set [inner] with parent object of type [java.lang.String] as part of path [field.inner]") + ); } public void testSetNewFieldWithOverrideDisabled() throws Exception { @@ -184,20 +180,6 @@ public void testCopyFromOtherField() throws Exception { } } - private static void assertMapEquals(Object actual, Object expected) { - if (expected instanceof Map<?, ?> expectedMap) { - Map<?, ?> actualMap = (Map<?, ?>) actual; - assertThat(actualMap.keySet().toArray(), arrayContainingInAnyOrder(expectedMap.keySet().toArray())); - for (Map.Entry<?, ?> entry : actualMap.entrySet()) { - if (entry.getValue() instanceof Map) { - assertMapEquals(entry.getValue(), expectedMap.get(entry.getKey())); - } else { - assertThat(entry.getValue(), equalTo(expectedMap.get(entry.getKey()))); - } - } - } - } - public void testCopyFromDeepCopiesNonPrimitiveMutableTypes() throws Exception { final String originalField = "originalField"; final String targetField = "targetField"; @@ -256,6 +238,15 @@ public void testCopyFromDeepCopiesNonPrimitiveMutableTypes() throws Exception { assertThat(ingestDocument.getFieldValue(targetField, Object.class), equalTo(preservedDate)); } + public void testSetEmptyField() { + // edge case: it's valid (according to the current validation) to *create* a set processor that has an empty string as its 'field', + // but it will fail at ingest execution time. + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + Processor processor = createSetProcessor("", "some_value", null, false, false); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); + assertThat(exception.getMessage(), equalTo("path cannot be null nor empty")); + } + private static Processor createSetProcessor( String fieldName, Object fieldValue, @@ -273,4 +264,18 @@ private static Processor createSetProcessor( ignoreEmptyValue ); } + + private static void assertMapEquals(Object actual, Object expected) { + if (expected instanceof Map<?, ?> expectedMap) { + Map<?, ?> actualMap = (Map<?, ?>) actual; + assertThat(actualMap.keySet().toArray(), arrayContainingInAnyOrder(expectedMap.keySet().toArray())); + for (Map.Entry<?, ?> entry : actualMap.entrySet()) { + if (entry.getValue() instanceof Map) { + assertMapEquals(entry.getValue(), expectedMap.get(entry.getKey())); + } else { + assertThat(entry.getValue(), equalTo(expectedMap.get(entry.getKey()))); + } + } + } + } } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/270_set_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/270_set_processor.yml index 594ff52c2b27a..f74e9a5752b80 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/270_set_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/270_set_processor.yml @@ -227,3 +227,71 @@ teardown: - match: { _source.foo: "hello" } - match: { _source.method_call_is_ignored: "" } - match: { _source.missing_method_is_ignored: "" } + +--- +"Test set processor with mustache edge cases": + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors" : [ + { + "script": { + "description": "Set a field 'foo' with a value of '{{bar}}' -- no mustache here, just strings", + "lang": "painless", + "source": "ctx.foo = '{{bar}}'" + } + }, + { + "set": { + "description": "Dereference the foo field via actual mustache", + "field": "result_1", + "value": "{{foo}}" + } + }, + { + "set": { + "description": "Dereference the foo field via copy_from", + "field": "result_2", + "copy_from": "foo" + } + }, + { + "set": { + "description": "Copy the original bar value into old_bar", + "field": "old_bar", + "copy_from": "bar" + } + }, + { + "set": { + "description": "Set whatever field value_bar refers to (it's bar) to 3", + "field": "{{value_bar}}", + "value": 3 + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: "1" + pipeline: "1" + body: { + foo: 1, + bar: 2, + value_bar: "bar" + } + + - do: + get: + index: test + id: "1" + - match: { _source.foo: "{{bar}}" } + - match: { _source.result_1: "{{bar}}" } + - match: { _source.result_2: "{{bar}}" } + - match: { _source.old_bar: 2 } + - match: { _source.bar: 3 } diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index c44832ef7e2ff..f755a27b478cc 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -27,12 +27,12 @@ tasks.named('internalClusterTestTestingConventions').configure { } dependencies { - implementation('com.maxmind.geoip2:geoip2:4.0.0') + implementation('com.maxmind.geoip2:geoip2:4.2.0') // geoip2 dependencies: runtimeOnly("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") runtimeOnly("com.fasterxml.jackson.core:jackson-databind:${versions.jackson}")
runtimeOnly("com.fasterxml.jackson.core:jackson-core:${versions.jackson}") - implementation('com.maxmind.db:maxmind-db:3.0.0') + implementation('com.maxmind.db:maxmind-db:3.1.0') testImplementation 'org.elasticsearch:geolite2-databases:20191119' internalClusterTestImplementation project(':modules:reindex') diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java index ab2d96c7d198d..ec17915f7d622 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java @@ -464,15 +464,19 @@ void retrieveDatabase( // so it is ok if this happens in a blocking manner on a thread from generic thread pool. // This makes the code easier to understand and maintain. SearchResponse searchResponse = client.search(searchRequest).actionGet(); - SearchHit[] hits = searchResponse.getHits().getHits(); + try { + SearchHit[] hits = searchResponse.getHits().getHits(); - if (searchResponse.getHits().getHits().length == 0) { - failureHandler.accept(new ResourceNotFoundException("chunk document with id [" + id + "] not found")); - return; + if (searchResponse.getHits().getHits().length == 0) { + failureHandler.accept(new ResourceNotFoundException("chunk document with id [" + id + "] not found")); + return; + } + byte[] data = (byte[]) hits[0].getSourceAsMap().get("data"); + md.update(data); + chunkConsumer.accept(data); + } finally { + searchResponse.decRef(); } - byte[] data = (byte[]) hits[0].getSourceAsMap().get("data"); - md.update(data); - chunkConsumer.accept(data); } String actualMd5 = MessageDigests.toHexString(md.digest()); if (Objects.equals(expectedMd5, actualMd5)) { diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java index 58089f792226a..c7dbee47ea823 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -38,6 +37,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedRunnable; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; @@ -57,6 +58,7 @@ import org.junit.After; import org.junit.Before; import org.mockito.ArgumentCaptor; +import org.mockito.stubbing.Answer; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; @@ -68,11 +70,13 @@ import java.security.MessageDigest; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.UUID; +import 
java.util.concurrent.CopyOnWriteArrayList; import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -109,6 +113,8 @@ public class DatabaseNodeServiceTests extends ESTestCase { private IngestService ingestService; private ClusterService clusterService; + private final Collection<Releasable> toRelease = new CopyOnWriteArrayList<>(); + @Before public void setup() throws IOException { final Path geoIpConfigDir = createTempDir(); @@ -133,6 +139,8 @@ public void setup() throws IOException { public void cleanup() { resourceWatcherService.close(); threadPool.shutdownNow(); + Releasables.close(toRelease); + toRelease.clear(); } public void testCheckDatabases() throws Exception { @@ -321,19 +329,14 @@ private String mockSearches(String databaseName, int firstChunk, int lastChunk) } SearchHits hits = new SearchHits(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f); - SearchResponse searchResponse = new SearchResponse( - new SearchResponseSections(hits, null, null, false, null, null, 0), - null, - 1, - 1, - 0, - 1L, - null, - null - ); + SearchResponse searchResponse = new SearchResponse(hits, null, null, false, null, null, 0, null, 1, 1, 0, 1L, null, null); + toRelease.add(searchResponse::decRef); @SuppressWarnings("unchecked") ActionFuture<SearchResponse> actionFuture = mock(ActionFuture.class); - when(actionFuture.actionGet()).thenReturn(searchResponse); + when(actionFuture.actionGet()).thenAnswer((Answer<SearchResponse>) invocation -> { + searchResponse.incRef(); + return searchResponse; + }); requestMap.put(databaseName + "_" + i, actionFuture); } when(client.search(any())).thenAnswer(invocationOnMock -> { diff --git a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/DeviceTypeParser.java b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/DeviceTypeParser.java index 40e54c8fe5f7e..6117ebc6aa319 100644 --- a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/DeviceTypeParser.java +++ b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/DeviceTypeParser.java @@ -39,23 +39,24 @@ public class DeviceTypeParser { private final HashMap<String, ArrayList<DeviceTypeSubPattern>> deviceTypePatterns = new HashMap<>(); public void init(InputStream regexStream) throws IOException { - XContentParser yamlParser = XContentFactory.xContent(XContentType.YAML) - .createParser(XContentParserConfiguration.EMPTY, regexStream); - - XContentParser.Token token = yamlParser.nextToken(); - - if (token == XContentParser.Token.START_OBJECT) { - token = yamlParser.nextToken(); - - for (; token != null; token = yamlParser.nextToken()) { - String currentName = yamlParser.currentName(); - if (token == XContentParser.Token.FIELD_NAME && patternListKeys.contains(currentName)) { - List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); - ArrayList<DeviceTypeSubPattern> subPatterns = new ArrayList<>(); - for (Map<String, String> map : parserConfigurations) { - subPatterns.add(new DeviceTypeSubPattern(Pattern.compile((map.get("regex"))), map.get("replacement"))); + try ( + XContentParser yamlParser = XContentFactory.xContent(XContentType.YAML) + .createParser(XContentParserConfiguration.EMPTY, regexStream) + ) { + XContentParser.Token token = yamlParser.nextToken(); + if (token == XContentParser.Token.START_OBJECT) { + token = yamlParser.nextToken(); + + for (; token != null; token = yamlParser.nextToken()) { + String currentName = yamlParser.currentName(); + if (token == XContentParser.Token.FIELD_NAME && patternListKeys.contains(currentName)) { + List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); + ArrayList<DeviceTypeSubPattern> subPatterns = new ArrayList<>(); + for (Map<String, String> map : parserConfigurations) { + subPatterns.add(new DeviceTypeSubPattern(Pattern.compile((map.get("regex"))), map.get("replacement"))); + } + deviceTypePatterns.put(currentName, subPatterns); } - deviceTypePatterns.put(currentName, subPatterns); } } } diff --git a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java index 41ced0c7ff4cc..515c31735c313 100644 --- a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java +++ b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java @@ -48,59 +48,62 @@ final class UserAgentParser { private void init(InputStream regexStream) throws IOException { // EMPTY is safe here because we don't use namedObject - XContentParser yamlParser = XContentFactory.xContent(XContentType.YAML) - .createParser(XContentParserConfiguration.EMPTY, regexStream); + try ( + XContentParser yamlParser = XContentFactory.xContent(XContentType.YAML) + .createParser(XContentParserConfiguration.EMPTY, regexStream) + ) { - XContentParser.Token token = yamlParser.nextToken(); + XContentParser.Token token = yamlParser.nextToken(); - if (token == XContentParser.Token.START_OBJECT) { - token = yamlParser.nextToken(); + if (token == XContentParser.Token.START_OBJECT) { + token = yamlParser.nextToken(); - for (; token != null; token = yamlParser.nextToken()) { - if (token == XContentParser.Token.FIELD_NAME && yamlParser.currentName().equals("user_agent_parsers")) { - List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); - - for (Map<String, String> map : parserConfigurations) { - uaPatterns.add( - new UserAgentSubpattern( - compilePattern(map.get("regex"), map.get("regex_flag")), - map.get("family_replacement"), - map.get("v1_replacement"), - map.get("v2_replacement"), - map.get("v3_replacement"), - map.get("v4_replacement") - ) - ); - } - } else if (token == XContentParser.Token.FIELD_NAME && yamlParser.currentName().equals("os_parsers")) { - List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); - - for (Map<String, String> map : parserConfigurations) { - osPatterns.add( - new UserAgentSubpattern( - compilePattern(map.get("regex"), map.get("regex_flag")), - map.get("os_replacement"), - map.get("os_v1_replacement"), - map.get("os_v2_replacement"), - map.get("os_v3_replacement"), - map.get("os_v4_replacement") - ) - ); - } - } else if (token == XContentParser.Token.FIELD_NAME && yamlParser.currentName().equals("device_parsers")) { - List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); - - for (Map<String, String> map : parserConfigurations) { - devicePatterns.add( - new UserAgentSubpattern( - compilePattern(map.get("regex"), map.get("regex_flag")), - map.get("device_replacement"), - null, - null, - null, - null - ) - ); + for (; token != null; token = yamlParser.nextToken()) { + if (token == XContentParser.Token.FIELD_NAME && yamlParser.currentName().equals("user_agent_parsers")) { + List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); + + for (Map<String, String> map : parserConfigurations) { + uaPatterns.add( + new UserAgentSubpattern( + compilePattern(map.get("regex"), map.get("regex_flag")), + map.get("family_replacement"), + map.get("v1_replacement"), + map.get("v2_replacement"), + map.get("v3_replacement"), + map.get("v4_replacement") + ) + ); + } + } else if (token == XContentParser.Token.FIELD_NAME && yamlParser.currentName().equals("os_parsers")) { + List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); + + for (Map<String, String> map : parserConfigurations) { + osPatterns.add( + new UserAgentSubpattern( + compilePattern(map.get("regex"), map.get("regex_flag")), + map.get("os_replacement"), + map.get("os_v1_replacement"), + map.get("os_v2_replacement"), + map.get("os_v3_replacement"), + map.get("os_v4_replacement") + ) + ); + } + } else if (token == XContentParser.Token.FIELD_NAME && yamlParser.currentName().equals("device_parsers")) { + List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); + + for (Map<String, String> map : parserConfigurations) { + devicePatterns.add( + new UserAgentSubpattern( + compilePattern(map.get("regex"), map.get("regex_flag")), + map.get("device_replacement"), + null, + null, + null, + null + ) + ); + } } } } diff --git a/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/DeviceTypeParserTests.java b/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/DeviceTypeParserTests.java index 6543ef2095b87..582a40fb8a210 100644 --- a/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/DeviceTypeParserTests.java +++ b/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/DeviceTypeParserTests.java @@ -31,36 +31,39 @@ public class DeviceTypeParserTests extends ESTestCase { private static DeviceTypeParser deviceTypeParser; private ArrayList<HashMap<String, String>> readTestDevices(InputStream regexStream, String keyName) throws IOException { - XContentParser yamlParser = XContentFactory.xContent(XContentType.YAML) - .createParser(XContentParserConfiguration.EMPTY, regexStream); + try ( + XContentParser yamlParser = XContentFactory.xContent(XContentType.YAML) + .createParser(XContentParserConfiguration.EMPTY, regexStream) + ) { - XContentParser.Token token = yamlParser.nextToken(); + XContentParser.Token token = yamlParser.nextToken(); - ArrayList<HashMap<String, String>> testDevices = new ArrayList<>(); + ArrayList<HashMap<String, String>> testDevices = new ArrayList<>(); - if (token == XContentParser.Token.START_OBJECT) { - token = yamlParser.nextToken(); + if (token == XContentParser.Token.START_OBJECT) { + token = yamlParser.nextToken(); - for (; token != null; token = yamlParser.nextToken()) { - String currentName = yamlParser.currentName(); - if (token == XContentParser.Token.FIELD_NAME && currentName.equals(keyName)) { - List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); + for (; token != null; token = yamlParser.nextToken()) { + String currentName = yamlParser.currentName(); + if (token == XContentParser.Token.FIELD_NAME && currentName.equals(keyName)) { + List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); - for (Map<String, String> map : parserConfigurations) { - HashMap<String, String> testDevice = new HashMap<>(); + for (Map<String, String> map : parserConfigurations) { + HashMap<String, String> testDevice = new HashMap<>(); - testDevice.put("type", map.get("type")); - testDevice.put("os", map.get("os")); - testDevice.put("browser", map.get("browser")); - testDevice.put("device", map.get("device")); - testDevices.add(testDevice); + testDevice.put("type", map.get("type")); + testDevice.put("os", map.get("os")); + testDevice.put("browser", map.get("browser")); + testDevice.put("device", map.get("device")); + testDevices.add(testDevice); + } } } } - } - return testDevices; + return testDevices; + } } private static VersionedName getVersionName(String name) { diff --git 
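Aside: the parser changes in the three ingest files above keep re-applying one idiom, so here it is in isolation. XContentParser is Closeable, and the pre-change code leaked it whenever tokenizing threw. A minimal, self-contained sketch under stated assumptions (the Demo class and inline YAML string are hypothetical; assumes the Elasticsearch x-content artifact is on the classpath):

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import org.elasticsearch.xcontent.XContentFactory;
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentParserConfiguration;
import org.elasticsearch.xcontent.XContentType;

// Hypothetical demo class, not part of the change set above.
public class ParserCloseDemo {
    public static void main(String[] args) throws IOException {
        InputStream yaml = new ByteArrayInputStream("os_parsers: []".getBytes(StandardCharsets.UTF_8));
        // try-with-resources closes the parser even if nextToken() throws,
        // which is what the rewritten init() methods above guarantee.
        try (
            XContentParser parser = XContentFactory.xContent(XContentType.YAML)
                .createParser(XContentParserConfiguration.EMPTY, yaml)
        ) {
            System.out.println("first token: " + parser.nextToken());
        }
    }
}
```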
a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java index d9e346454aefe..0c3376c9c8a90 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java @@ -31,6 +31,7 @@ import java.util.Map; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsString; @@ -141,42 +142,43 @@ public void testBasic() throws Exception { search5.setScriptParams(params5); multiRequest.add(search5); - MultiSearchTemplateResponse response = client().execute(MustachePlugin.MULTI_SEARCH_TEMPLATE_ACTION, multiRequest).get(); - assertThat(response.getResponses(), arrayWithSize(5)); - assertThat(response.getTook().millis(), greaterThan(0L)); - - MultiSearchTemplateResponse.Item response1 = response.getResponses()[0]; - assertThat(response1.isFailure(), is(false)); - SearchTemplateResponse searchTemplateResponse1 = response1.getResponse(); - assertThat(searchTemplateResponse1.hasResponse(), is(true)); - assertHitCount(searchTemplateResponse1.getResponse(), (numDocs / 2) + (numDocs % 2)); - assertThat(searchTemplateResponse1.getSource().utf8ToString(), equalTo(""" - {"query":{"match":{"odd":"true"}}}""")); - - MultiSearchTemplateResponse.Item response2 = response.getResponses()[1]; - assertThat(response2.isFailure(), is(false)); - SearchTemplateResponse searchTemplateResponse2 = response2.getResponse(); - assertThat(searchTemplateResponse2.hasResponse(), is(false)); - assertThat(searchTemplateResponse2.getSource().utf8ToString(), equalTo(""" - {"query":{"match_phrase_prefix":{"message":"quick brown f"}}}""")); - - MultiSearchTemplateResponse.Item response3 = response.getResponses()[2]; - assertThat(response3.isFailure(), is(false)); - SearchTemplateResponse searchTemplateResponse3 = response3.getResponse(); - assertThat(searchTemplateResponse3.hasResponse(), is(true)); - assertHitCount(searchTemplateResponse3.getResponse(), (numDocs / 2)); - assertThat(searchTemplateResponse3.getSource().utf8ToString(), equalTo(""" - {"query":{"term":{"odd":"false"}}}""")); - - MultiSearchTemplateResponse.Item response4 = response.getResponses()[3]; - assertThat(response4.isFailure(), is(true)); - assertThat(response4.getFailure(), instanceOf(IndexNotFoundException.class)); - assertThat(response4.getFailure().getMessage(), equalTo("no such index [unknown]")); - - MultiSearchTemplateResponse.Item response5 = response.getResponses()[4]; - assertThat(response5.isFailure(), is(true)); - assertNull(response5.getResponse()); - assertThat(response5.getFailure(), instanceOf(XContentParseException.class)); + assertResponse(client().execute(MustachePlugin.MULTI_SEARCH_TEMPLATE_ACTION, multiRequest), response -> { + assertThat(response.getResponses(), arrayWithSize(5)); + assertThat(response.getTook().millis(), greaterThan(0L)); + + MultiSearchTemplateResponse.Item response1 = response.getResponses()[0]; + assertThat(response1.isFailure(), is(false)); + SearchTemplateResponse searchTemplateResponse1 = 
response1.getResponse(); + assertThat(searchTemplateResponse1.hasResponse(), is(true)); + assertHitCount(searchTemplateResponse1.getResponse(), (numDocs / 2) + (numDocs % 2)); + assertThat(searchTemplateResponse1.getSource().utf8ToString(), equalTo(""" + {"query":{"match":{"odd":"true"}}}""")); + + MultiSearchTemplateResponse.Item response2 = response.getResponses()[1]; + assertThat(response2.isFailure(), is(false)); + SearchTemplateResponse searchTemplateResponse2 = response2.getResponse(); + assertThat(searchTemplateResponse2.hasResponse(), is(false)); + assertThat(searchTemplateResponse2.getSource().utf8ToString(), equalTo(""" + {"query":{"match_phrase_prefix":{"message":"quick brown f"}}}""")); + + MultiSearchTemplateResponse.Item response3 = response.getResponses()[2]; + assertThat(response3.isFailure(), is(false)); + SearchTemplateResponse searchTemplateResponse3 = response3.getResponse(); + assertThat(searchTemplateResponse3.hasResponse(), is(true)); + assertHitCount(searchTemplateResponse3.getResponse(), (numDocs / 2)); + assertThat(searchTemplateResponse3.getSource().utf8ToString(), equalTo(""" + {"query":{"term":{"odd":"false"}}}""")); + + MultiSearchTemplateResponse.Item response4 = response.getResponses()[3]; + assertThat(response4.isFailure(), is(true)); + assertThat(response4.getFailure(), instanceOf(IndexNotFoundException.class)); + assertThat(response4.getFailure().getMessage(), equalTo("no such index [unknown]")); + + MultiSearchTemplateResponse.Item response5 = response.getResponses()[4]; + assertThat(response5.isFailure(), is(true)); + assertNull(response5.getResponse()); + assertThat(response5.getFailure(), instanceOf(XContentParseException.class)); + }); } /** @@ -193,21 +195,24 @@ public void testCCSCheckCompatibility() throws Exception { searchTemplateRequest.setRequest(new SearchRequest()); MultiSearchTemplateRequest request = new MultiSearchTemplateRequest(); request.add(searchTemplateRequest); - MultiSearchTemplateResponse multiSearchTemplateResponse = client().execute(MustachePlugin.MULTI_SEARCH_TEMPLATE_ACTION, request) - .get(); - Item response = multiSearchTemplateResponse.getResponses()[0]; - assertTrue(response.isFailure()); - Exception ex = response.getFailure(); - assertThat(ex.getMessage(), containsString("[class org.elasticsearch.action.search.SearchRequest] is not compatible with version")); - assertThat(ex.getMessage(), containsString("'search.check_ccs_compatibility' setting is enabled.")); - - String expectedCause = Strings.format( - "[fail_before_current_version] was released first in version %s, failed compatibility " - + "check trying to send it to node with version %s", - FailBeforeCurrentVersionQueryBuilder.FUTURE_VERSION, - TransportVersions.MINIMUM_CCS_VERSION - ); - String actualCause = ex.getCause().getMessage(); - assertEquals(expectedCause, actualCause); + assertResponse(client().execute(MustachePlugin.MULTI_SEARCH_TEMPLATE_ACTION, request), multiSearchTemplateResponse -> { + Item response = multiSearchTemplateResponse.getResponses()[0]; + assertTrue(response.isFailure()); + Exception ex = response.getFailure(); + assertThat( + ex.getMessage(), + containsString("[class org.elasticsearch.action.search.SearchRequest] is not compatible with version") + ); + assertThat(ex.getMessage(), containsString("'search.check_ccs_compatibility' setting is enabled.")); + + String expectedCause = Strings.format( + "[fail_before_current_version] was released first in version %s, failed compatibility " + + "check trying to send it to node with version %s", + 
FailBeforeCurrentVersionQueryBuilder.FUTURE_VERSION, + TransportVersions.MINIMUM_CCS_VERSION + ); + String actualCause = ex.getCause().getMessage(); + assertEquals(expectedCause, actualCause); + }); } } diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java index 77480e6bc9e63..510ff01cf93f7 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.search.DummyQueryParserPlugin; import org.elasticsearch.search.SearchService; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; @@ -32,7 +33,7 @@ import java.util.concurrent.ExecutionException; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -77,13 +78,13 @@ public void testSearchRequestFail() throws Exception { .get() ); - SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()).setRequest(searchRequest) - .setScript(query) - .setScriptType(ScriptType.INLINE) - .setScriptParams(Collections.singletonMap("my_size", 1)) - .get(); - - assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)); + assertResponse( + new SearchTemplateRequestBuilder(client()).setRequest(searchRequest) + .setScript(query) + .setScriptType(ScriptType.INLINE) + .setScriptParams(Collections.singletonMap("my_size", 1)), + searchResponse -> assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)) + ); } /** @@ -101,8 +102,10 @@ public void testTemplateQueryAsEscapedString() throws Exception { }"""; SearchTemplateRequest request = SearchTemplateRequest.fromXContent(createParser(JsonXContent.jsonXContent, query)); request.setRequest(searchRequest); - SearchTemplateResponse searchResponse = client().execute(MustachePlugin.SEARCH_TEMPLATE_ACTION, request).get(); - assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)); + assertResponse( + client().execute(MustachePlugin.SEARCH_TEMPLATE_ACTION, request), + searchResponse -> assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)) + ); } /** @@ -122,8 +125,10 @@ public void testTemplateQueryAsEscapedStringStartingWithConditionalClause() thro }"""; SearchTemplateRequest request = SearchTemplateRequest.fromXContent(createParser(JsonXContent.jsonXContent, templateString)); request.setRequest(searchRequest); - SearchTemplateResponse searchResponse = client().execute(MustachePlugin.SEARCH_TEMPLATE_ACTION, request).get(); - assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)); + assertResponse( + client().execute(MustachePlugin.SEARCH_TEMPLATE_ACTION, request), + searchResponse -> 
assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)) + ); } /** @@ -143,8 +148,10 @@ public void testTemplateQueryAsEscapedStringWithConditionalClauseAtEnd() throws }"""; SearchTemplateRequest request = SearchTemplateRequest.fromXContent(createParser(JsonXContent.jsonXContent, templateString)); request.setRequest(searchRequest); - SearchTemplateResponse searchResponse = client().execute(MustachePlugin.SEARCH_TEMPLATE_ACTION, request).get(); - assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)); + assertResponse( + client().execute(MustachePlugin.SEARCH_TEMPLATE_ACTION, request), + searchResponse -> assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)) + ); } public void testIndexedTemplateClient() throws Exception { @@ -177,12 +184,13 @@ public void testIndexedTemplateClient() throws Exception { Map<String, Object> templateParams = new HashMap<>(); templateParams.put("fieldParam", "foo"); - SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("test")) - .setScript("testTemplate") - .setScriptType(ScriptType.STORED) - .setScriptParams(templateParams) - .get(); - assertHitCount(searchResponse.getResponse(), 4); + assertHitCount( + new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("test")) + .setScript("testTemplate") + .setScriptType(ScriptType.STORED) + .setScriptParams(templateParams), + 4 + ); assertAcked(clusterAdmin().prepareDeleteStoredScript("testTemplate")); @@ -273,13 +281,13 @@ public void testIndexedTemplate() throws Exception { Map<String, Object> templateParams = new HashMap<>(); templateParams.put("fieldParam", "foo"); - - SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest().indices("test")) - .setScript("1a") - .setScriptType(ScriptType.STORED) - .setScriptParams(templateParams) - .get(); - assertHitCount(searchResponse.getResponse(), 4); + assertHitCount( + new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest().indices("test")) + .setScript("1a") + .setScriptType(ScriptType.STORED) + .setScriptParams(templateParams), + 4 + ); expectThrows( ResourceNotFoundException.class, @@ -291,12 +299,13 @@ public void testIndexedTemplate() throws Exception { ); templateParams.put("fieldParam", "bar"); - searchResponse = new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("test")) - .setScript("2") - .setScriptType(ScriptType.STORED) - .setScriptParams(templateParams) - .get(); - assertHitCount(searchResponse.getResponse(), 1); + assertHitCount( + new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("test")) + .setScript("2") + .setScriptType(ScriptType.STORED) + .setScriptParams(templateParams), + 1 + ); } // Relates to #10397 @@ -352,13 +361,14 @@ public void testIndexedTemplateOverwrite() throws Exception { .setId("git01") .setContent(new BytesArray(query.replace("{{slop}}", Integer.toString(0))), XContentType.JSON) ); - - SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("testindex")) - .setScript("git01") - .setScriptType(ScriptType.STORED) - .setScriptParams(templateParams) - .get(); - assertHitCount(searchResponse.getResponse(), 1); + assertHitCount( + new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("testindex")) + .setScript("git01") + .setScriptType(ScriptType.STORED) + .setScriptParams(templateParams), + 1 + ); } } @@ -394,12 
+404,13 @@ public void testIndexedTemplateWithArray() throws Exception { String[] fieldParams = { "foo", "bar" }; arrayTemplateParams.put("fieldParam", fieldParams); - SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("test")) - .setScript("4") - .setScriptType(ScriptType.STORED) - .setScriptParams(arrayTemplateParams) - .get(); - assertHitCount(searchResponse.getResponse(), 5); + assertHitCount( + new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("test")) + .setScript("4") + .setScriptType(ScriptType.STORED) + .setScriptParams(arrayTemplateParams), + 5 + ); } /** @@ -435,4 +446,8 @@ public void testCCSCheckCompatibility() throws Exception { String actualCause = underlying.getMessage().replaceAll("\\d{7,}", "XXXXXXX"); assertEquals(expectedCause, actualCause); } + + public static void assertHitCount(SearchTemplateRequestBuilder requestBuilder, long expectedHitCount) { + assertResponse(requestBuilder, response -> ElasticsearchAssertions.assertHitCount(response.getResponse(), expectedHitCount)); + } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java index a26352eb3d8c7..9bdabcede8ec6 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java @@ -18,8 +18,11 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.transport.LeakTracker; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -99,6 +102,20 @@ public String toString() { private final Item[] items; private final long tookInMillis; + private final RefCounted refCounted = LeakTracker.wrap(new AbstractRefCounted() { + @Override + protected void closeInternal() { + for (int i = 0; i < items.length; i++) { + Item item = items[i]; + var r = item.response; + if (r != null) { + r.decRef(); + items[i] = null; + } + } + } + }); + MultiSearchTemplateResponse(StreamInput in) throws IOException { super(in); items = in.readArray(Item::new, Item[]::new); @@ -162,6 +179,26 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par return builder; } + @Override + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } + static final class Fields { static final String RESPONSES = "responses"; static final String STATUS = "status"; @@ -179,6 +216,7 @@ public static MultiSearchTemplateResponse fromXContext(XContentParser parser) { if (item.getResponse() != null) { stResponse = new SearchTemplateResponse(); stResponse.setResponse(item.getResponse()); + item.getResponse().incRef(); } templateResponses[i++] = new Item(stResponse, item.getFailure()); } diff --git 
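For readers unfamiliar with the ref-counting scheme the mustache responses adopt above: the outer response owns pooled sub-responses and must release them exactly once, when the last reference is dropped, and LeakTracker.wrap() flags objects that are never released. A rough sketch of that pattern with a hypothetical holder class standing in for the real response (an illustration, not the actual implementation):

```java
import org.elasticsearch.core.AbstractRefCounted;
import org.elasticsearch.core.RefCounted;
import org.elasticsearch.transport.LeakTracker;

// Hypothetical stand-in for a response that owns a pooled resource.
class RefCountedHolder implements RefCounted {
    private final RefCounted refCounted = LeakTracker.wrap(new AbstractRefCounted() {
        @Override
        protected void closeInternal() {
            // runs exactly once, when the final reference is released
            System.out.println("releasing underlying pooled resource");
        }
    });

    @Override
    public void incRef() {
        refCounted.incRef();
    }

    @Override
    public boolean tryIncRef() {
        return refCounted.tryIncRef();
    }

    @Override
    public boolean decRef() {
        return refCounted.decRef();
    }

    @Override
    public boolean hasReferences() {
        return refCounted.hasReferences();
    }

    public static void main(String[] args) {
        RefCountedHolder holder = new RefCountedHolder(); // created with one reference
        holder.incRef();  // another party borrows it
        holder.decRef();  // the borrower is done
        holder.decRef();  // the owner is done: closeInternal() fires
    }
}
```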
a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java index 6cc7f47511cb1..8145c586c402d 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java @@ -54,7 +54,7 @@ public SearchTemplateRequest(StreamInput in) throws IOException { scriptType = ScriptType.readFrom(in); script = in.readOptionalString(); if (in.readBoolean()) { - scriptParams = in.readMap(); + scriptParams = in.readGenericMap(); } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java index b4b804bf22e92..34e771c51e4f4 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java @@ -14,12 +14,17 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.transport.LeakTracker; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; @@ -35,6 +40,15 @@ public class SearchTemplateResponse extends ActionResponse implements ToXContent /** Contains the search response, if any **/ private SearchResponse response; + private final RefCounted refCounted = LeakTracker.wrap(new AbstractRefCounted() { + @Override + protected void closeInternal() { + if (response != null) { + response.decRef(); + } + } + }); + SearchTemplateResponse() {} SearchTemplateResponse(StreamInput in) throws IOException { @@ -74,6 +88,26 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(response); } + @Override + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } + public static SearchTemplateResponse fromXContent(XContentParser parser) throws IOException { SearchTemplateResponse searchTemplateResponse = new SearchTemplateResponse(); Map<String, Object> contentAsMap = parser.map(); @@ -85,11 +119,16 @@ public static SearchTemplateResponse fromXContent(XContentParser parser) throws } else { XContentType contentType = parser.contentType(); XContentBuilder builder = XContentFactory.contentBuilder(contentType).map(contentAsMap); - XContentParser searchResponseParser = contentType.xContent() - .createParser(parser.getXContentRegistry(), parser.getDeprecationHandler(), BytesReference.bytes(builder).streamInput()); - - SearchResponse searchResponse = SearchResponse.fromXContent(searchResponseParser); - searchTemplateResponse.setResponse(searchResponse); + try ( + XContentParser searchResponseParser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY.withRegistry(parser.getXContentRegistry()) + .withDeprecationHandler(parser.getDeprecationHandler()), + BytesReference.bytes(builder), + contentType + ) + ) { + searchTemplateResponse.setResponse(SearchResponse.fromXContent(searchResponseParser)); + } } return searchTemplateResponse; } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java index 4b0c365ba8b13..11871978e433a 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java @@ -80,6 +80,7 @@ protected void doExecute(Task task, MultiSearchTemplateRequest request, ActionLi try { searchRequest = convert(searchTemplateRequest, searchTemplateResponse, scriptService, xContentRegistry, searchUsageHolder); } catch (Exception e) { + searchTemplateResponse.decRef(); items[i] = new MultiSearchTemplateResponse.Item(null, e); if (ExceptionsHelper.status(e).getStatus() >= 500 && ExceptionsHelper.isNodeOrShardUnavailableTypeException(e) == false) { logger.warn("MultiSearchTemplate convert failure", e); @@ -98,12 +99,17 @@ protected void doExecute(Task task, MultiSearchTemplateRequest request, ActionLi MultiSearchResponse.Item item = r.getResponses()[i]; int originalSlot = originalSlots.get(i); if (item.isFailure()) { + var existing = items[originalSlot]; + if (existing.getResponse() != null) { + existing.getResponse().decRef(); + } items[originalSlot] = new MultiSearchTemplateResponse.Item(null, item.getFailure()); } else { items[originalSlot].getResponse().setResponse(item.getResponse()); + item.getResponse().incRef(); } } - l.onResponse(new MultiSearchTemplateResponse(items, r.getTook().millis())); + ActionListener.respondAndRelease(l, new MultiSearchTemplateResponse(items, r.getTook().millis())); })); } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java index 2b315f48dcce4..c6bd2afc64d21 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java @@ -71,18 +71,29 @@ public TransportSearchTemplateAction( @Override protected void doExecute(Task task, SearchTemplateRequest request, ActionListener<SearchTemplateResponse> listener) { final SearchTemplateResponse response = new SearchTemplateResponse(); + boolean success = false; try { SearchRequest searchRequest = convert(request, response, scriptService, xContentRegistry, searchUsageHolder); if (searchRequest != null) { - client.search(searchRequest, listener.delegateFailureAndWrap((l, searchResponse) -> { + client.search(searchRequest, listener.delegateResponse((l, e) -> { + response.decRef(); + l.onFailure(e); + }).delegateFailureAndWrap((l, searchResponse) -> { response.setResponse(searchResponse); - l.onResponse(response); + searchResponse.incRef(); + ActionListener.respondAndRelease(l, response); 
})); + success = true; } else { - listener.onResponse(response); + success = true; + ActionListener.respondAndRelease(listener, response); } } catch (IOException e) { listener.onFailure(e); + } finally { + if (success == false) { + response.decRef(); + } } } diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponseTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponseTests.java index 3db0d12216e54..03f2fbd3e81a7 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponseTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponseTests.java @@ -11,7 +11,8 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.Strings; -import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.core.RefCounted; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.AbstractXContentTestCase; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; @@ -38,11 +39,9 @@ protected MultiSearchTemplateResponse createTestInstance() { int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = totalShards - successfulShards; - InternalSearchResponse internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; SearchResponse.Clusters clusters = randomClusters(); SearchTemplateResponse searchTemplateResponse = new SearchTemplateResponse(); - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, totalShards, successfulShards, @@ -75,11 +74,9 @@ private static MultiSearchTemplateResponse createTestInstanceWithFailures() { int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = totalShards - successfulShards; - InternalSearchResponse internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; SearchResponse.Clusters clusters = randomClusters(); SearchTemplateResponse searchTemplateResponse = new SearchTemplateResponse(); - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, totalShards, successfulShards, @@ -150,7 +147,13 @@ public void testFromXContentWithFailures() throws IOException { this::doParseInstance, this::assertEqualInstances, assertToXContentEquivalence, - ToXContent.EMPTY_PARAMS + ToXContent.EMPTY_PARAMS, + RefCounted::decRef ); } + + @Override + protected void dispose(MultiSearchTemplateResponse instance) { + instance.decRef(); + } } diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateResponseTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateResponseTests.java index d3f23d3f4a21c..73c8887669a02 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateResponseTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateResponseTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.search.SearchHit; import 
org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.AbstractXContentTestCase; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -54,10 +54,8 @@ private static SearchResponse createSearchResponse() { int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = randomIntBetween(0, totalShards); - InternalSearchResponse internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; - return new SearchResponse( - internalSearchResponse, + return SearchResponseUtils.emptyWithTotalHits( null, totalShards, successfulShards, @@ -127,33 +125,36 @@ protected boolean supportsUnknownFields() { public void testSourceToXContent() throws IOException { SearchTemplateResponse response = new SearchTemplateResponse(); + try { + XContentBuilder source = XContentFactory.jsonBuilder() + .startObject() + .startObject("query") + .startObject("terms") + .field("status", new String[] { "pending", "published" }) + .endObject() + .endObject() + .endObject(); + response.setSource(BytesReference.bytes(source)); + + XContentType contentType = randomFrom(XContentType.values()); + XContentBuilder expectedResponse = XContentFactory.contentBuilder(contentType) + .startObject() + .startObject("template_output") + .startObject("query") + .startObject("terms") + .field("status", new String[] { "pending", "published" }) + .endObject() + .endObject() + .endObject() + .endObject(); - XContentBuilder source = XContentFactory.jsonBuilder() - .startObject() - .startObject("query") - .startObject("terms") - .field("status", new String[] { "pending", "published" }) - .endObject() - .endObject() - .endObject(); - response.setSource(BytesReference.bytes(source)); - - XContentType contentType = randomFrom(XContentType.values()); - XContentBuilder expectedResponse = XContentFactory.contentBuilder(contentType) - .startObject() - .startObject("template_output") - .startObject("query") - .startObject("terms") - .field("status", new String[] { "pending", "published" }) - .endObject() - .endObject() - .endObject() - .endObject(); - - XContentBuilder actualResponse = XContentFactory.contentBuilder(contentType); - response.toXContent(actualResponse, ToXContent.EMPTY_PARAMS); - - assertToXContentEquivalent(BytesReference.bytes(expectedResponse), BytesReference.bytes(actualResponse), contentType); + XContentBuilder actualResponse = XContentFactory.contentBuilder(contentType); + response.toXContent(actualResponse, ToXContent.EMPTY_PARAMS); + + assertToXContentEquivalent(BytesReference.bytes(expectedResponse), BytesReference.bytes(actualResponse), contentType); + } finally { + response.decRef(); + } } public void testSearchResponseToXContent() throws IOException { @@ -161,17 +162,14 @@ public void testSearchResponseToXContent() throws IOException { hit.score(2.0f); SearchHit[] hits = new SearchHit[] { hit }; - InternalSearchResponse internalSearchResponse = new InternalSearchResponse( + SearchResponse searchResponse = new SearchResponse( new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), null, null, - null, false, null, - 1 - ); - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + null, + 1, null, 0, 0, @@ -182,37 +180,46 @@ public void testSearchResponseToXContent() throws IOException { ); SearchTemplateResponse response = new 
SearchTemplateResponse(); - response.setResponse(searchResponse); - - XContentType contentType = randomFrom(XContentType.values()); - XContentBuilder expectedResponse = XContentFactory.contentBuilder(contentType) - .startObject() - .field("took", 0) - .field("timed_out", false) - .startObject("_shards") - .field("total", 0) - .field("successful", 0) - .field("skipped", 0) - .field("failed", 0) - .endObject() - .startObject("hits") - .startObject("total") - .field("value", 100) - .field("relation", "eq") - .endObject() - .field("max_score", 1.5F) - .startArray("hits") - .startObject() - .field("_id", "id") - .field("_score", 2.0F) - .endObject() - .endArray() - .endObject() - .endObject(); - - XContentBuilder actualResponse = XContentFactory.contentBuilder(contentType); - response.toXContent(actualResponse, ToXContent.EMPTY_PARAMS); - - assertToXContentEquivalent(BytesReference.bytes(expectedResponse), BytesReference.bytes(actualResponse), contentType); + try { + response.setResponse(searchResponse); + + XContentType contentType = randomFrom(XContentType.values()); + XContentBuilder expectedResponse = XContentFactory.contentBuilder(contentType) + .startObject() + .field("took", 0) + .field("timed_out", false) + .startObject("_shards") + .field("total", 0) + .field("successful", 0) + .field("skipped", 0) + .field("failed", 0) + .endObject() + .startObject("hits") + .startObject("total") + .field("value", 100) + .field("relation", "eq") + .endObject() + .field("max_score", 1.5F) + .startArray("hits") + .startObject() + .field("_id", "id") + .field("_score", 2.0F) + .endObject() + .endArray() + .endObject() + .endObject(); + + XContentBuilder actualResponse = XContentFactory.contentBuilder(contentType); + response.toXContent(actualResponse, ToXContent.EMPTY_PARAMS); + + assertToXContentEquivalent(BytesReference.bytes(expectedResponse), BytesReference.bytes(actualResponse), contentType); + } finally { + response.decRef(); + } + } + + @Override + protected void dispose(SearchTemplateResponse instance) { + instance.decRef(); } } diff --git a/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextGeneratorCommon.java b/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextGeneratorCommon.java index c54214e5f854d..b8390f6aab75c 100644 --- a/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextGeneratorCommon.java +++ b/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextGeneratorCommon.java @@ -34,15 +34,21 @@ public class ContextGeneratorCommon { @SuppressForbidden(reason = "retrieving data from an internal API not exposed as part of the REST client") + @SuppressWarnings("unchecked") public static List<PainlessContextInfo> getContextInfos() throws IOException { URLConnection getContextNames = new URL("http://" + System.getProperty("cluster.uri") + "/_scripts/painless/_context") .openConnection(); - XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, getContextNames.getInputStream()); - parser.nextToken(); - parser.nextToken(); - @SuppressWarnings("unchecked") - List<String> contextNames = (List<String>) (Object) parser.list(); - parser.close(); + List<String> contextNames; + try ( + XContentParser parser = JsonXContent.jsonXContent.createParser( + XContentParserConfiguration.EMPTY, + getContextNames.getInputStream() + ) + ) { + parser.nextToken(); + parser.nextToken(); + contextNames = (List<String>) (Object) parser.list(); + } ((HttpURLConnection) getContextNames).disconnect(); List<PainlessContextInfo> contextInfos = new ArrayList<>(); @@ -51,9 +57,10 @@ public static List<PainlessContextInfo> getContextInfos() throws IOException { URLConnection getContextInfo = new URL( "http://" + System.getProperty("cluster.uri") + "/_scripts/painless/_context?context=" + contextName ).openConnection(); - parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, getContextInfo.getInputStream()); - contextInfos.add(PainlessContextInfo.fromXContent(parser)); - ((HttpURLConnection) getContextInfo).disconnect(); + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, getContextInfo.getInputStream())) { + contextInfos.add(PainlessContextInfo.fromXContent(parser)); + ((HttpURLConnection) getContextInfo).disconnect(); + } } contextInfos.sort(Comparator.comparing(PainlessContextInfo::getName)); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Json.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Json.java index b2993d6169336..f121894cf4dc5 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Json.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Json.java @@ -20,16 +20,16 @@ public class Json { * Load a string as the Java version of a JSON type, either List (JSON array), Map (JSON object), Number, Boolean or String */ public static Object load(String json) throws IOException { - XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, json); - - return switch (parser.nextToken()) { - case START_ARRAY -> parser.list(); - case START_OBJECT -> parser.map(); - case VALUE_NUMBER -> parser.numberValue(); - case VALUE_BOOLEAN -> parser.booleanValue(); - case VALUE_STRING -> parser.text(); - default -> null; - }; + try (XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, json)) { + return switch (parser.nextToken()) { + case START_ARRAY -> parser.list(); + case START_OBJECT -> parser.map(); + case VALUE_NUMBER -> parser.numberValue(); + case VALUE_BOOLEAN -> parser.booleanValue(); + case VALUE_STRING -> parser.text(); + default -> null; + }; + } } /** diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java index 8437d78962c0c..6ad7622b35cac 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java @@ -54,7 +54,7 @@ public void testBasics() throws IOException { SimilarityScript.CONTEXT, Collections.emptyMap() ); - ScriptedSimilarity sim = new ScriptedSimilarity("foobar", null, "foobaz", factory::newInstance, true); + ScriptedSimilarity sim = new ScriptedSimilarity("foobar", null, "foobaz", factory, true); try (Directory dir = new ByteBuffersDirectory()) { IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setSimilarity(sim)); @@ -103,7 +103,7 @@ public void testWeightScript() throws IOException { SimilarityScript.CONTEXT, Collections.emptyMap() ); - ScriptedSimilarity sim = new ScriptedSimilarity("foobar", weightFactory::newInstance, "foobaz", factory::newInstance, true); + ScriptedSimilarity sim = new ScriptedSimilarity("foobar", weightFactory, "foobaz", factory, true); try (Directory dir = new ByteBuffersDirectory()) { IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setSimilarity(sim)); diff --git 
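The Json.load() change above is behavior-preserving (it only stops leaking the parser), so existing painless callers are unaffected. A small usage sketch based on the switch shown above; the demo class is hypothetical and the printed values are illustrative, assuming the lang-painless classes are reachable on the classpath:

```java
import java.io.IOException;

import org.elasticsearch.painless.api.Json;

// Hypothetical demo class showing how the return type mirrors the leading JSON token.
public class JsonLoadDemo {
    public static void main(String[] args) throws IOException {
        System.out.println(Json.load("[1, 2, 3]"));     // a List
        System.out.println(Json.load("{\"a\": true}")); // a Map
        System.out.println(Json.load("42"));            // a Number
        System.out.println(Json.load("\"hi\""));        // a String
    }
}
```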
a/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteRequestTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteRequestTests.java index 7505f75293a3a..0126be744bbd9 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteRequestTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteRequestTests.java @@ -49,7 +49,7 @@ public final void testFromXContent() throws Exception { try (XContentBuilder builder = XContentBuilder.builder(xContent)) { builder.value(testInstance); - try (XContentParser parser = createParser(xContent, BytesReference.bytes(builder).streamInput())) { + try (XContentParser parser = createParser(xContent, BytesReference.bytes(builder))) { PainlessExecuteAction.Request result = PainlessExecuteAction.Request.parse(parser); assertThat(result, equalTo(testInstance)); } diff --git a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/parsers/ShapeParser.java b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/parsers/ShapeParser.java index 06fef4e18c973..1b22c24bb57c1 100644 --- a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/parsers/ShapeParser.java +++ b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/parsers/ShapeParser.java @@ -8,18 +8,14 @@ package org.elasticsearch.legacygeo.parsers; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.index.mapper.AbstractGeometryFieldMapper; import org.elasticsearch.index.mapper.AbstractShapeGeometryFieldMapper; import org.elasticsearch.legacygeo.builders.ShapeBuilder; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.support.MapXContentParser; import java.io.IOException; -import java.util.Collections; /** * first point of entry for a shape parser @@ -70,19 +66,4 @@ public interface ShapeParser { return parse(parser, null); } - static ShapeBuilder parse(Object value) throws IOException { - try ( - XContentParser parser = new MapXContentParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - Collections.singletonMap("value", value), - null - ) - ) { - parser.nextToken(); // start object - parser.nextToken(); // field name - parser.nextToken(); // field value - return parse(parser); - } - } } diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java index 5ef5eb6c0b5b8..4fa1d7b7a3108 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java @@ -181,34 +181,37 @@ public void testParseMultiDimensionShapes() throws IOException { .endArray() .endObject(); - XContentParser parser = createParser(pointGeoJson); - parser.nextToken(); - ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); - assertNull(parser.nextToken()); + XContentBuilder lineGeoJson; + try (XContentParser parser = createParser(pointGeoJson)) { + parser.nextToken(); + ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); + 
assertNull(parser.nextToken()); - // multi dimension linestring - XContentBuilder lineGeoJson = XContentFactory.jsonBuilder() - .startObject() - .field("type", "LineString") - .startArray("coordinates") - .startArray() - .value(100.0) - .value(0.0) - .value(15.0) - .endArray() - .startArray() - .value(101.0) - .value(1.0) - .value(18.0) - .value(19.0) - .endArray() - .endArray() - .endObject(); + // multi dimension linestring + lineGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "LineString") + .startArray("coordinates") + .startArray() + .value(100.0) + .value(0.0) + .value(15.0) + .endArray() + .startArray() + .value(101.0) + .value(1.0) + .value(18.0) + .value(19.0) + .endArray() + .endArray() + .endObject(); + } - parser = createParser(lineGeoJson); - parser.nextToken(); - ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); - assertNull(parser.nextToken()); + try (var parser = createParser(lineGeoJson)) { + parser.nextToken(); + ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); + assertNull(parser.nextToken()); + } } @Override diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeQueryTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeQueryTests.java index 6ad4d2c06c6d4..4e06a37ec7f20 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeQueryTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeQueryTests.java @@ -99,7 +99,7 @@ public void testPointsOnlyExplicit() throws Exception { .get(); // test that point was inserted - assertHitCount(client().prepareSearch("geo_points_only").setQuery(matchAllQuery()).get(), 2L); + assertHitCount(client().prepareSearch("geo_points_only").setQuery(matchAllQuery()), 2L); } public void testPointsOnly() throws Exception { diff --git a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java index adca59bafcb36..c387ff2b2134b 100644 --- a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java +++ b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java @@ -55,7 +55,7 @@ public void testHighlightingWithMatchOnlyTextFieldMatchPhrase() throws IOExcepti .startObject() .field( "message", - "[.ds-.slm-history-6-2023.09.20-" + "[.ds-.slm-history-7-2023.09.20-" + randomInt() + "][0] marking and sending shard failed due to [failed recovery]" ) @@ -104,7 +104,7 @@ public void testHighlightingWithMatchOnlyTextFieldSyntheticSource() throws IOExc .startObject() .field( "message", - "[.ds-.slm-history-6-2023.09.20-" + "[.ds-.slm-history-7-2023.09.20-" + randomInt() + "][0] marking and sending shard failed due to [failed recovery]" ) diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java index 161cb1674a7b9..fa83e2600de9b 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java @@ -326,7 +326,10 
@@ public BlockLoader blockLoader(BlockLoaderContext blContext) { if (textFieldType.isSyntheticSource()) { return new BlockStoredFieldsReader.BytesFromStringsBlockLoader(storedFieldNameForSyntheticSource()); } - return new BlockSourceReader.BytesRefsBlockLoader(SourceValueFetcher.toString(blContext.sourcePaths(name()))); + SourceValueFetcher fetcher = SourceValueFetcher.toString(blContext.sourcePaths(name())); + // MatchOnlyText never has norms, so we have to use the field names field + BlockSourceReader.LeafIteratorLookup lookup = BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()); + return new BlockSourceReader.BytesRefsBlockLoader(fetcher, lookup); } @Override diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java index b35fb09c2d053..e6c77b7b50c09 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java @@ -316,7 +316,11 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { double scalingFactorInverse = 1d / scalingFactor; return new BlockDocValuesReader.DoublesBlockLoader(name(), l -> l * scalingFactorInverse); } - return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher(blContext.sourcePaths(name()))); + ValueFetcher valueFetcher = sourceValueFetcher(blContext.sourcePaths(name())); + BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed() + ? BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) + : BlockSourceReader.lookupMatchingAll(); + return new BlockSourceReader.DoublesBlockLoader(valueFetcher, lookup); } @Override diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapperTests.java index e1fbc2e149441..abd2a4c8fa622 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapperTests.java @@ -265,7 +265,7 @@ protected IngestScriptSupport ingestScriptSupport() { } @Override - protected Function<Object, Object> loadBlockExpected() { + protected Function<Object, Object> loadBlockExpected(MapperService mapper, String loaderFieldName) { return v -> ((BytesRef) v).utf8ToString(); } } diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java index 665e9289c3c7d..13927962e5d58 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java @@ -438,7 +438,7 @@ public List invalidExample() throws IOException { } @Override - protected Function<Object, Object> loadBlockExpected() { + protected Function<Object, Object> loadBlockExpected(MapperService mapper, String loaderFieldName) { return v -> (Number) v; } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java 
b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java index a924c0e323f96..a4c3ce6e7a3f7 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java @@ -9,7 +9,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.join.ScoreMode; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -50,6 +49,7 @@ import static org.elasticsearch.index.query.QueryBuilders.scriptQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; @@ -407,25 +407,26 @@ public void testPercolateNamedQueries() { """); QueryBuilder query = new PercolateQueryBuilder("my_query", List.of(house1_doc, house2_doc), XContentType.JSON); - SearchResponse response = client().prepareSearch("houses").setQuery(query).get(); - assertEquals(2, response.getHits().getTotalHits().value); + assertResponse(client().prepareSearch("houses").setQuery(query), response -> { + assertEquals(2, response.getHits().getTotalHits().value); - SearchHit[] hits = response.getHits().getHits(); - assertThat(hits[0].getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0, 1))); - assertThat( - hits[0].getFields().get("_percolator_document_slot_0_matched_queries").getValues(), - equalTo(Arrays.asList("fireplace_query", "detached_query", "3_bedrooms_query")) - ); - assertThat( - hits[0].getFields().get("_percolator_document_slot_1_matched_queries").getValues(), - equalTo(Arrays.asList("fireplace_query", "3_bedrooms_query")) - ); + SearchHit[] hits = response.getHits().getHits(); + assertThat(hits[0].getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0, 1))); + assertThat( + hits[0].getFields().get("_percolator_document_slot_0_matched_queries").getValues(), + equalTo(Arrays.asList("fireplace_query", "detached_query", "3_bedrooms_query")) + ); + assertThat( + hits[0].getFields().get("_percolator_document_slot_1_matched_queries").getValues(), + equalTo(Arrays.asList("fireplace_query", "3_bedrooms_query")) + ); - assertThat(hits[1].getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0))); - assertThat( - hits[1].getFields().get("_percolator_document_slot_0_matched_queries").getValues(), - equalTo(Arrays.asList("swimming_pool_query", "3_bedrooms_query")) - ); + assertThat(hits[1].getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0))); + assertThat( + hits[1].getFields().get("_percolator_document_slot_0_matched_queries").getValues(), + equalTo(Arrays.asList("swimming_pool_query", "3_bedrooms_query")) + ); + }); } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java index cf97c78bb4d64..934b89fb3f668 100644 --- 
a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java @@ -179,7 +179,7 @@ static void validateEvaluatedQuery(SearchSourceBuilder evaluationRequest) { for (int i = 0; i < intentSize; i++) { ratedDocs.add(new RatedDocument(in)); } - this.params = in.readMap(); + this.params = in.readGenericMap(); int summaryFieldsSize = in.readInt(); summaryFields = new ArrayList<>(summaryFieldsSize); for (int i = 0; i < summaryFieldsSize; i++) { diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedSearchHit.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedSearchHit.java index ffaa333dc74b7..d58c15d4efd74 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedSearchHit.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedSearchHit.java @@ -38,7 +38,7 @@ public RatedSearchHit(SearchHit searchHit, OptionalInt rating) { } RatedSearchHit(StreamInput in) throws IOException { - this(new SearchHit(in), in.readBoolean() ? OptionalInt.of(in.readVInt()) : OptionalInt.empty()); + this(SearchHit.readFrom(in), in.readBoolean() ? OptionalInt.of(in.readVInt()) : OptionalInt.empty()); } @Override diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java index d971786e477a6..2a45b8e9d8be4 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; @@ -208,15 +209,14 @@ public void testNoResults() throws Exception { } } } - SearchHit[] hits = new SearchHit[0]; DiscountedCumulativeGain dcg = new DiscountedCumulativeGain(); - EvalQueryQuality result = dcg.evaluate("id", hits, ratedDocs); + EvalQueryQuality result = dcg.evaluate("id", SearchHits.EMPTY, ratedDocs); assertEquals(0.0d, result.metricScore(), DELTA); assertEquals(0, filterUnratedDocuments(result.getHitsAndRatings()).size()); // also check normalized dcg = new DiscountedCumulativeGain(true, null, 10); - result = dcg.evaluate("id", hits, ratedDocs); + result = dcg.evaluate("id", SearchHits.EMPTY, ratedDocs); assertEquals(0.0d, result.metricScore(), DELTA); assertEquals(0, filterUnratedDocuments(result.getHitsAndRatings()).size()); } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/ExpectedReciprocalRankTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/ExpectedReciprocalRankTests.java index 52391883d8bab..2f9d2a3a117ed 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/ExpectedReciprocalRankTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/ExpectedReciprocalRankTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; +import 
org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; @@ -114,7 +115,7 @@ private SearchHit[] createSearchHits(List rated, Integer[] releva */ public void testNoResults() throws Exception { ExpectedReciprocalRank err = new ExpectedReciprocalRank(5, 0, 10); - assertEquals(0.0, err.evaluate("id", new SearchHit[0], Collections.emptyList()).metricScore(), DELTA); + assertEquals(0.0, err.evaluate("id", SearchHits.EMPTY, Collections.emptyList()).metricScore(), DELTA); } public void testParseFromXContent() throws IOException { diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java index 73a2eba86345a..1aa5df55f5296 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; @@ -150,8 +151,7 @@ public void testEvaluationNoRelevantInResults() { } public void testNoResults() throws Exception { - SearchHit[] hits = new SearchHit[0]; - EvalQueryQuality evaluated = (new MeanReciprocalRank()).evaluate("id", hits, Collections.emptyList()); + EvalQueryQuality evaluated = (new MeanReciprocalRank()).evaluate("id", SearchHits.EMPTY, Collections.emptyList()); assertEquals(0.0d, evaluated.metricScore(), 0.00001); assertEquals(-1, ((MeanReciprocalRank.Detail) evaluated.getMetricDetails()).getFirstRelevantRank()); } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java index 306b6cafd4c9d..2b199182619ce 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; @@ -136,8 +137,7 @@ public void testNoRatedDocs() throws Exception { } public void testNoResults() throws Exception { - SearchHit[] hits = new SearchHit[0]; - EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", hits, Collections.emptyList()); + EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", SearchHits.EMPTY, Collections.emptyList()); assertEquals(0.0d, evaluated.metricScore(), 0.00001); assertEquals(0, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); assertEquals(0, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRetrieved()); diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RecallAtKTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RecallAtKTests.java 
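The rank-eval test diffs on either side of this point (DiscountedCumulativeGainTests, ExpectedReciprocalRankTests, MeanReciprocalRankTests, PrecisionAtKTests above, RecallAtKTests just below) all make the same mechanical substitution: each locally allocated `new SearchHit[0]` becomes the shared `SearchHits.EMPTY` constant, so the no-results path no longer allocates. A minimal, self-contained sketch of the idiom; the `SearchHit` record and the toy metric here are stand-ins, not the real Elasticsearch types:

```java
import java.util.function.ToDoubleFunction;

// Sketch only: this SearchHit record stands in for org.elasticsearch.search.SearchHit.
final class EmptyHitsIdiom {
    record SearchHit(String id) {}

    // Plays the role of SearchHits.EMPTY: one shared, immutable empty array
    // instead of a fresh `new SearchHit[0]` at every no-results call site.
    static final SearchHit[] EMPTY = new SearchHit[0];

    static double scoreNoResults(ToDoubleFunction<SearchHit[]> metric) {
        return metric.applyAsDouble(EMPTY); // before: metric.applyAsDouble(new SearchHit[0])
    }

    public static void main(String[] args) {
        // A toy metric: 0.0 when there are no hits, 1.0 otherwise.
        System.out.println(scoreNoResults(hits -> hits.length == 0 ? 0.0 : 1.0)); // prints 0.0
    }
}
```

Sharing one empty array is safe because nothing observable can change through it; the same trick would be wrong for any array that callers mutate.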
index 866675f4a9c11..c5cbb84d66d2d 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RecallAtKTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RecallAtKTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; @@ -113,7 +114,7 @@ public void testNoRatedDocs() throws Exception { } public void testNoResults() throws Exception { - EvalQueryQuality evaluated = (new RecallAtK()).evaluate("id", new SearchHit[0], Collections.emptyList()); + EvalQueryQuality evaluated = (new RecallAtK()).evaluate("id", SearchHits.EMPTY, Collections.emptyList()); assertEquals(0.0d, evaluated.metricScore(), 0.00001); assertEquals(0, ((RecallAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); assertEquals(0, ((RecallAtK.Detail) evaluated.getMetricDetails()).getRelevant()); @@ -123,7 +124,7 @@ public void testNoResultsWithRatedDocs() throws Exception { List rated = new ArrayList<>(); rated.add(createRatedDoc("test", "0", RELEVANT_RATING)); - EvalQueryQuality evaluated = (new RecallAtK()).evaluate("id", new SearchHit[0], rated); + EvalQueryQuality evaluated = (new RecallAtK()).evaluate("id", SearchHits.EMPTY, rated); assertEquals(0.0d, evaluated.metricScore(), 0.00001); assertEquals(0, ((RecallAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); assertEquals(1, ((RecallAtK.Detail) evaluated.getMetricDetails()).getRelevant()); diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java index 0c1a0e41206c7..ac850e991296c 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java @@ -19,7 +19,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -419,8 +419,10 @@ private void migrateWithTemplatesV2(String prefix, SystemIndexDescriptor... 
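The FeatureMigrationIT hunk around this point pairs with the import swap above it: the removed `PutComposableIndexTemplateAction.INSTANCE` gives way to `TransportPutComposableIndexTemplateAction.TYPE`, i.e. the `ActionType` constant now lives on the transport action class itself. A rough, self-contained sketch of that shape; every name below is a stand-in, not the real Elasticsearch API:

```java
// Sketch of moving the ActionType constant onto the transport action class.
final class ActionTypeIdiom {
    // Stand-in for org.elasticsearch.action.ActionType<Response>.
    record ActionType<R>(String name) {}

    record AcknowledgedResponse(boolean acknowledged) {}

    // Before, a separate PutTemplateAction class existed largely to hold INSTANCE;
    // after, the transport class exposes TYPE directly and the extra class can go.
    static final class TransportPutTemplateAction {
        static final ActionType<AcknowledgedResponse> TYPE = new ActionType<>("sketch:admin/put_template");
    }

    // A toy client; a real one dispatches on the action name over transport.
    static <R> R execute(ActionType<R> action, R cannedResponse) {
        System.out.println("executing " + action.name());
        return cannedResponse;
    }

    public static void main(String[] args) {
        AcknowledgedResponse response = execute(TransportPutTemplateAction.TYPE, new AcknowledgedResponse(true));
        System.out.println(response.acknowledged()); // prints true
    }
}
```

The request type moves along with the constant, which is why the call site below constructs `TransportPutComposableIndexTemplateAction.Request`.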
desc .version(5L) .metadata(Collections.singletonMap("baz", "thud")) .build(); - client().execute(PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("a-it").indexTemplate(cit)) - .get(); + client().execute( + TransportPutComposableIndexTemplateAction.TYPE, + new TransportPutComposableIndexTemplateAction.Request("a-it").indexTemplate(cit) + ).get(); ensureGreen(); diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBulkByQueryRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBulkByQueryRestHandler.java index 7502ab748f7fe..23e7fb011e88d 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBulkByQueryRestHandler.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBulkByQueryRestHandler.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.index.reindex.AbstractBulkByScrollRequest; import org.elasticsearch.index.reindex.BulkByScrollResponse; @@ -21,6 +22,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import java.io.IOException; import java.util.Map; @@ -93,13 +95,12 @@ private static XContentParser extractRequestSpecificFields(RestRequest restReque consumer.getValue().accept(value); } } - return parser.contentType() - .xContent() - .createParser( - parser.getXContentRegistry(), - parser.getDeprecationHandler(), - BytesReference.bytes(builder.map(body)).streamInput() - ); + return XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY.withRegistry(parser.getXContentRegistry()) + .withDeprecationHandler(parser.getDeprecationHandler()), + BytesReference.bytes(builder.map(body)), + parser.contentType() + ); } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/Reindexer.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/Reindexer.java index 4c3206e82b8d6..dbe1968bb076a 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/Reindexer.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/Reindexer.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; @@ -60,7 +61,6 @@ import org.elasticsearch.xcontent.XContentType; import java.io.IOException; -import java.io.InputStream; import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.List; @@ -321,8 +321,11 @@ protected RequestWrapper buildRequest(ScrollableHitSource.Hit doc) if (mainRequestXContentType != null && doc.getXContentType() != mainRequestXContentType) { // we need to convert try ( - InputStream stream = doc.getSource().streamInput(); - XContentParser parser = sourceXContentType.xContent().createParser(XContentParserConfiguration.EMPTY, stream); + XContentParser parser = XContentHelper.createParserNotCompressed( + 
XContentParserConfiguration.EMPTY, + doc.getSource(), + sourceXContentType + ); XContentBuilder builder = XContentBuilder.builder(mainRequestXContentType.xContent()) ) { parser.nextToken(); diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java index 3c5a3eb2e40f9..c3cf7cf62f925 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java @@ -66,7 +66,6 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskManager; @@ -574,9 +573,14 @@ protected RequestWrapper buildRequest(Hit doc) { new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0 ); - InternalSearchResponse internalResponse = new InternalSearchResponse(hits, null, null, null, false, false, 1); SearchResponse searchResponse = new SearchResponse( - internalResponse, + hits, + null, + null, + false, + false, + null, + 1, scrollId(), 5, 4, @@ -585,30 +589,33 @@ protected RequestWrapper buildRequest(Hit doc) { null, SearchResponse.Clusters.EMPTY ); + try { + client.lastSearch.get().listener.onResponse(searchResponse); - client.lastSearch.get().listener.onResponse(searchResponse); + assertEquals(0, capturedDelay.get().seconds()); + capturedCommand.get().run(); - assertEquals(0, capturedDelay.get().seconds()); - capturedCommand.get().run(); + // So the next request is going to have to wait an extra 100 seconds or so (base was 10 seconds, so 110ish) + assertThat(client.lastScroll.get().request.scroll().keepAlive().seconds(), either(equalTo(110L)).or(equalTo(109L))); - // So the next request is going to have to wait an extra 100 seconds or so (base was 10 seconds, so 110ish) - assertThat(client.lastScroll.get().request.scroll().keepAlive().seconds(), either(equalTo(110L)).or(equalTo(109L))); + // Now we can simulate a response and check the delay that we used for the task + if (randomBoolean()) { + client.lastScroll.get().listener.onResponse(searchResponse); + assertEquals(99, capturedDelay.get().seconds()); + } else { + // Let's rethrottle between the starting the scroll and getting the response + worker.rethrottle(10f); + client.lastScroll.get().listener.onResponse(searchResponse); + // The delay uses the new throttle + assertEquals(9, capturedDelay.get().seconds()); + } - // Now we can simulate a response and check the delay that we used for the task - if (randomBoolean()) { - client.lastScroll.get().listener.onResponse(searchResponse); - assertEquals(99, capturedDelay.get().seconds()); - } else { - // Let's rethrottle between the starting the scroll and getting the response - worker.rethrottle(10f); - client.lastScroll.get().listener.onResponse(searchResponse); - // The delay uses the new throttle - assertEquals(9, capturedDelay.get().seconds()); + // Running the command ought to increment the delay counter on the task. + capturedCommand.get().run(); + assertEquals(capturedDelay.get(), testTask.getStatus().getThrottled()); + } finally { + searchResponse.decRef(); } - - // Running the command ought to increment the delay counter on the task. 
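The AsyncBulkByScrollActionTests hunk being rewritten here has one purpose: the `SearchResponse` is now ref-counted, so the listener interactions and assertions move inside `try` and `searchResponse.decRef()` lands in `finally`, releasing the response exactly once even when an assertion throws. The discipline in isolation, as a self-contained sketch; this `Resource` class is a stand-in, not Elasticsearch's real `RefCounted`:

```java
// Sketch of the acquire/use/release pattern the test now follows.
final class DecRefIdiom {
    static final class Resource {
        private int refCount = 1; // construction hands the creator one reference

        void decRef() {
            if (--refCount == 0) {
                System.out.println("resources released");
            } else if (refCount < 0) {
                throw new IllegalStateException("released more times than acquired");
            }
        }
    }

    public static void main(String[] args) {
        Resource searchResponse = new Resource();
        try {
            // ... hand the response to listeners and run assertions;
            // any of these may throw without leaking the reference ...
            System.out.println("using response");
        } finally {
            searchResponse.decRef(); // always runs, mirroring the test's finally block
        }
    }
}
```

The same shape appears again in ClientScrollableHitSourceTests below, where the previous response is released with `decRef()` before a fresh one is created.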
- capturedCommand.get().run(); - assertEquals(capturedDelay.get(), testTask.getStatus().getThrottled()); } /** diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java index 0ad1867e75058..7ac50eb0e7c6c 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java @@ -30,7 +30,6 @@ import org.elasticsearch.index.reindex.ScrollableHitSource; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -127,6 +126,7 @@ private void dotestBasicsWithRetry(int retries, int minFailures, int maxFailures ++expectedSearchRetries; } + searchResponse.decRef(); searchResponse = createSearchResponse(); client.respond(TransportSearchScrollAction.TYPE, searchResponse); } @@ -168,9 +168,14 @@ private SearchResponse createSearchResponse() { new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0 ); - InternalSearchResponse internalResponse = new InternalSearchResponse(hits, null, null, null, false, false, 1); return new SearchResponse( - internalResponse, + hits, + null, + null, + false, + false, + null, + 1, randomSimpleString(random(), 1, 10), 5, 4, diff --git a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java index 3cc56c949e852..9fed6708de53c 100644 --- a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java +++ b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java @@ -44,6 +44,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase.randomBytes; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresentWith; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -133,8 +134,7 @@ public void testReadRangeBlobWithRetries() throws Exception { final int rangeStart = getRangeStart(exchange); assertThat(rangeStart, lessThan(bytes.length)); final Optional rangeEnd = getRangeEnd(exchange); - assertThat(rangeEnd.isPresent(), is(true)); - assertThat(rangeEnd.get(), greaterThanOrEqualTo(rangeStart)); + assertThat(rangeEnd, isPresentWith(greaterThanOrEqualTo(rangeStart))); final int length = (rangeEnd.get() - rangeStart) + 1; assertThat(length, lessThanOrEqualTo(bytes.length - rangeStart)); exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); diff --git a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java index 673499e4b2461..7cfe95bc69f23 100644 --- 
a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java +++ b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java @@ -70,6 +70,7 @@ import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.ENDPOINT_SETTING; import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.READ_TIMEOUT_SETTING; import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.TOKEN_URI_SETTING; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresent; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -203,7 +204,7 @@ public void testWriteBlobWithRetries() throws Exception { assertThat(exchange.getRequestURI().getQuery(), containsString("uploadType=multipart")); if (countDown.countDown()) { Optional> content = parseMultipartRequestBody(exchange.getRequestBody()); - assertThat(content.isPresent(), is(true)); + assertThat(content, isPresent()); assertThat(content.get().v1(), equalTo(blobContainer.path().buildAsString() + "write_blob_max_retries")); if (Objects.deepEquals(bytes, BytesReference.toBytes(content.get().v2()))) { byte[] response = Strings.format(""" diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java index 59f65032272df..f8503bca3ec67 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java @@ -38,13 +38,13 @@ import java.util.concurrent.LinkedBlockingQueue; import static org.elasticsearch.repositories.RepositoriesMetrics.HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM; -import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_EXCEPTIONS_COUNT; import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_EXCEPTIONS_HISTOGRAM; -import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_OPERATIONS_COUNT; -import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_REQUESTS_COUNT; -import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_THROTTLES_COUNT; +import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_EXCEPTIONS_TOTAL; +import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_OPERATIONS_TOTAL; +import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_REQUESTS_TOTAL; import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_THROTTLES_HISTOGRAM; -import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_UNSUCCESSFUL_OPERATIONS_COUNT; +import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_THROTTLES_TOTAL; +import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL; import static org.elasticsearch.rest.RestStatus.INTERNAL_SERVER_ERROR; import static org.elasticsearch.rest.RestStatus.NOT_FOUND; import static org.elasticsearch.rest.RestStatus.TOO_MANY_REQUESTS; @@ -104,11 +104,11 @@ public void testMetricsWithErrors() throws IOException { final long batch = i + 1; addErrorStatus(INTERNAL_SERVER_ERROR, 
TOO_MANY_REQUESTS, TOO_MANY_REQUESTS); blobContainer.writeBlob(purpose, blobName, new BytesArray("blob"), false); - assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_COUNT, Operation.PUT_OBJECT), equalTo(4L * batch)); - assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_COUNT, Operation.PUT_OBJECT), equalTo(batch)); - assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_COUNT, Operation.PUT_OBJECT), equalTo(0L)); - assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_COUNT, Operation.PUT_OBJECT), equalTo(batch)); - assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_COUNT, Operation.PUT_OBJECT), equalTo(2L * batch)); + assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_TOTAL, Operation.PUT_OBJECT), equalTo(4L * batch)); + assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_TOTAL, Operation.PUT_OBJECT), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL, Operation.PUT_OBJECT), equalTo(0L)); + assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.PUT_OBJECT), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.PUT_OBJECT), equalTo(2L * batch)); assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.PUT_OBJECT), equalTo(batch)); assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.PUT_OBJECT), equalTo(2L * batch)); assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, Operation.PUT_OBJECT), equalTo(batch)); @@ -124,11 +124,11 @@ public void testMetricsWithErrors() throws IOException { } catch (Exception e) { // intentional failure } - assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_COUNT, Operation.GET_OBJECT), equalTo(2L * batch)); - assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_COUNT, Operation.GET_OBJECT), equalTo(batch)); - assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_COUNT, Operation.GET_OBJECT), equalTo(batch)); - assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_COUNT, Operation.GET_OBJECT), equalTo(batch)); - assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_COUNT, Operation.GET_OBJECT), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_TOTAL, Operation.GET_OBJECT), equalTo(2L * batch)); + assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_TOTAL, Operation.GET_OBJECT), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL, Operation.GET_OBJECT), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.GET_OBJECT), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.GET_OBJECT), equalTo(batch)); assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); @@ -144,11 +144,11 @@ public void testMetricsWithErrors() throws IOException { } catch (Exception e) { // intentional failure } - assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_COUNT, Operation.LIST_OBJECTS), equalTo(5L * batch)); - assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_COUNT, Operation.LIST_OBJECTS), equalTo(batch)); - assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_COUNT, 
Operation.LIST_OBJECTS), equalTo(batch)); - assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_COUNT, Operation.LIST_OBJECTS), equalTo(batch)); - assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_COUNT, Operation.LIST_OBJECTS), equalTo(5L * batch)); + assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_TOTAL, Operation.LIST_OBJECTS), equalTo(5L * batch)); + assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_TOTAL, Operation.LIST_OBJECTS), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL, Operation.LIST_OBJECTS), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.LIST_OBJECTS), equalTo(batch)); + assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.LIST_OBJECTS), equalTo(5L * batch)); assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(batch)); assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(5L * batch)); assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(batch)); @@ -156,11 +156,11 @@ public void testMetricsWithErrors() throws IOException { // Delete to clean up blobContainer.deleteBlobsIgnoringIfNotExists(purpose, Iterators.single(blobName)); - assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_COUNT, Operation.DELETE_OBJECTS), equalTo(1L)); - assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_COUNT, Operation.DELETE_OBJECTS), equalTo(1L)); - assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_COUNT, Operation.DELETE_OBJECTS), equalTo(0L)); - assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_COUNT, Operation.DELETE_OBJECTS), equalTo(0L)); - assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_COUNT, Operation.DELETE_OBJECTS), equalTo(0L)); + assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_TOTAL, Operation.DELETE_OBJECTS), equalTo(1L)); + assertThat(getLongCounterValue(plugin, METRIC_OPERATIONS_TOTAL, Operation.DELETE_OBJECTS), equalTo(1L)); + assertThat(getLongCounterValue(plugin, METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL, Operation.DELETE_OBJECTS), equalTo(0L)); + assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.DELETE_OBJECTS), equalTo(0L)); + assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.DELETE_OBJECTS), equalTo(0L)); assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.DELETE_OBJECTS), equalTo(0L)); assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.DELETE_OBJECTS), equalTo(0L)); assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, Operation.DELETE_OBJECTS), equalTo(1L)); diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 29342a7f5ea92..e70151cbdf8ee 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -75,7 +75,7 @@ import java.util.stream.Collectors; import java.util.stream.StreamSupport; -import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_REQUESTS_COUNT; +import static 
org.elasticsearch.repositories.RepositoriesMetrics.METRIC_REQUESTS_TOTAL; import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomNonDataPurpose; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; @@ -267,7 +267,7 @@ public void testMetrics() throws Exception { .filterPlugins(TestTelemetryPlugin.class) .toList(); assertThat(plugins, hasSize(1)); - final List metrics = Measurement.combine(plugins.get(0).getLongCounterMeasurement(METRIC_REQUESTS_COUNT)); + final List metrics = Measurement.combine(plugins.get(0).getLongCounterMeasurement(METRIC_REQUESTS_TOTAL)); assertThat( statsCollectors.keySet().stream().map(S3BlobStore.StatsKey::operation).collect(Collectors.toSet()), diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index 93b8ef7e57389..dadd15ed640c0 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -628,7 +628,7 @@ void run(BytesReference expected, BytesReference updated, ActionListenerandThen((l, currentValue) -> ActionListener.completeWith(l, () -> { + .andThenApply(currentValue -> { if (currentValue.isPresent() && currentValue.bytesReference().equals(expected)) { logger.trace("[{}] completing upload [{}]", blobKey, uploadId); completeMultipartUpload(uploadId, partETag); @@ -638,7 +638,7 @@ void run(BytesReference expected, BytesReference updated, ActionListener { - assertThat(maybe, OptionalMatchers.isPresent()); - // noinspection OptionalGetWithoutIsPresent - assertThat(maybe.get(), instanceOf(RuntimeException.class)); - assertThat(maybe.get(), hasToString(containsString("sd_notify returned error [" + rc + "]"))); - }); + runTestOnNodeStarted( + Boolean.TRUE.toString(), + rc, + (maybe, plugin) -> assertThat( + maybe, + isPresentWith( + allOf(instanceOf(RuntimeException.class), hasToString(containsString("sd_notify returned error [" + rc + "]"))) + ) + ) + ); } public void testOnNodeStartedNotEnabled() { diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java index 61ef5f1973854..9202db6f49a8e 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.LogManager; import org.elasticsearch.ESNetty4IntegTestCase; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -51,7 +52,7 @@ public void testLoggingHandler() { + ", request id: \\d+" + ", type: request" + ", version: .*" - + ", action: cluster:monitor/nodes/hot_threads\\[n\\]\\]" + + ", action: cluster:monitor/nodes/stats\\[n\\]\\]" + " WRITE: \\d+B"; final MockLogAppender.LoggingExpectation writeExpectation = new MockLogAppender.PatternSeenEventExpectation( "hot threads request", @@ -71,7 +72,7 
@@ public void testLoggingHandler() { + ", request id: \\d+" + ", type: request" + ", version: .*" - + ", action: cluster:monitor/nodes/hot_threads\\[n\\]\\]" + + ", action: cluster:monitor/nodes/stats\\[n\\]\\]" + " READ: \\d+B"; final MockLogAppender.LoggingExpectation readExpectation = new MockLogAppender.PatternSeenEventExpectation( @@ -84,7 +85,7 @@ public void testLoggingHandler() { appender.addExpectation(writeExpectation); appender.addExpectation(flushExpectation); appender.addExpectation(readExpectation); - clusterAdmin().prepareNodesHotThreads().get(); + client().admin().cluster().prepareNodesStats().get(TimeValue.timeValueSeconds(10)); appender.assertAllExpectationsMatched(); } diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java index 14952a2d37860..104677b64351f 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.transport.netty4; +import org.apache.lucene.util.Constants; import org.elasticsearch.ESNetty4IntegTestCase; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; @@ -31,14 +32,16 @@ @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 1, numClientNodes = 0) public class Netty4TransportMultiPortIntegrationIT extends ESNetty4IntegTestCase { + private static final int NUMBER_OF_CLIENT_PORTS = Constants.WINDOWS ? 
300 : 10; + private static int randomPort = -1; private static String randomPortRange; @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { if (randomPort == -1) { - randomPort = randomIntBetween(49152, 65525); - randomPortRange = Strings.format("%s-%s", randomPort, randomPort + 10); + randomPort = randomIntBetween(49152, 65535 - NUMBER_OF_CLIENT_PORTS); + randomPortRange = Strings.format("%s-%s", randomPort, randomPort + NUMBER_OF_CLIENT_PORTS); } Settings.Builder builder = Settings.builder() .put(super.nodeSettings(nodeOrdinal, otherSettings)) diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java index d662003530c22..65276c04bed56 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java @@ -12,7 +12,6 @@ import org.apache.lucene.analysis.ja.JapaneseTokenizer; import org.apache.lucene.analysis.ja.JapaneseTokenizer.Mode; import org.apache.lucene.analysis.ja.dict.UserDictionary; -import org.apache.lucene.analysis.util.CSVUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -23,10 +22,8 @@ import java.io.IOException; import java.io.Reader; import java.io.StringReader; -import java.util.HashSet; import java.util.List; import java.util.Locale; -import java.util.Set; public class KuromojiTokenizerFactory extends AbstractTokenizerFactory { @@ -60,11 +57,10 @@ public static UserDictionary getUserDictionary(Environment env, Settings setting "It is not allowed to use [" + USER_DICT_PATH_OPTION + "] in conjunction" + " with [" + USER_DICT_RULES_OPTION + "]" ); } - List ruleList = Analysis.getWordList(env, settings, USER_DICT_PATH_OPTION, USER_DICT_RULES_OPTION, false); + List ruleList = Analysis.getWordList(env, settings, USER_DICT_PATH_OPTION, USER_DICT_RULES_OPTION, false, true); if (ruleList == null || ruleList.isEmpty()) { return null; } - validateDuplicatedWords(ruleList); StringBuilder sb = new StringBuilder(); for (String line : ruleList) { sb.append(line).append(System.lineSeparator()); @@ -76,23 +72,6 @@ public static UserDictionary getUserDictionary(Environment env, Settings setting } } - private static void validateDuplicatedWords(List ruleList) { - Set dup = new HashSet<>(); - int lineNum = 0; - for (String line : ruleList) { - // ignore comments - if (line.startsWith("#") == false) { - String[] values = CSVUtil.parse(line); - if (dup.add(values[0]) == false) { - throw new IllegalArgumentException( - "Found duplicate term [" + values[0] + "] in user dictionary " + "at line [" + lineNum + "]" - ); - } - } - ++lineNum; - } - } - public static JapaneseTokenizer.Mode getMode(Settings settings) { String modeSetting = settings.get("mode", JapaneseTokenizer.DEFAULT_MODE.name()); return JapaneseTokenizer.Mode.valueOf(modeSetting.toUpperCase(Locale.ENGLISH)); diff --git a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriAnalyzerProvider.java b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriAnalyzerProvider.java index f2949e45964a4..65c9bb9833177 100644 --- 
a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriAnalyzerProvider.java +++ b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriAnalyzerProvider.java @@ -30,7 +30,7 @@ public class NoriAnalyzerProvider extends AbstractIndexAnalyzerProvider tagList = Analysis.getWordList(env, settings, "stoptags"); final Set stopTags = tagList != null ? resolvePOSList(tagList) : KoreanPartOfSpeechStopFilter.DEFAULT_STOP_TAGS; analyzer = new KoreanAnalyzer(userDictionary, mode, stopTags, false); diff --git a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriTokenizerFactory.java b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriTokenizerFactory.java index c0be8322ade95..eedb4c2011af3 100644 --- a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriTokenizerFactory.java +++ b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriTokenizerFactory.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.AbstractTokenizerFactory; import org.elasticsearch.index.analysis.Analysis; @@ -24,6 +25,8 @@ import java.util.List; import java.util.Locale; +import static org.elasticsearch.index.IndexVersions.UPGRADE_LUCENE_9_9_1; + public class NoriTokenizerFactory extends AbstractTokenizerFactory { private static final String USER_DICT_PATH_OPTION = "user_dictionary"; private static final String USER_DICT_RULES_OPTION = "user_dictionary_rules"; @@ -35,17 +38,24 @@ public class NoriTokenizerFactory extends AbstractTokenizerFactory { public NoriTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, settings, name); decompoundMode = getMode(settings); - userDictionary = getUserDictionary(env, settings); + userDictionary = getUserDictionary(env, settings, indexSettings); discardPunctuation = settings.getAsBoolean("discard_punctuation", true); } - public static UserDictionary getUserDictionary(Environment env, Settings settings) { + public static UserDictionary getUserDictionary(Environment env, Settings settings, IndexSettings indexSettings) { if (settings.get(USER_DICT_PATH_OPTION) != null && settings.get(USER_DICT_RULES_OPTION) != null) { throw new IllegalArgumentException( "It is not allowed to use [" + USER_DICT_PATH_OPTION + "] in conjunction" + " with [" + USER_DICT_RULES_OPTION + "]" ); } - List ruleList = Analysis.getWordList(env, settings, USER_DICT_PATH_OPTION, USER_DICT_RULES_OPTION, true); + List ruleList = Analysis.getWordList( + env, + settings, + USER_DICT_PATH_OPTION, + USER_DICT_RULES_OPTION, + true, + isSupportDuplicateCheck(indexSettings) + ); if (ruleList == null || ruleList.isEmpty()) { return null; } @@ -60,6 +70,24 @@ public static UserDictionary getUserDictionary(Environment env, Settings setting } } + /** + * Determines whether the index's user dictionary should be checked for duplicate terms. + * Duplicate detection was introduced in 8.13.0, so the check is enabled only for + * indices created on or after that version. + * + * @param indexSettings The settings of the index in question.
+ * @return true if the index was created on 8.13.0 or later and therefore supports the duplicate check + */ + private static boolean isSupportDuplicateCheck(IndexSettings indexSettings) { + var idxVersion = indexSettings.getIndexVersionCreated(); + // Explicitly exclude versions at or after NORI_DUPLICATES that were also released in 8.12; + // the only such version is UPGRADE_LUCENE_9_9_1. + return idxVersion.onOrAfter(IndexVersions.NORI_DUPLICATES) && idxVersion != UPGRADE_LUCENE_9_9_1; + } + public static KoreanTokenizer.DecompoundMode getMode(Settings settings) { String modeSetting = settings.get("decompound_mode", KoreanTokenizer.DEFAULT_DECOMPOUND.name()); return KoreanTokenizer.DecompoundMode.valueOf(modeSetting.toUpperCase(Locale.ENGLISH)); } diff --git a/plugins/analysis-nori/src/test/java/org/elasticsearch/plugin/analysis/nori/NoriAnalysisTests.java b/plugins/analysis-nori/src/test/java/org/elasticsearch/plugin/analysis/nori/NoriAnalysisTests.java index e091813184472..642ed19c520d7 100644 --- a/plugins/analysis-nori/src/test/java/org/elasticsearch/plugin/analysis/nori/NoriAnalysisTests.java +++ b/plugins/analysis-nori/src/test/java/org/elasticsearch/plugin/analysis/nori/NoriAnalysisTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.AnalysisTestsHelper; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -117,6 +118,31 @@ public void testNoriAnalyzerInvalidUserDictOption() throws Exception { ); } + public void testNoriAnalyzerDuplicateUserDictRule() throws Exception { + Settings settings = Settings.builder() + .put("index.analysis.analyzer.my_analyzer.type", "nori") + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersions.NORI_DUPLICATES) + .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules", "c++", "C쁠쁠", "세종", "세종", "세종시 세종 시") + .build(); + + final IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> createTestAnalysis(settings)); + assertThat(exc.getMessage(), containsString("[세종] in user dictionary at line [3]")); + } + + public void testNoriAnalyzerDuplicateUserDictRuleWithLegacyVersion() throws IOException { + Settings settings = Settings.builder() + .put("index.analysis.analyzer.my_analyzer.type", "nori") + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersions.V_8_10_0) + .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules", "c++", "C쁠쁠", "세종", "세종", "세종시 세종 시") + .build(); + + final TestAnalysis analysis = createTestAnalysis(settings); + Analyzer analyzer = analysis.indexAnalyzers.get("my_analyzer"); + try (TokenStream stream = analyzer.tokenStream("", "세종")) { + assertTokenStreamContents(stream, new String[] { "세종" }); + } + } + public void testNoriTokenizer() throws Exception { Settings settings = Settings.builder() .put("index.analysis.tokenizer.my_tokenizer.type", "nori_tokenizer") diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java index b818de468ea2c..88740edffc09a 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java +++
b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java @@ -16,7 +16,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.tests.util.TimeUnits; -import org.elasticsearch.Version; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -25,12 +24,12 @@ import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.Strings; import org.elasticsearch.core.IOUtils; -import org.elasticsearch.core.Tuple; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; +import org.elasticsearch.test.rest.TestFeatureService; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; import org.elasticsearch.test.rest.yaml.section.ClientYamlTestSection; @@ -46,12 +45,15 @@ import org.junit.rules.TestRule; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.function.BiPredicate; -import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.unmodifiableList; @@ -159,19 +161,7 @@ public void initSearchClient() throws IOException { searchClient = buildClient(restClientSettings(), clusterHosts.toArray(new HttpHost[clusterHosts.size()])); adminSearchClient = buildClient(restAdminSettings(), clusterHosts.toArray(new HttpHost[clusterHosts.size()])); - Tuple versionVersionTuple = readVersionsFromCatNodes(adminSearchClient); - final Version esVersion = versionVersionTuple.v1(); - final String os = readOsFromNodesInfo(adminSearchClient); - - searchYamlTestClient = new TestCandidateAwareClient( - getRestSpec(), - searchClient, - hosts, - esVersion, - ESRestTestCase::clusterHasFeature, - os, - this::getClientBuilderWithSniffedHosts - ); + searchYamlTestClient = new TestCandidateAwareClient(getRestSpec(), searchClient, hosts, this::getClientBuilderWithSniffedHosts); // check that we have an established CCS connection Request request = new Request("GET", "_remote/info"); @@ -298,18 +288,56 @@ public static Iterable parameters() throws Exception { @Override protected ClientYamlTestExecutionContext createRestTestExecutionContext( ClientYamlTestCandidate clientYamlTestCandidate, - ClientYamlTestClient clientYamlTestClient + ClientYamlTestClient clientYamlTestClient, + final Set nodesVersions, + final TestFeatureService testFeatureService, + final Set osSet ) { - // depending on the API called, we either return the client running against the "write" or the "search" cluster here - return new ClientYamlTestExecutionContext(clientYamlTestCandidate, clientYamlTestClient, randomizeContentType()) { - protected ClientYamlTestClient clientYamlTestClient(String apiName) { - if (CCS_APIS.contains(apiName)) { - return searchYamlTestClient; - } else { - return super.clientYamlTestClient(apiName); + try { + // Ensure the test specific initialization is run by calling it explicitly (@Before annotations on base-derived class may + // be called in a different order) + initSearchClient(); + // Reconcile 
and provide unified features, os, version(s), based on both clientYamlTestClient and searchYamlTestClient + var searchOs = readOsFromNodesInfo(adminSearchClient); + var searchNodeVersions = readVersionsFromNodesInfo(adminSearchClient); + var semanticNodeVersions = searchNodeVersions.stream() + .map(ESRestTestCase::parseLegacyVersion) + .flatMap(Optional::stream) + .collect(Collectors.toSet()); + final TestFeatureService searchTestFeatureService = createTestFeatureService( + getClusterStateFeatures(adminSearchClient), + semanticNodeVersions + ); + final TestFeatureService combinedTestFeatureService = new TestFeatureService() { + @Override + public boolean clusterHasFeature(String featureId) { + return testFeatureService.clusterHasFeature(featureId) && searchTestFeatureService.clusterHasFeature(featureId); } - } - }; + }; + final Set combinedOsSet = Stream.concat(osSet.stream(), Stream.of(searchOs)).collect(Collectors.toSet()); + final Set combinedNodeVersions = Stream.concat(nodesVersions.stream(), searchNodeVersions.stream()) + .collect(Collectors.toSet()); + + return new ClientYamlTestExecutionContext( + clientYamlTestCandidate, + clientYamlTestClient, + randomizeContentType(), + combinedNodeVersions, + combinedTestFeatureService, + combinedOsSet + ) { + // depending on the API called, we either return the client running against the "write" or the "search" cluster here + protected ClientYamlTestClient clientYamlTestClient(String apiName) { + if (CCS_APIS.contains(apiName)) { + return searchYamlTestClient; + } else { + return super.clientYamlTestClient(apiName); + } + } + }; + } catch (IOException e) { + throw new UncheckedIOException(e); + } } @AfterClass @@ -328,12 +356,9 @@ static class TestCandidateAwareClient extends ClientYamlTestClient { ClientYamlSuiteRestSpec restSpec, RestClient restClient, List hosts, - Version esVersion, - Predicate clusterFeaturesPredicate, - String os, CheckedSupplier clientBuilderWithSniffedNodes ) { - super(restSpec, restClient, hosts, esVersion, clusterFeaturesPredicate, os, clientBuilderWithSniffedNodes); + super(restSpec, restClient, hosts, clientBuilderWithSniffedNodes); } public void setTestCandidate(ClientYamlTestCandidate testCandidate) { diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java index 51d499db61932..a331d6f54cb4a 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java @@ -15,7 +15,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.tests.util.TimeUnits; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -26,13 +25,13 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.IOUtils; -import org.elasticsearch.core.Tuple; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ESRestTestCase; import 
org.elasticsearch.test.rest.ObjectPath; +import org.elasticsearch.test.rest.TestFeatureService; import org.elasticsearch.test.rest.yaml.CcsCommonYamlTestSuiteIT.TestCandidateAwareClient; import org.junit.AfterClass; import org.junit.Before; @@ -45,7 +44,11 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Optional; +import java.util.Set; import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.unmodifiableList; import static org.elasticsearch.test.rest.yaml.CcsCommonYamlTestSuiteIT.CCS_APIS; @@ -221,19 +224,7 @@ public void initSearchClient() throws IOException { clusterHosts.toArray(new HttpHost[clusterHosts.size()]) ); - Tuple versionVersionTuple = readVersionsFromCatNodes(adminSearchClient); - final Version esVersion = versionVersionTuple.v1(); - final String os = readOsFromNodesInfo(adminSearchClient); - - searchYamlTestClient = new TestCandidateAwareClient( - getRestSpec(), - searchClient, - hosts, - esVersion, - ESRestTestCase::clusterHasFeature, - os, - this::getClientBuilderWithSniffedHosts - ); + searchYamlTestClient = new TestCandidateAwareClient(getRestSpec(), searchClient, hosts, this::getClientBuilderWithSniffedHosts); configureRemoteCluster(); // check that we have an established CCS connection @@ -282,18 +273,56 @@ public static Iterable parameters() throws Exception { @Override protected ClientYamlTestExecutionContext createRestTestExecutionContext( ClientYamlTestCandidate clientYamlTestCandidate, - ClientYamlTestClient clientYamlTestClient + ClientYamlTestClient clientYamlTestClient, + final Set nodesVersions, + final TestFeatureService testFeatureService, + final Set osSet ) { - // depending on the API called, we either return the client running against the "write" or the "search" cluster here - return new ClientYamlTestExecutionContext(clientYamlTestCandidate, clientYamlTestClient, randomizeContentType()) { - protected ClientYamlTestClient clientYamlTestClient(String apiName) { - if (CCS_APIS.contains(apiName)) { - return searchYamlTestClient; - } else { - return super.clientYamlTestClient(apiName); + try { + // Ensure the test specific initialization is run by calling it explicitly (@Before annotations on base-derived class may + // be called in a different order) + initSearchClient(); + // Reconcile and provide unified features, os, version(s), based on both clientYamlTestClient and searchYamlTestClient + var searchOs = readOsFromNodesInfo(adminSearchClient); + var searchNodeVersions = readVersionsFromNodesInfo(adminSearchClient); + var semanticNodeVersions = searchNodeVersions.stream() + .map(ESRestTestCase::parseLegacyVersion) + .flatMap(Optional::stream) + .collect(Collectors.toSet()); + final TestFeatureService searchTestFeatureService = createTestFeatureService( + getClusterStateFeatures(adminSearchClient), + semanticNodeVersions + ); + final TestFeatureService combinedTestFeatureService = new TestFeatureService() { + @Override + public boolean clusterHasFeature(String featureId) { + return testFeatureService.clusterHasFeature(featureId) && searchTestFeatureService.clusterHasFeature(featureId); } - } - }; + }; + final Set combinedOsSet = Stream.concat(osSet.stream(), Stream.of(searchOs)).collect(Collectors.toSet()); + final Set combinedNodeVersions = Stream.concat(nodesVersions.stream(), searchNodeVersions.stream()) + .collect(Collectors.toSet()); + + return new ClientYamlTestExecutionContext( + clientYamlTestCandidate, + 
clientYamlTestClient, + randomizeContentType(), + combinedNodeVersions, + combinedTestFeatureService, + combinedOsSet + ) { + // depending on the API called, we either return the client running against the "write" or the "search" cluster here + protected ClientYamlTestClient clientYamlTestClient(String apiName) { + if (CCS_APIS.contains(apiName)) { + return searchYamlTestClient; + } else { + return super.clientYamlTestClient(apiName); + } + } + }; + } catch (IOException e) { + throw new UncheckedIOException(e); + } } @AfterClass diff --git a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle index ff28b1213079d..caa48db634f46 100644 --- a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle +++ b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle @@ -6,6 +6,7 @@ * Side Public License, v 1. */ +import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask @@ -32,6 +33,7 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> versions = [bwcVersion.toString(), project.version] setting 'cluster.remote.node.attr', 'gateway' setting 'xpack.security.enabled', 'false' + requiresFeature 'es.failure_store_feature_flag_enabled', new Version(8, 12, 0) } def remoteCluster = testClusters.register("${baseName}-remote") { numberOfNodes = 3 @@ -39,6 +41,7 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> firstNode.setting 'node.attr.gateway', 'true' lastNode.setting 'node.attr.gateway', 'true' setting 'xpack.security.enabled', 'false' + requiresFeature 'es.failure_store_feature_flag_enabled', new Version(8, 12, 0) } diff --git a/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java index b17b81b6ac188..25bd24515a04b 100644 --- a/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -37,7 +37,6 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.test.transport.MockTransportService; @@ -92,18 +91,15 @@ private static MockTransportService startTransport( TransportSearchAction.TYPE.name(), EsExecutors.DIRECT_EXECUTOR_SERVICE, SearchRequest::new, - (request, channel, task) -> { - InternalSearchResponse response = new InternalSearchResponse( - new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), + (request, channel, task) -> channel.sendResponse( + new SearchResponse( + SearchHits.empty(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), InternalAggregations.EMPTY, null, - null, false, null, - 1 - ); - SearchResponse searchResponse = new SearchResponse( - response, + null, + 1, null, 1, 1, @@ -111,9 +107,8 @@ private static MockTransportService startTransport( 100, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY - ); - channel.sendResponse(searchResponse); - } + 
) + ) ); newService.registerRequestHandler( ClusterStateAction.NAME, diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 47f7bb488d83d..16209a73826ca 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -8,6 +8,8 @@ package org.elasticsearch.upgrades; +import io.netty.handler.codec.http.HttpMethod; + import com.carrotsearch.randomizedtesting.annotations.Name; import org.apache.http.util.EntityUtils; @@ -42,8 +44,10 @@ import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.transport.Compression; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; import org.hamcrest.Matchers; import org.junit.Before; @@ -137,15 +141,12 @@ public void setIndex() { public void testSearch() throws Exception { int count; if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { + final var createIndex = newXContentRequest(HttpMethod.PUT, "/" + index, (mappingsAndSettings, params) -> { mappingsAndSettings.startObject("settings"); mappingsAndSettings.field("number_of_shards", 1); mappingsAndSettings.field("number_of_replicas", 0); mappingsAndSettings.endObject(); - } - { + mappingsAndSettings.startObject("mappings"); mappingsAndSettings.startObject("properties"); { @@ -166,11 +167,8 @@ public void testSearch() throws Exception { } mappingsAndSettings.endObject(); mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + return mappingsAndSettings; + }); client().performRequest(createIndex); count = randomIntBetween(2000, 3000); @@ -207,15 +205,12 @@ public void testSearch() throws Exception { public void testNewReplicas() throws Exception { if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { + final var createIndex = newXContentRequest(HttpMethod.PUT, "/" + index, (mappingsAndSettings, params) -> { mappingsAndSettings.startObject("settings"); mappingsAndSettings.field("number_of_shards", 1); mappingsAndSettings.field("number_of_replicas", 0); mappingsAndSettings.endObject(); - } - { + mappingsAndSettings.startObject("mappings"); mappingsAndSettings.startObject("properties"); { @@ -225,11 +220,8 @@ public void testNewReplicas() throws Exception { } mappingsAndSettings.endObject(); mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + return mappingsAndSettings; + }); client().performRequest(createIndex); int numDocs = randomIntBetween(2000, 3000); @@ -247,8 +239,11 @@ public void testNewReplicas() throws Exception { final int numReplicas = 1; final long startTime = System.currentTimeMillis(); logger.debug("--> creating [{}] replicas for index [{}]", numReplicas, index); - Request 
setNumberOfReplicas = new Request("PUT", "/" + index + "/_settings"); - setNumberOfReplicas.setJsonEntity("{ \"index\": { \"number_of_replicas\" : " + numReplicas + " }}"); + Request setNumberOfReplicas = newXContentRequest( + HttpMethod.PUT, + "/" + index + "/_settings", + (builder, params) -> builder.startObject("index").field("number_of_replicas", numReplicas).endObject() + ); client().performRequest(setNumberOfReplicas); ensureGreenLongWait(index); @@ -282,30 +277,26 @@ public void testSearchTimeSeriesMode() throws Exception { numDocs = countOfIndexedRandomDocuments(); } assertCountAll(numDocs); - Request request = new Request("GET", "/" + index + "/_search"); - XContentBuilder body = jsonBuilder().startObject(); - body.field("size", 0); - body.startObject("aggs").startObject("check").startObject("scripted_metric"); - { + Request request = newXContentRequest(HttpMethod.GET, "/" + index + "/_search", (body, params) -> { + body.field("size", 0); + body.startObject("aggs").startObject("check").startObject("scripted_metric"); body.field("init_script", "state.timeSeries = new HashSet()"); body.field("map_script", "state.timeSeries.add(doc['dim'].value)"); body.field("combine_script", "return state.timeSeries"); - StringBuilder reduceScript = new StringBuilder(); - reduceScript.append("Set timeSeries = new TreeSet();"); - reduceScript.append("for (s in states) {"); - reduceScript.append(" for (ts in s) {"); - reduceScript.append(" boolean newTs = timeSeries.add(ts);"); - reduceScript.append(" if (false == newTs) {"); - reduceScript.append(" throw new IllegalArgumentException(ts + ' appeared in two shards');"); - reduceScript.append(" }"); - reduceScript.append(" }"); - reduceScript.append("}"); - reduceScript.append("return timeSeries;"); - body.field("reduce_script", reduceScript.toString()); - } - body.endObject().endObject().endObject(); - body.endObject(); - request.setJsonEntity(Strings.toString(body)); + body.field("reduce_script", """ + Set timeSeries = new TreeSet(); + for (s in states) { + for (ts in s) { + boolean newTs = timeSeries.add(ts); + if (false == newTs) { + throw new IllegalArgumentException(ts + ' appeared in two shards'); + } + } + } + return timeSeries;"""); + body.endObject().endObject().endObject(); + return body; + }); Map response = entityAsMap(client().performRequest(request)); assertMap( response, @@ -326,8 +317,11 @@ public void testNewReplicasTimeSeriesMode() throws Exception { final int numReplicas = 1; final long startTime = System.currentTimeMillis(); logger.debug("--> creating [{}] replicas for index [{}]", numReplicas, index); - Request setNumberOfReplicas = new Request("PUT", "/" + index + "/_settings"); - setNumberOfReplicas.setJsonEntity("{ \"index\": { \"number_of_replicas\" : " + numReplicas + " }}"); + Request setNumberOfReplicas = newXContentRequest( + HttpMethod.PUT, + "/" + index + "/_settings", + (builder, params) -> builder.startObject("index").field("number_of_replicas", numReplicas).endObject() + ); client().performRequest(setNumberOfReplicas); ensureGreenLongWait(index); @@ -350,9 +344,7 @@ public void testNewReplicasTimeSeriesMode() throws Exception { } private int createTimeSeriesModeIndex(int replicas) throws IOException { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { + final var createIndex = newXContentRequest(HttpMethod.PUT, "/" + index, (mappingsAndSettings, params) -> { mappingsAndSettings.startObject("settings"); mappingsAndSettings.field("number_of_shards", 1); 
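The `createRestTestExecutionContext` overrides in the two CCS suites above funnel feature checks through an intersection: a YAML test may only rely on a cluster feature when both the local ("write") and the remote ("search") cluster report it. A minimal sketch of that combinator, assuming nothing beyond the single `clusterHasFeature` method exercised in the diff (the helper class and method names here are hypothetical):

```java
import org.elasticsearch.test.rest.TestFeatureService;

// Hypothetical helper; only TestFeatureService#clusterHasFeature is taken from the diff above.
final class CombinedFeatureService {
    static TestFeatureService intersection(TestFeatureService local, TestFeatureService search) {
        return new TestFeatureService() {
            @Override
            public boolean clusterHasFeature(String featureId) {
                // A CCS test can only depend on features that every participating cluster supports.
                return local.clusterHasFeature(featureId) && search.clusterHasFeature(featureId);
            }
        };
    }
}
```

Note the asymmetry with the OS and node-version sets, which the diff merges with a plain union via `Stream.concat`: versions and OS names feed skip conditions that must see every node, while features are capabilities that must hold everywhere.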
mappingsAndSettings.field("number_of_replicas", replicas); @@ -361,8 +353,7 @@ private int createTimeSeriesModeIndex(int replicas) throws IOException { mappingsAndSettings.field("time_series.start_time", 1L); mappingsAndSettings.field("time_series.end_time", DateUtils.MAX_MILLIS_BEFORE_9999 - 1); mappingsAndSettings.endObject(); - } - { + mappingsAndSettings.startObject("mappings"); mappingsAndSettings.startObject("properties"); { @@ -371,11 +362,8 @@ private int createTimeSeriesModeIndex(int replicas) throws IOException { } mappingsAndSettings.endObject(); mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + return mappingsAndSettings; + }); client().performRequest(createIndex); int numDocs = randomIntBetween(2000, 3000); @@ -398,19 +386,15 @@ private int createTimeSeriesModeIndex(int replicas) throws IOException { public void testClusterState() throws Exception { if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - mappingsAndSettings.field("index_patterns", index); - mappingsAndSettings.field("order", "1000"); - { + final Request createTemplate = newXContentRequest(HttpMethod.PUT, "/_template/template_1", (mappingsAndSettings, params) -> { + mappingsAndSettings.field("index_patterns", index); + mappingsAndSettings.field("order", "1000"); mappingsAndSettings.startObject("settings"); mappingsAndSettings.field("number_of_shards", 1); mappingsAndSettings.field("number_of_replicas", 0); mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - Request createTemplate = new Request("PUT", "/_template/template_1"); - createTemplate.setJsonEntity(Strings.toString(mappingsAndSettings)); + return mappingsAndSettings; + }); createTemplate.setOptions(expectWarnings(RestPutIndexTemplateAction.DEPRECATION_WARNING)); client().performRequest(createTemplate); client().performRequest(new Request("PUT", "/" + index)); @@ -455,9 +439,7 @@ public void testShrink() throws IOException { String shrunkenIndex = index + "_shrunk"; int numDocs; if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { + final var createIndex = newXContentRequest(HttpMethod.PUT, "/" + index, (mappingsAndSettings, params) -> { mappingsAndSettings.startObject("mappings"); { mappingsAndSettings.startObject("properties"); @@ -477,11 +459,8 @@ public void testShrink() throws IOException { mappingsAndSettings.field("index.number_of_shards", 5); } mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + return mappingsAndSettings; + }); client().performRequest(createIndex); numDocs = randomIntBetween(512, 1024); @@ -495,14 +474,21 @@ public void testShrink() throws IOException { ensureGreen(index); // wait for source index to be available on both nodes before starting shrink - Request updateSettingsRequest = new Request("PUT", "/" + index + "/_settings"); - updateSettingsRequest.setJsonEntity("{\"settings\": {\"index.blocks.write\": true}}"); - client().performRequest(updateSettingsRequest); - - Request shrinkIndexRequest = new Request("PUT", "/" + index + "/_shrink/" + shrunkenIndex); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/" + index + "/_settings", + 
(builder, params) -> builder.startObject("settings").field("index.blocks.write", true).endObject() + ) + ); - shrinkIndexRequest.setJsonEntity("{\"settings\": {\"index.number_of_shards\": 1}}"); - client().performRequest(shrinkIndexRequest); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/" + index + "/_shrink/" + shrunkenIndex, + (builder, params) -> builder.startObject("settings").field("index.number_of_shards", 1).endObject() + ) + ); refreshAllIndices(); } else { @@ -532,9 +518,7 @@ public void testShrinkAfterUpgrade() throws IOException { String shrunkenIndex = index + "_shrunk"; int numDocs; if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { + final var createIndex = newXContentRequest(HttpMethod.PUT, "/" + index, (mappingsAndSettings, params) -> { mappingsAndSettings.startObject("mappings"); { mappingsAndSettings.startObject("properties"); @@ -552,11 +536,8 @@ public void testShrinkAfterUpgrade() throws IOException { mappingsAndSettings.startObject("settings"); mappingsAndSettings.field("index.number_of_shards", 5); mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + return mappingsAndSettings; + }); client().performRequest(createIndex); numDocs = randomIntBetween(512, 1024); @@ -570,13 +551,21 @@ public void testShrinkAfterUpgrade() throws IOException { } else { ensureGreen(index); // wait for source index to be available on both nodes before starting shrink - Request updateSettingsRequest = new Request("PUT", "/" + index + "/_settings"); - updateSettingsRequest.setJsonEntity("{\"settings\": {\"index.blocks.write\": true}}"); - client().performRequest(updateSettingsRequest); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/" + index + "/_settings", + (builder, params) -> builder.startObject("settings").field("index.blocks.write", true).endObject() + ) + ); - Request shrinkIndexRequest = new Request("PUT", "/" + index + "/_shrink/" + shrunkenIndex); - shrinkIndexRequest.setJsonEntity("{\"settings\": {\"index.number_of_shards\": 1}}"); - client().performRequest(shrinkIndexRequest); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/" + index + "/_shrink/" + shrunkenIndex, + (builder, params) -> builder.startObject("settings").field("index.number_of_shards", 1).endObject() + ) + ); numDocs = countOfIndexedRandomDocuments(); } @@ -618,14 +607,13 @@ public void testShrinkAfterUpgrade() throws IOException { */ public void testRollover() throws IOException { if (isRunningAgainstOldCluster()) { - Request createIndex = new Request("PUT", "/" + index + "-000001"); - createIndex.setJsonEntity(Strings.format(""" - { - "aliases": { - "%s_write": {} - } - }""", index)); - client().performRequest(createIndex); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/" + index + "-000001", + (builder, params) -> builder.startObject("aliases").startObject(index + "_write").endObject().endObject() + ) + ); } int bulkCount = 10; @@ -641,10 +629,13 @@ public void testRollover() throws IOException { assertThat(EntityUtils.toString(client().performRequest(bulkRequest).getEntity()), containsString("\"errors\":false")); if (isRunningAgainstOldCluster()) { - Request rolloverRequest = new Request("POST", "/" + index + "_write/_rollover"); - rolloverRequest.setJsonEntity(""" - { "conditions": { 
"max_docs": 5 }}"""); - client().performRequest(rolloverRequest); + client().performRequest( + newXContentRequest( + HttpMethod.POST, + "/" + index + "_write/_rollover", + (builder, params) -> builder.startObject("conditions").field("max_docs", 5).endObject() + ) + ); assertThat( EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices?v")).getEntity()), @@ -677,30 +668,53 @@ void assertBasicSearchWorks(int count) throws IOException { logger.info("--> testing basic search with sort"); { - Request searchRequest = new Request("GET", "/" + index + "/_search"); - searchRequest.setJsonEntity(""" - { "sort": [{ "int" : "asc" }]}"""); - Map response = entityAsMap(client().performRequest(searchRequest)); + Map response = entityAsMap( + client().performRequest( + newXContentRequest( + HttpMethod.GET, + "/" + index + "/_search", + (builder, params) -> builder.startArray("sort").startObject().field("int", "asc").endObject().endArray() + ) + ) + ); assertNoFailures(response); assertTotalHits(count, response); } logger.info("--> testing exists filter"); { - Request searchRequest = new Request("GET", "/" + index + "/_search"); - searchRequest.setJsonEntity(""" - { "query": { "exists" : {"field": "string"} }}"""); - Map response = entityAsMap(client().performRequest(searchRequest)); + Map response = entityAsMap( + client().performRequest( + newXContentRequest( + HttpMethod.GET, + "/" + index + "/_search", + (builder, params) -> builder.startObject("query") + .startObject("exists") + .field("field", "string") + .endObject() + .endObject() + ) + ) + ); assertNoFailures(response); assertTotalHits(count, response); } logger.info("--> testing field with dots in the name"); { - Request searchRequest = new Request("GET", "/" + index + "/_search"); - searchRequest.setJsonEntity(""" - { "query": { "exists" : {"field": "field.with.dots"} }}"""); - Map response = entityAsMap(client().performRequest(searchRequest)); + Map response = entityAsMap( + client().performRequest( + newXContentRequest( + HttpMethod.GET, + "/" + index + "/_search", + (builder, params) -> builder.startObject("query") + .startObject("exists") + .field("field", "field.with.dots") + .endObject() + .endObject() + ) + ) + ); assertNoFailures(response); assertTotalHits(count, response); } @@ -719,14 +733,23 @@ void assertAllSearchWorks(int count) throws IOException { assertNotNull(stringValue); String id = (String) bestHit.get("_id"); - Request explainRequest = new Request("GET", "/" + index + "/_explain/" + id); - explainRequest.setJsonEntity("{ \"query\": { \"match_all\" : {} }}"); - String explanation = toStr(client().performRequest(explainRequest)); + String explanation = toStr( + client().performRequest( + newXContentRequest( + HttpMethod.GET, + "/" + index + "/_explain/" + id, + (builder, params) -> builder.startObject("query").startObject("match_all").endObject().endObject() + ) + ) + ); assertFalse("Could not find payload boost in explanation\n" + explanation, explanation.contains("payloadBoost")); // Make sure the query can run on the whole index - Request searchRequest = new Request("GET", "/" + index + "/_search"); - searchRequest.setEntity(explainRequest.getEntity()); + Request searchRequest = newXContentRequest( + HttpMethod.GET, + "/" + index + "/_search", + (builder, params) -> builder.startObject("query").startObject("match_all").endObject().endObject() + ); searchRequest.addParameter("explain", "true"); Map matchAllResponse = entityAsMap(client().performRequest(searchRequest)); 
assertNoFailures(matchAllResponse); @@ -735,19 +758,22 @@ void assertAllSearchWorks(int count) throws IOException { void assertBasicAggregationWorks() throws IOException { // histogram on a long - Request longHistogramRequest = new Request("GET", "/" + index + "/_search"); - longHistogramRequest.setJsonEntity(""" - { - "aggs": { - "histo": { - "histogram": { - "field": "int", - "interval": 10 - } - } - } - }"""); - Map longHistogram = entityAsMap(client().performRequest(longHistogramRequest)); + Map longHistogram = entityAsMap( + client().performRequest( + newXContentRequest( + HttpMethod.GET, + "/" + index + "/_search", + (builder, params) -> builder.startObject("aggs") + .startObject("histo") + .startObject("histogram") + .field("field", "int") + .field("interval", 10) + .endObject() + .endObject() + .endObject() + ) + ) + ); assertNoFailures(longHistogram); List histoBuckets = (List) XContentMapValues.extractValue("aggregations.histo.buckets", longHistogram); int histoCount = 0; @@ -758,18 +784,21 @@ void assertBasicAggregationWorks() throws IOException { assertTotalHits(histoCount, longHistogram); // terms on a boolean - Request boolTermsRequest = new Request("GET", "/" + index + "/_search"); - boolTermsRequest.setJsonEntity(""" - { - "aggs": { - "bool_terms": { - "terms": { - "field": "bool" - } - } - } - }"""); - Map boolTerms = entityAsMap(client().performRequest(boolTermsRequest)); + Map boolTerms = entityAsMap( + client().performRequest( + newXContentRequest( + HttpMethod.GET, + "/" + index + "/_search", + (builder, params) -> builder.startObject("aggs") + .startObject("bool_terms") + .startObject("terms") + .field("field", "bool") + .endObject() + .endObject() + .endObject() + ) + ) + ); List termsBuckets = (List) XContentMapValues.extractValue("aggregations.bool_terms.buckets", boolTerms); int termsCount = 0; for (Object entry : termsBuckets) { @@ -780,22 +809,33 @@ void assertBasicAggregationWorks() throws IOException { } void assertRealtimeGetWorks() throws IOException { - Request disableAutoRefresh = new Request("PUT", "/" + index + "/_settings"); - disableAutoRefresh.setJsonEntity(""" - { "index": { "refresh_interval" : -1 }}"""); - client().performRequest(disableAutoRefresh); - - Request searchRequest = new Request("GET", "/" + index + "/_search"); - searchRequest.setJsonEntity(""" - { "query": { "match_all" : {} }}"""); - Map searchResponse = entityAsMap(client().performRequest(searchRequest)); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/" + index + "/_settings", + (builder, params) -> builder.startObject("index").field("refresh_interval", -1).endObject() + ) + ); + + Map searchResponse = entityAsMap( + client().performRequest( + newXContentRequest( + HttpMethod.GET, + "/" + index + "/_search", + (builder, params) -> builder.startObject("query").startObject("match_all").endObject().endObject() + ) + ) + ); Map hit = (Map) ((List) (XContentMapValues.extractValue("hits.hits", searchResponse))).get(0); String docId = (String) hit.get("_id"); - Request updateRequest = new Request("POST", "/" + index + "/_update/" + docId); - updateRequest.setJsonEntity(""" - { "doc" : { "foo": "bar"}}"""); - client().performRequest(updateRequest); + client().performRequest( + newXContentRequest( + HttpMethod.POST, + "/" + index + "/_update/" + docId, + (builder, params) -> builder.startObject("doc").field("foo", "bar").endObject() + ) + ); Request getRequest = new Request("GET", "/" + index + "/_doc/" + docId); @@ -803,23 +843,29 @@ void assertRealtimeGetWorks() 
throws IOException { Map source = (Map) getRsp.get("_source"); assertTrue("doc does not contain 'foo' key: " + source, source.containsKey("foo")); - Request enableAutoRefresh = new Request("PUT", "/" + index + "/_settings"); - enableAutoRefresh.setJsonEntity(""" - { "index": { "refresh_interval" : "1s" }}"""); - client().performRequest(enableAutoRefresh); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/" + index + "/_settings", + (builder, params) -> builder.startObject("index").field("refresh_interval", "1s").endObject() + ) + ); } void assertStoredBinaryFields(int count) throws Exception { - Request request = new Request("GET", "/" + index + "/_search"); - request.setJsonEntity(""" - { - "query": { - "match_all": {} - }, - "size": 100, - "stored_fields": "binary" - }"""); - Map rsp = entityAsMap(client().performRequest(request)); + final var restResponse = client().performRequest( + newXContentRequest( + HttpMethod.GET, + "/" + index + "/_search", + (builder, params) -> builder.startObject("query") + .startObject("match_all") + .endObject() + .endObject() + .field("size", 100) + .field("stored_fields", "binary") + ) + ); + Map rsp = entityAsMap(restResponse); assertTotalHits(count, rsp); List hits = (List) XContentMapValues.extractValue("hits.hits", rsp); @@ -828,9 +874,11 @@ void assertStoredBinaryFields(int count) throws Exception { Map hitRsp = (Map) hit; List values = (List) XContentMapValues.extractValue("fields.binary", hitRsp); assertEquals(1, values.size()); - String value = (String) values.get(0); - byte[] binaryValue = Base64.getDecoder().decode(value); - assertEquals("Unexpected string length [" + value + "]", 16, binaryValue.length); + byte[] binaryValue = switch (XContentType.fromMediaType(restResponse.getEntity().getContentType().getValue())) { + case JSON, VND_JSON -> Base64.getDecoder().decode((String) values.get(0)); + case SMILE, CBOR, YAML, VND_SMILE, VND_CBOR, VND_YAML -> (byte[]) values.get(0); + }; + assertEquals("Unexpected binary length [" + Base64.getEncoder().encodeToString(binaryValue) + "]", 16, binaryValue.length); } } @@ -969,76 +1017,80 @@ public void testSnapshotRestore() throws IOException { assertTotalHits(count, countResponse); // Stick a routing attribute into to cluster settings so we can see it after the restore - Request addRoutingSettings = new Request("PUT", "/_cluster/settings"); - addRoutingSettings.setJsonEntity(Strings.format(""" - {"persistent": {"cluster.routing.allocation.exclude.test_attr": "%s"}} - """, getOldClusterVersion())); - client().performRequest(addRoutingSettings); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/_cluster/settings", + (builder, params) -> builder.startObject("persistent") + .field("cluster.routing.allocation.exclude.test_attr", getOldClusterVersion()) + .endObject() + ) + ); // Stick a template into the cluster so we can see it after the restore - XContentBuilder templateBuilder = JsonXContent.contentBuilder().startObject(); - templateBuilder.field("index_patterns", "evil_*"); // Don't confuse other tests by applying the template - templateBuilder.startObject("settings"); - { - templateBuilder.field("number_of_shards", 1); - } - templateBuilder.endObject(); - templateBuilder.startObject("mappings"); - { + Request createTemplateRequest = newXContentRequest(HttpMethod.PUT, "/_template/test_template", (templateBuilder, params) -> { + templateBuilder.field("index_patterns", "evil_*"); // Don't confuse other tests by applying the template + 
templateBuilder.startObject("settings"); + { + templateBuilder.field("number_of_shards", 1); + } + templateBuilder.endObject(); + templateBuilder.startObject("mappings"); { - templateBuilder.startObject("_source"); { - templateBuilder.field("enabled", true); + templateBuilder.startObject("_source"); + { + templateBuilder.field("enabled", true); + } + templateBuilder.endObject(); } - templateBuilder.endObject(); } - } - templateBuilder.endObject(); - templateBuilder.startObject("aliases"); - { - templateBuilder.startObject("alias1").endObject(); - templateBuilder.startObject("alias2"); + templateBuilder.endObject(); + templateBuilder.startObject("aliases"); { - templateBuilder.startObject("filter"); + templateBuilder.startObject("alias1").endObject(); + templateBuilder.startObject("alias2"); { - templateBuilder.startObject("term"); + templateBuilder.startObject("filter"); { - templateBuilder.field("version", isRunningAgainstOldCluster() ? getOldClusterVersion() : Build.current().version()); + templateBuilder.startObject("term"); + { + templateBuilder.field( + "version", + isRunningAgainstOldCluster() ? getOldClusterVersion() : Build.current().version() + ); + } + templateBuilder.endObject(); } templateBuilder.endObject(); } templateBuilder.endObject(); } templateBuilder.endObject(); - } - templateBuilder.endObject().endObject(); - Request createTemplateRequest = new Request("PUT", "/_template/test_template"); - createTemplateRequest.setJsonEntity(Strings.toString(templateBuilder)); + return templateBuilder; + }); createTemplateRequest.setOptions(expectWarnings(RestPutIndexTemplateAction.DEPRECATION_WARNING)); - client().performRequest(createTemplateRequest); if (isRunningAgainstOldCluster()) { // Create the repo - XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); - { + client().performRequest(newXContentRequest(HttpMethod.PUT, "/_snapshot/repo", (repoConfig, params) -> { repoConfig.field("type", "fs"); repoConfig.startObject("settings"); { repoConfig.field("compress", randomBoolean()); repoConfig.field("location", repoDirectory.getRoot().getPath()); } - repoConfig.endObject(); - } - repoConfig.endObject(); - Request createRepoRequest = new Request("PUT", "/_snapshot/repo"); - createRepoRequest.setJsonEntity(Strings.toString(repoConfig)); - client().performRequest(createRepoRequest); + return repoConfig.endObject(); + })); } - Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + (isRunningAgainstOldCluster() ? "old_snap" : "new_snap")); + Request createSnapshot = newXContentRequest( + HttpMethod.PUT, + "/_snapshot/repo/" + (isRunningAgainstOldCluster() ? 
"old_snap" : "new_snap"), + (builder, params) -> builder.field("indices", index) + ); createSnapshot.addParameter("wait_for_completion", "true"); - createSnapshot.setJsonEntity("{\"indices\": \"" + index + "\"}"); client().performRequest(createSnapshot); checkSnapshot("old_snap", count, getOldClusterVersion(), getOldClusterIndexVersion()); @@ -1049,18 +1101,13 @@ public void testSnapshotRestore() throws IOException { public void testHistoryUUIDIsAdded() throws Exception { if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { + client().performRequest(newXContentRequest(HttpMethod.PUT, '/' + index, (mappingsAndSettings, params) -> { mappingsAndSettings.startObject("settings"); mappingsAndSettings.field("number_of_shards", 1); mappingsAndSettings.field("number_of_replicas", 1); mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - client().performRequest(createIndex); + return mappingsAndSettings; + })); } else { ensureGreenLongWait(index); @@ -1092,9 +1139,7 @@ public void testHistoryUUIDIsAdded() throws Exception { public void testSoftDeletes() throws Exception { if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { + client().performRequest(newXContentRequest(HttpMethod.PUT, "/" + index, (mappingsAndSettings, params) -> { mappingsAndSettings.startObject("settings"); mappingsAndSettings.field("number_of_shards", 1); mappingsAndSettings.field("number_of_replicas", 1); @@ -1102,17 +1147,13 @@ public void testSoftDeletes() throws Exception { mappingsAndSettings.field("soft_deletes.enabled", true); } mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - client().performRequest(createIndex); + return mappingsAndSettings; + })); int numDocs = between(10, 100); for (int i = 0; i < numDocs; i++) { - String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v1").endObject()); - Request request = new Request("POST", "/" + index + "/_doc/" + i); - request.setJsonEntity(doc); - client().performRequest(request); + client().performRequest( + newXContentRequest(HttpMethod.POST, "/" + index + "/_doc/" + i, (builder, params) -> builder.field("field", "v1")) + ); refreshAllIndices(); } client().performRequest(new Request("POST", "/" + index + "/_flush")); @@ -1120,10 +1161,9 @@ public void testSoftDeletes() throws Exception { assertTotalHits(liveDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")))); for (int i = 0; i < numDocs; i++) { if (randomBoolean()) { - String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v2").endObject()); - Request request = new Request("POST", "/" + index + "/_doc/" + i); - request.setJsonEntity(doc); - client().performRequest(request); + client().performRequest( + newXContentRequest(HttpMethod.POST, "/" + index + "/_doc/" + i, (builder, params) -> builder.field("field", "v2")) + ); } else if (randomBoolean()) { client().performRequest(new Request("DELETE", "/" + index + "/_doc/" + i)); liveDocs--; @@ -1151,9 +1191,15 @@ public void testClosedIndices() throws Exception { if (randomBoolean()) { numDocs = between(1, 100); for (int i = 0; i < 
numDocs; i++) { - final Request request = new Request("POST", "/" + index + "/_doc/" + i); - request.setJsonEntity(Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v1").endObject())); - assertOK(client().performRequest(request)); + assertOK( + client().performRequest( + newXContentRequest( + HttpMethod.POST, + "/" + index + "/_doc/" + i, + (builder, params) -> builder.field("field", "v1") + ) + ) + ); if (rarely()) { refreshAllIndices(); } @@ -1252,22 +1298,29 @@ private void checkSnapshot(String snapshotName, int count, String tookOnVersion, ); // Remove the routing setting and template so we can test restoring them. - Request clearRoutingFromSettings = new Request("PUT", "/_cluster/settings"); - clearRoutingFromSettings.setJsonEntity(""" - {"persistent":{"cluster.routing.allocation.exclude.test_attr": null}}"""); - client().performRequest(clearRoutingFromSettings); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/_cluster/settings", + (builder, params) -> builder.startObject("persistent").nullField("cluster.routing.allocation.exclude.test_attr").endObject() + ) + ); + client().performRequest(new Request("DELETE", "/_template/test_template")); // Restore - XContentBuilder restoreCommand = JsonXContent.contentBuilder().startObject(); - restoreCommand.field("include_global_state", true); - restoreCommand.field("indices", index); - restoreCommand.field("rename_pattern", index); - restoreCommand.field("rename_replacement", "restored_" + index); - restoreCommand.endObject(); - Request restoreRequest = new Request("POST", "/_snapshot/repo/" + snapshotName + "/_restore"); + Request restoreRequest = newXContentRequest( + HttpMethod.POST, + "/_snapshot/repo/" + snapshotName + "/_restore", + (restoreCommand, params) -> { + restoreCommand.field("include_global_state", true); + restoreCommand.field("indices", index); + restoreCommand.field("rename_pattern", index); + restoreCommand.field("rename_replacement", "restored_" + index); + return restoreCommand; + } + ); restoreRequest.addParameter("wait_for_completion", "true"); - restoreRequest.setJsonEntity(Strings.toString(restoreCommand)); client().performRequest(restoreRequest); // Make sure search finds all documents @@ -1361,9 +1414,8 @@ private void indexRandomDocuments( } private void indexDocument(String id) throws IOException { - final Request indexRequest = new Request("POST", "/" + index + "/" + "_doc/" + id); - indexRequest.setJsonEntity(Strings.toString(JsonXContent.contentBuilder().startObject().field("f", "v").endObject())); - assertOK(client().performRequest(indexRequest)); + final var req = newXContentRequest(HttpMethod.POST, "/" + index + "/" + "_doc/" + id, (builder, params) -> builder.field("f", "v")); + assertOK(client().performRequest(req)); } private int countOfIndexedRandomDocuments() throws IOException { @@ -1371,13 +1423,9 @@ private int countOfIndexedRandomDocuments() throws IOException { } private void saveInfoDocument(String id, String value) throws IOException { - XContentBuilder infoDoc = JsonXContent.contentBuilder().startObject(); - infoDoc.field("value", value); - infoDoc.endObject(); // Only create the first version so we know how many documents are created when the index is first created - Request request = new Request("PUT", "/info/_doc/" + id); + Request request = newXContentRequest(HttpMethod.PUT, "/info/_doc/" + id, (builder, params) -> builder.field("value", value)); request.addParameter("op_type", "create"); - request.setJsonEntity(Strings.toString(infoDoc)); 
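Since each of these body lambdas is just a `ToXContent`, it can also be bound to a variable and reused across requests; the `testResize` hunk below does exactly that with its `settings0` write-block body. A fragment illustrating the pattern (index names are placeholders, and `client()`/`newXContentRequest` are assumed from the surrounding `ESRestTestCase` context):

```java
import io.netty.handler.codec.http.HttpMethod;

import org.elasticsearch.xcontent.ToXContent;

// Reusing one ToXContent body for several settings updates (illustrative fragment,
// meant to run inside a test method).
ToXContent writeBlock = (builder, params) -> builder.startObject("settings").field("index.blocks.write", true).endObject();
client().performRequest(newXContentRequest(HttpMethod.PUT, "/index-a/_settings", writeBlock));
client().performRequest(newXContentRequest(HttpMethod.PUT, "/index-b/_settings", writeBlock));
```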
client().performRequest(request); } @@ -1422,19 +1470,13 @@ protected void ensureGreenLongWait(String indexName) throws IOException { public void testPeerRecoveryRetentionLeases() throws Exception { if (isRunningAgainstOldCluster()) { - XContentBuilder settings = jsonBuilder(); - settings.startObject(); - { + client().performRequest(newXContentRequest(HttpMethod.PUT, "/" + index, (settings, params) -> { settings.startObject("settings"); settings.field("number_of_shards", between(1, 5)); settings.field("number_of_replicas", between(0, 1)); settings.endObject(); - } - settings.endObject(); - - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(settings)); - client().performRequest(createIndex); + return settings; + })); } ensureGreen(index); ensurePeerRecoveryRetentionLeasesRenewedAndSynced(index); @@ -1536,18 +1578,19 @@ public void testResize() throws Exception { flush(index, randomBoolean()); } } - Request updateSettingsRequest = new Request("PUT", "/" + index + "/_settings"); - updateSettingsRequest.setJsonEntity("{\"settings\": {\"index.blocks.write\": true}}"); - client().performRequest(updateSettingsRequest); + final ToXContent settings0 = (builder, params) -> builder.startObject("settings").field("index.blocks.write", true).endObject(); + client().performRequest(newXContentRequest(HttpMethod.PUT, "/" + index + "/_settings", settings0)); { final String target = index + "_shrunken"; - Request shrinkRequest = new Request("PUT", "/" + index + "/_shrink/" + target); Settings.Builder settings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1); if (randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); } - shrinkRequest.setJsonEntity("{\"settings\":" + Strings.toString(settings.build()) + "}"); - client().performRequest(shrinkRequest); + client().performRequest(newXContentRequest(HttpMethod.PUT, "/" + index + "/_shrink/" + target, (builder, params) -> { + builder.startObject("settings"); + settings.build().toXContent(builder, params); + return builder.endObject(); + })); ensureGreenLongWait(target); assertNumHits(target, numDocs + moreDocs, 1); } @@ -1557,9 +1600,11 @@ public void testResize() throws Exception { if (randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); } - Request splitRequest = new Request("PUT", "/" + index + "/_split/" + target); - splitRequest.setJsonEntity("{\"settings\":" + Strings.toString(settings.build()) + "}"); - client().performRequest(splitRequest); + client().performRequest(newXContentRequest(HttpMethod.PUT, "/" + index + "/_split/" + target, (builder, params) -> { + builder.startObject("settings"); + settings.build().toXContent(builder, params); + return builder.endObject(); + })); ensureGreenLongWait(target); assertNumHits(target, numDocs + moreDocs, 6); } @@ -1584,9 +1629,13 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { + "access to system indices will be prevented by default"; if (isRunningAgainstOldCluster()) { // create index - Request createTestIndex = new Request("PUT", "/test_index_old"); - createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}"); - client().performRequest(createTestIndex); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/test_index_old", + (builder, params) -> builder.startObject("settings").field("index.number_of_replicas", 0).endObject() + ) + ); Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", 
"true"); @@ -1597,16 +1646,16 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { client().performRequest(bulk); // start a async reindex job - Request reindex = new Request("POST", "/_reindex"); - reindex.setJsonEntity(""" - { - "source":{ - "index":"test_index_old" - }, - "dest":{ - "index":"test_index_reindex" - } - }"""); + Request reindex = newXContentRequest( + HttpMethod.POST, + "/_reindex", + (builder, params) -> builder.startObject("source") + .field("index", "test_index_old") + .endObject() + .startObject("dest") + .field("index", "test_index_reindex") + .endObject() + ); reindex.addParameter("wait_for_completion", "false"); Map response = entityAsMap(client().performRequest(reindex)); String taskId = (String) response.get("task"); @@ -1640,14 +1689,18 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { // upgraded properly. If we're already on 8.x, skip this part of the test. if (clusterHasFeature(RestTestLegacyFeatures.SYSTEM_INDICES_REST_ACCESS_ENFORCED) == false) { // Create an alias to make sure it gets upgraded properly - Request putAliasRequest = new Request("POST", "/_aliases"); - putAliasRequest.setJsonEntity(""" - { - "actions": [ - {"add": {"index": ".tasks", "alias": "test-system-alias"}}, - {"add": {"index": "test_index_reindex", "alias": "test-system-alias"}} - ] - }"""); + Request putAliasRequest = newXContentRequest(HttpMethod.POST, "/_aliases", (builder, params) -> { + builder.startArray("actions"); + for (var index : List.of(".tasks", "test_index_reindex")) { + builder.startObject() + .startObject("add") + .field("index", index) + .field("alias", "test-system-alias") + .endObject() + .endObject(); + } + return builder.endArray(); + }); putAliasRequest.setOptions(expectVersionSpecificWarnings(v -> { v.current(systemIndexWarning); v.compatible(systemIndexWarning); @@ -1711,41 +1764,37 @@ public void testEnableSoftDeletesOnRestore() throws Exception { i -> jsonBuilder().startObject().field("field", "value").endObject() ); // create repo - XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); - { + client().performRequest(newXContentRequest(HttpMethod.PUT, "/_snapshot/repo", (repoConfig, params) -> { repoConfig.field("type", "fs"); repoConfig.startObject("settings"); - { - repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", repoDirectory.getRoot().getPath()); - } + repoConfig.field("compress", randomBoolean()); + repoConfig.field("location", repoDirectory.getRoot().getPath()); repoConfig.endObject(); - } - repoConfig.endObject(); - Request createRepoRequest = new Request("PUT", "/_snapshot/repo"); - createRepoRequest.setJsonEntity(Strings.toString(repoConfig)); - client().performRequest(createRepoRequest); + return repoConfig; + })); // create snapshot - Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + snapshot); + Request createSnapshot = newXContentRequest( + HttpMethod.PUT, + "/_snapshot/repo/" + snapshot, + (builder, params) -> builder.field("indices", index) + ); createSnapshot.addParameter("wait_for_completion", "true"); - createSnapshot.setJsonEntity("{\"indices\": \"" + index + "\"}"); client().performRequest(createSnapshot); } else { String restoredIndex = "restored-" + index; // Restore - XContentBuilder restoreCommand = JsonXContent.contentBuilder().startObject(); - restoreCommand.field("indices", index); - restoreCommand.field("rename_pattern", index); - restoreCommand.field("rename_replacement", restoredIndex); - 
restoreCommand.startObject("index_settings"); - { - restoreCommand.field("index.soft_deletes.enabled", true); - } - restoreCommand.endObject(); - restoreCommand.endObject(); - Request restoreRequest = new Request("POST", "/_snapshot/repo/" + snapshot + "/_restore"); + Request restoreRequest = newXContentRequest( + HttpMethod.POST, + "/_snapshot/repo/" + snapshot + "/_restore", + (restoreCommand, params) -> { + restoreCommand.field("indices", index); + restoreCommand.field("rename_pattern", index); + restoreCommand.field("rename_replacement", restoredIndex); + restoreCommand.startObject("index_settings").field("index.soft_deletes.enabled", true).endObject(); + return restoreCommand; + } + ); restoreRequest.addParameter("wait_for_completion", "true"); - restoreRequest.setJsonEntity(Strings.toString(restoreCommand)); client().performRequest(restoreRequest); ensureGreen(restoredIndex); int numDocs = countOfIndexedRandomDocuments(); @@ -1768,40 +1817,36 @@ public void testForbidDisableSoftDeletesOnRestore() throws Exception { i -> jsonBuilder().startObject().field("field", "value").endObject() ); // create repo - XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); - { + client().performRequest(newXContentRequest(HttpMethod.PUT, "/_snapshot/repo", (repoConfig, params) -> { repoConfig.field("type", "fs"); repoConfig.startObject("settings"); - { - repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", repoDirectory.getRoot().getPath()); - } + repoConfig.field("compress", randomBoolean()); + repoConfig.field("location", repoDirectory.getRoot().getPath()); repoConfig.endObject(); - } - repoConfig.endObject(); - Request createRepoRequest = new Request("PUT", "/_snapshot/repo"); - createRepoRequest.setJsonEntity(Strings.toString(repoConfig)); - client().performRequest(createRepoRequest); + return repoConfig; + })); // create snapshot - Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + snapshot); + Request createSnapshot = newXContentRequest( + HttpMethod.PUT, + "/_snapshot/repo/" + snapshot, + (builder, params) -> builder.field("indices", index) + ); createSnapshot.addParameter("wait_for_completion", "true"); - createSnapshot.setJsonEntity("{\"indices\": \"" + index + "\"}"); client().performRequest(createSnapshot); } else { // Restore - XContentBuilder restoreCommand = JsonXContent.contentBuilder().startObject(); - restoreCommand.field("indices", index); - restoreCommand.field("rename_pattern", index); - restoreCommand.field("rename_replacement", "restored-" + index); - restoreCommand.startObject("index_settings"); - { - restoreCommand.field("index.soft_deletes.enabled", false); - } - restoreCommand.endObject(); - restoreCommand.endObject(); - Request restoreRequest = new Request("POST", "/_snapshot/repo/" + snapshot + "/_restore"); + Request restoreRequest = newXContentRequest( + HttpMethod.POST, + "/_snapshot/repo/" + snapshot + "/_restore", + (restoreCommand, params) -> { + restoreCommand.field("indices", index); + restoreCommand.field("rename_pattern", index); + restoreCommand.field("rename_replacement", "restored-" + index); + restoreCommand.startObject("index_settings").field("index.soft_deletes.enabled", false).endObject(); + return restoreCommand; + } + ); restoreRequest.addParameter("wait_for_completion", "true"); - restoreRequest.setJsonEntity(Strings.toString(restoreCommand)); final ResponseException error = expectThrows(ResponseException.class, () -> client().performRequest(restoreRequest)); assertThat(error.getMessage(), 
containsString("cannot disable setting [index.soft_deletes.enabled] on restore")); } @@ -1818,21 +1863,16 @@ public void testTransportCompressionSetting() throws IOException { .orElse(false); assumeTrue("the old transport.compress setting existed before 7.14", originalClusterCompressSettingIsBoolean); if (isRunningAgainstOldCluster()) { - final Request putSettingsRequest = new Request("PUT", "/_cluster/settings"); - try (XContentBuilder builder = jsonBuilder()) { - builder.startObject(); - { - builder.startObject("persistent"); - { - builder.field("cluster.remote.foo.seeds", Collections.singletonList("localhost:9200")); - builder.field("cluster.remote.foo.transport.compress", "true"); - } - builder.endObject(); - } - builder.endObject(); - putSettingsRequest.setJsonEntity(Strings.toString(builder)); - } - client().performRequest(putSettingsRequest); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/_cluster/settings", + (builder, params) -> builder.startObject("persistent") + .field("cluster.remote.foo.seeds", Collections.singletonList("localhost:9200")) + .field("cluster.remote.foo.transport.compress", "true") + .endObject() + ) + ); } else { final Request getSettingsRequest = new Request("GET", "/_cluster/settings"); final Response getSettingsResponse = client().performRequest(getSettingsRequest); diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index d75519002f92e..b2df6b0fa01a3 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -39,6 +39,7 @@ import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.xcontent.XContentBuilder; @@ -76,6 +77,7 @@ public class QueryBuilderBWCIT extends ParameterizedFullClusterRestartTestCase { .version(getOldClusterTestVersion()) .nodes(2) .setting("xpack.security.enabled", "false") + .feature(FeatureFlag.FAILURE_STORE_ENABLED) .apply(() -> clusterConfig) .build(); diff --git a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/MultiClusterSearchYamlTestSuiteIT.java b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/MultiClusterSearchYamlTestSuiteIT.java index 0f92a19098026..0ebd36ec50f1a 100644 --- a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/MultiClusterSearchYamlTestSuiteIT.java +++ b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/MultiClusterSearchYamlTestSuiteIT.java @@ -13,47 +13,61 @@ import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; import org.apache.lucene.tests.util.TimeUnits; -import org.elasticsearch.Version; +import org.elasticsearch.test.rest.TestFeatureService; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ClientYamlTestClient; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; import org.junit.BeforeClass; +import java.util.Collections; 
+import java.util.HashSet; +import java.util.Set; + @TimeoutSuite(millis = 5 * TimeUnits.MINUTE) // to account for slow as hell VMs public class MultiClusterSearchYamlTestSuiteIT extends ESClientYamlSuiteTestCase { - private static Version remoteEsVersion = null; + private static String remoteEsVersion = null; @BeforeClass - public static void determineRemoteClusterMinimumVersion() { + public static void readRemoteClusterVersion() { String remoteClusterVersion = System.getProperty("tests.rest.remote_cluster_version"); if (remoteClusterVersion != null) { - remoteEsVersion = Version.fromString(remoteClusterVersion); + remoteEsVersion = remoteClusterVersion; } } + @Override protected ClientYamlTestExecutionContext createRestTestExecutionContext( ClientYamlTestCandidate clientYamlTestCandidate, - ClientYamlTestClient clientYamlTestClient + ClientYamlTestClient clientYamlTestClient, + final Set<String> nodesVersions, + final TestFeatureService testFeatureService, + final Set<String> osSet ) { - return new ClientYamlTestExecutionContext(clientYamlTestCandidate, clientYamlTestClient, randomizeContentType()) { + /* + * Since the esVersion is used to skip tests in ESClientYamlSuiteTestCase, we also take into account the + * remote cluster version here. This is used to skip tests if some feature isn't available on the remote cluster yet. + */ + final Set<String> commonVersions; + if (remoteEsVersion == null || nodesVersions.contains(remoteEsVersion)) { + commonVersions = nodesVersions; + } else { + var versionsCopy = new HashSet<>(nodesVersions); + versionsCopy.add(remoteEsVersion); + commonVersions = Collections.unmodifiableSet(versionsCopy); + } - /** - * Since the esVersion is used to skip tests in ESClientYamlSuiteTestCase, we also take into account the - * remote cluster version here and return it if it is lower than the local client version. This is used to - * skip tests if some feature isn't available on the remote cluster yet. - */ - @Override - public Version esVersion() { - Version clientEsVersion = clientYamlTestClient.getEsVersion(); - if (remoteEsVersion == null) { - return clientEsVersion; - } else { - return remoteEsVersion.before(clientEsVersion) ? remoteEsVersion : clientEsVersion; - } - } - }; + // TODO: same for os and features. Better to do that once these tests have been migrated to the new ElasticsearchCluster-based + // framework. See CcsCommonYamlTestSuiteIT for example.
+ return new ClientYamlTestExecutionContext( + clientYamlTestCandidate, + clientYamlTestClient, + randomizeContentType(), + commonVersions, + testFeatureService, + osSet + ); } @Override diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FieldCapsIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FieldCapsIT.java index f3971d832be3e..860cd2c0e8617 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FieldCapsIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FieldCapsIT.java @@ -11,6 +11,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import org.apache.http.HttpHost; +import org.apache.lucene.tests.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.Build; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.client.Request; @@ -38,6 +39,7 @@ * In 8.2 we also added the ability to filter fields by type and metadata, with some post-hoc filtering applied on * the co-ordinating node if older nodes were included in the system */ +@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103473") public class FieldCapsIT extends ParameterizedRollingUpgradeTestCase { public FieldCapsIT(@Name("upgradedNodes") int upgradedNodes) { diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BlockedSearcherRestCancellationTestCase.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BlockedSearcherRestCancellationTestCase.java index 7806c14156fba..8803ad4af7348 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BlockedSearcherRestCancellationTestCase.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BlockedSearcherRestCancellationTestCase.java @@ -55,7 +55,7 @@ */ public abstract class BlockedSearcherRestCancellationTestCase extends HttpSmokeTestCase { - private static final Setting<Boolean> BLOCK_SEARCHER_SETTING = Setting.boolSetting( + protected static final Setting<Boolean> BLOCK_SEARCHER_SETTING = Setting.boolSetting( "index.block_searcher", false, Setting.Property.IndexScope ) diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java index 70df4aaeaf5de..f7f46671e2354 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java @@ -8,6 +8,8 @@ package org.elasticsearch.http; +import io.netty.handler.codec.http.HttpMethod; + import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; @@ -18,6 +20,7 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.XContentTestUtils; +import org.elasticsearch.test.rest.ESRestTestCase; import java.io.IOException; import java.util.ArrayList; @@ -213,22 +216,12 @@ private Map createIndices(String...
indices) throws IOException assert indices.length > 0; for (String index : indices) { - String indexSettings = """ - { - "settings": { - "index": { - "number_of_shards": 1, - "number_of_replicas": 2, - "routing": { - "allocation": { - "total_shards_per_node": 1 - } - } - } - } - }"""; - Request request = new Request("PUT", "/" + index); - request.setJsonEntity(indexSettings); + final var request = ESRestTestCase.newXContentRequest(HttpMethod.PUT, "/" + index, (builder, params) -> { + builder.startObject("settings").startObject("index"); + builder.field("number_of_shards", 1).field("number_of_replicas", 2); + builder.startObject("routing").startObject("allocation").field("total_shards_per_node", 1).endObject().endObject(); + return builder.endObject().endObject(); + }); assertOK(getRestClient().performRequest(request)); } ensureGreen(indices); diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RolloverRestCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RolloverRestCancellationIT.java index 240d2c7d38974..055a9a29519c3 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RolloverRestCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RolloverRestCancellationIT.java @@ -9,12 +9,21 @@ package org.elasticsearch.http; import org.apache.http.client.methods.HttpPost; +import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.rollover.RolloverAction; import org.elasticsearch.client.Request; +import org.elasticsearch.common.settings.Settings; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; public class RolloverRestCancellationIT extends BlockedSearcherRestCancellationTestCase { public void testRolloverRestCancellation() throws Exception { - runTest(new Request(HttpPost.METHOD_NAME, "test/_rollover"), RolloverAction.NAME); + assertAcked( + prepareCreate("test-000001").addAlias(new Alias("test-alias").writeIndex(true)) + .setSettings(Settings.builder().put(BLOCK_SEARCHER_SETTING.getKey(), true)) + ); + ensureGreen("test-000001"); + runTest(new Request(HttpPost.METHOD_NAME, "test-alias/_rollover"), RolloverAction.NAME); } } diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java index 724f5c2d51be6..f5a1839001e5c 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java @@ -23,10 +23,13 @@ public void testAccessMetadataViaTemplate() { Map<String, Object> document = new HashMap<>(); document.put("foo", "bar"); IngestDocument ingestDocument = new IngestDocument("index", "id", 1, null, null, document); - ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("1 {{foo}}", scriptService)); + ingestDocument.setFieldValue(ingestDocument.renderTemplate(compile("field1")), ValueSource.wrap("1 {{foo}}", scriptService)); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("1 bar")); - ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("2 {{_source.foo}}", scriptService)); + ingestDocument.setFieldValue( + ingestDocument.renderTemplate(compile("field1")), + ValueSource.wrap( + "2
{{_source.foo}}", scriptService) + ); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("2 bar")); } @@ -38,11 +41,14 @@ public void testAccessMapMetadataViaTemplate() { innerObject.put("qux", Collections.singletonMap("fubar", "hello qux and fubar")); document.put("foo", innerObject); IngestDocument ingestDocument = new IngestDocument("index", "id", 1, null, null, document); - ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("1 {{foo.bar}} {{foo.baz}} {{foo.qux.fubar}}", scriptService)); + ingestDocument.setFieldValue( + ingestDocument.renderTemplate(compile("field1")), + ValueSource.wrap("1 {{foo.bar}} {{foo.baz}} {{foo.qux.fubar}}", scriptService) + ); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("1 hello bar hello baz hello qux and fubar")); ingestDocument.setFieldValue( - compile("field1"), + ingestDocument.renderTemplate(compile("field1")), ValueSource.wrap("2 {{_source.foo.bar}} {{_source.foo.baz}} {{_source.foo.qux.fubar}}", scriptService) ); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("2 hello bar hello baz hello qux and fubar")); @@ -58,7 +64,10 @@ public void testAccessListMetadataViaTemplate() { list.add(null); document.put("list2", list); IngestDocument ingestDocument = new IngestDocument("index", "id", 1, null, null, document); - ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("1 {{list1.0}} {{list2.0}}", scriptService)); + ingestDocument.setFieldValue( + ingestDocument.renderTemplate(compile("field1")), + ValueSource.wrap("1 {{list1.0}} {{list2.0}}", scriptService) + ); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("1 foo {field=value}")); } @@ -69,7 +78,7 @@ public void testAccessIngestMetadataViaTemplate() { document.put("_ingest", ingestMap); IngestDocument ingestDocument = new IngestDocument("index", "id", 1, null, null, document); ingestDocument.setFieldValue( - compile("ingest_timestamp"), + ingestDocument.renderTemplate(compile("ingest_timestamp")), ValueSource.wrap("{{_ingest.timestamp}} and {{_source._ingest.timestamp}}", scriptService) ); assertThat( diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java index c93ef30731960..df4c5827cebc1 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java @@ -57,9 +57,9 @@ public void testValueSourceWithTemplates() { public void testAccessSourceViaTemplate() { IngestDocument ingestDocument = new IngestDocument("marvel", "id", 1, null, null, new HashMap<>()); assertThat(ingestDocument.hasField("marvel"), is(false)); - ingestDocument.setFieldValue(compile("{{_index}}"), ValueSource.wrap("{{_index}}", scriptService)); + ingestDocument.setFieldValue(ingestDocument.renderTemplate(compile("{{_index}}")), ValueSource.wrap("{{_index}}", scriptService)); assertThat(ingestDocument.getFieldValue("marvel", String.class), equalTo("marvel")); - ingestDocument.removeField(compile("{{marvel}}")); + ingestDocument.removeField(ingestDocument.renderTemplate(compile("{{marvel}}"))); assertThat(ingestDocument.hasField("index"), is(false)); } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.check_in.json 
b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.check_in.json index e95621d30fc16..36535109df8e7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.check_in.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.check_in.json @@ -1,7 +1,7 @@ { "connector.check_in": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/check-in-connector-api.html", "description": "Updates the last_seen timestamp in the connector document." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.delete.json index dcb3a4f83c287..88c4e85dac2ef 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.delete.json @@ -1,7 +1,7 @@ { "connector.delete": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-connector-api.html", "description": "Deletes a connector." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.get.json index bcddef8cb5cb9..2645df28c5d1e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.get.json @@ -1,7 +1,7 @@ { "connector.get": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-connector-api.html", "description": "Returns the details about a connector." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.last_sync.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.last_sync.json index 7bc1504253070..f6d93555b72ed 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.last_sync.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.last_sync.json @@ -1,7 +1,7 @@ { "connector.last_sync": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-last-sync-api.html", "description": "Updates the stats of last sync in the connector document." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json index 852a5fbd85998..bc8f12a933b1e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json @@ -1,7 +1,7 @@ { "connector.list": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/list-connector-api.html", "description": "Lists all connectors." 
}, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.post.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.post.json index aadb59e99af7a..edc865012876e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.post.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.post.json @@ -1,7 +1,7 @@ { "connector.post": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-api.html", "description": "Creates a connector." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.put.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.put.json index 0ab5c18671040..af733de6aa06c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.put.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.put.json @@ -1,7 +1,7 @@ { "connector.put": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-api.html", "description": "Creates or updates a connector." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_configuration.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_configuration.json index a82f9e0f29225..1ececd7ea95f7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_configuration.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_configuration.json @@ -1,7 +1,7 @@ { "connector.update_configuration": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-configuration-api.html", "description": "Updates the connector configuration." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_error.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_error.json index 51d5a1b25973b..150f71ad033ac 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_error.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_error.json @@ -1,7 +1,7 @@ { "connector.update_error": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-error-api.html", "description": "Updates the error field in the connector document." 
}, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_filtering.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_filtering.json index b9815fc111c06..c2a9bf0720746 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_filtering.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_filtering.json @@ -1,7 +1,7 @@ { "connector.update_filtering": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-api.html", "description": "Updates the filtering field in the connector document." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_name.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_name.json index dabac5599932b..a7ca1a9730ab9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_name.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_name.json @@ -1,7 +1,7 @@ { "connector.update_name": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-name-description-api.html", "description": "Updates the name and/or description fields in the connector document." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_pipeline.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_pipeline.json index 25687e41a48de..b7ab6abcf088d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_pipeline.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_pipeline.json @@ -1,7 +1,7 @@ { "connector.update_pipeline": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-pipeline-api.html", "description": "Updates the pipeline field in the connector document." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_scheduling.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_scheduling.json index 8d934b8025145..98cee5c257b90 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_scheduling.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_scheduling.json @@ -1,7 +1,7 @@ { "connector.update_scheduling": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-scheduling-api.html", "description": "Updates the scheduling field in the connector document." 
}, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.cancel.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.cancel.json index dbea6935f8a87..1e8cf154cf652 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.cancel.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.cancel.json @@ -1,7 +1,7 @@ { "connector_sync_job.cancel": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cancel-connector-sync-job-api.html", "description": "Cancels a connector sync job." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.check_in.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.check_in.json index 8193d92395255..a6c96f506b115 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.check_in.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.check_in.json @@ -1,7 +1,7 @@ { "connector_sync_job.check_in": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/check-in-connector-sync-job-api.html", "description": "Checks in a connector sync job (refreshes 'last_seen')." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.delete.json index ba9b5095a5275..11894a48db576 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.delete.json @@ -1,7 +1,7 @@ { "connector_sync_job.delete": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-connector-sync-job-api.html", "description": "Deletes a connector sync job." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.error.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.error.json index 394e6e2fcb38f..c6fbd15559e2d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.error.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.error.json @@ -1,7 +1,7 @@ { "connector_sync_job.error": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/set-connector-sync-job-error-api.html", "description": "Sets an error for a connector sync job." 
}, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.get.json index d0f14b0001bd8..6dd29069badc4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.get.json @@ -1,7 +1,7 @@ { "connector_sync_job.get": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-connector-sync-job-api.html", "description": "Returns the details about a connector sync job." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.list.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.list.json index 86995477f060a..7b816cae1cd00 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.list.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.list.json @@ -1,7 +1,7 @@ { "connector_sync_job.list": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/list-connector-sync-jobs-api.html", "description": "Lists all connector sync jobs." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.post.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.post.json index 1db58c31dfa38..8050b34014d2c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.post.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.post.json @@ -1,7 +1,7 @@ { "connector_sync_job.post": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-sync-job-api.html", "description": "Creates a connector sync job." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.update_stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.update_stats.json index 825e5d8939e2d..d5f18df0a74da 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.update_stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.update_stats.json @@ -1,7 +1,7 @@ { "connector_sync_job.update_stats": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/set-connector-sync-job-stats-api.html", "description": "Updates the stats fields in the connector sync job document." 
}, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json index 603883dab0af8..47a1bee665506 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json @@ -58,6 +58,11 @@ "wait_for_active_shards":{ "type":"string", "description":"Set the number of active shards to wait for on the newly created rollover index before the operation returns." + }, + "lazy":{ + "type":"boolean", + "default":"false", + "description":"If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. Only allowed on data streams." } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/profiling.flamegraph.json b/rest-api-spec/src/main/resources/rest-api-spec/api/profiling.flamegraph.json new file mode 100644 index 0000000000000..f9cec6663b417 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/profiling.flamegraph.json @@ -0,0 +1,27 @@ +{ + "profiling.flamegraph":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/observability/current/universal-profiling.html", + "description":"Extracts a UI-optimized structure to render flamegraphs from Universal Profiling." + }, + "stability":"stable", + "visibility":"private", + "headers":{ + "accept": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_profiling/flamegraph", + "methods":[ + "POST" + ] + } + ] + }, + "body":{ + "description":"The filter conditions for the flamegraph", + "required":true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/profiling.stacktraces.json b/rest-api-spec/src/main/resources/rest-api-spec/api/profiling.stacktraces.json new file mode 100644 index 0000000000000..547e2d628bd20 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/profiling.stacktraces.json @@ -0,0 +1,27 @@ +{ + "profiling.stacktraces":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/observability/current/universal-profiling.html", + "description":"Extracts raw stacktrace information from Universal Profiling." 
+ }, + "stability":"stable", + "visibility":"private", + "headers":{ + "accept": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_profiling/stacktraces", + "methods":[ + "POST" + ] + } + ] + }, + "body":{ + "description":"The filter conditions for stacktraces", + "required":true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.get_api_key.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.get_api_key.json index bf782e96a0499..452ad7cef607c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/security.get_api_key.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.get_api_key.json @@ -45,6 +45,11 @@ "type":"boolean", "default":false, "description": "flag to show the limited-by role descriptors of API Keys" + }, + "active_only":{ + "type":"boolean", + "default":false, + "description": "flag to limit response to only active (not invalidated or expired) API keys" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.delete_synonym.json b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.delete_synonym.json index 00142ebcf00fc..9273a8dea87c3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.delete_synonym.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.delete_synonym.json @@ -4,7 +4,7 @@ "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-synonyms-set.html", "description": "Deletes a synonym set" }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.delete_synonym_rule.json b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.delete_synonym_rule.json index 11fb113d6b629..5a0de4ab94a7c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.delete_synonym_rule.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.delete_synonym_rule.json @@ -4,7 +4,7 @@ "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-synonym-rule.html", "description": "Deletes a synonym rule in a synonym set" }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonym.json b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonym.json index 6cb4fcc46f26b..25c177cabbdf1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonym.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonym.json @@ -4,7 +4,7 @@ "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-synonyms-set.html", "description": "Retrieves a synonym set" }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonym_rule.json b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonym_rule.json index 5a718f1a48e46..ff9e7eb57b8a7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonym_rule.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonym_rule.json @@ -4,7 +4,7 @@ "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-synonym-rule.html", "description": "Retrieves a synonym rule from a synonym set" }, - "stability": "experimental", + 
"stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonyms_sets.json b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonyms_sets.json index 66bd8df92e1e7..d94bef32cddcd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonyms_sets.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get_synonyms_sets.json @@ -4,7 +4,7 @@ "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/list-synonyms-sets.html", "description": "Retrieves a summary of all defined synonym sets" }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.put_synonym.json b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.put_synonym.json index 6c412d174434b..e09bbb7e428a1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.put_synonym.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.put_synonym.json @@ -4,7 +4,7 @@ "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/put-synonyms-set.html", "description": "Creates or updates a synonyms set" }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.put_synonym_rule.json b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.put_synonym_rule.json index 082432ae662f0..51503b5819862 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.put_synonym_rule.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.put_synonym_rule.json @@ -4,7 +4,7 @@ "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/put-synonym-rule.html", "description": "Creates or updates a synonym rule in a synonym set" }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml index 38aaaa9847efb..52e80887f6b95 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml @@ -117,9 +117,7 @@ setup: "Test index templates with pipelines": - skip: - features: headers - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/102339" + features: [headers, allowed_warnings] - do: headers: @@ -146,6 +144,8 @@ setup: - match: { acknowledged: true } - do: + allowed_warnings: + - "index template [my-template] has index patterns [index-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template] will take precedence during new index creation" indices.put_index_template: name: my-template body: @@ -196,9 +196,7 @@ setup: "Test bad pipeline substitution": - skip: - features: headers - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/102339" + features: [headers, allowed_warnings] - do: headers: @@ -213,6 +211,8 @@ setup: - match: { acknowledged: true } - do: + allowed_warnings: + - "index template [my-template] has index patterns [index-*] matching patterns from existing older 
templates [global] with patterns (global => [*]); this template [my-template] will take precedence during new index creation" indices.put_index_template: name: my-template body: diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java index f11144d698242..3b34cedcd3635 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java @@ -24,8 +24,8 @@ import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.action.admin.indices.open.OpenIndexAction; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; @@ -36,7 +36,7 @@ import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; +import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; @@ -523,13 +523,13 @@ public void testGetMappings() { } public void testPutMapping() { - interceptTransportActions(PutMappingAction.NAME); + interceptTransportActions(TransportPutMappingAction.TYPE.name()); PutMappingRequest putMappingRequest = new PutMappingRequest(randomUniqueIndicesOrAliases()).source("field", "type=text"); internalCluster().coordOnlyNodeClient().admin().indices().putMapping(putMappingRequest).actionGet(); clearInterceptedActions(); - assertSameIndices(putMappingRequest, PutMappingAction.NAME); + assertSameIndices(putMappingRequest, TransportPutMappingAction.TYPE.name()); } public void testGetSettings() { @@ -543,7 +543,7 @@ public void testGetSettings() { } public void testUpdateSettings() { - interceptTransportActions(UpdateSettingsAction.NAME); + interceptTransportActions(TransportUpdateSettingsAction.TYPE.name()); UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(randomIndicesOrAliases()).settings( Settings.builder().put("refresh_interval", -1) @@ -551,7 +551,7 @@ public void testUpdateSettings() { internalCluster().coordOnlyNodeClient().admin().indices().updateSettings(updateSettingsRequest).actionGet(); clearInterceptedActions(); - assertSameIndices(updateSettingsRequest, UpdateSettingsAction.NAME); + assertSameIndices(updateSettingsRequest, TransportUpdateSettingsAction.TYPE.name()); } public void testSearchQueryThenFetch() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java index 
45906abd29ff8..45865ddd35ced 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java @@ -11,8 +11,10 @@ import org.apache.lucene.util.Constants; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequestBuilder; +import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; +import org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotThreadsAction; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.logging.ChunkedLoggingStreamTests; import org.elasticsearch.core.TimeValue; @@ -23,7 +25,6 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; @@ -41,38 +42,26 @@ public class HotThreadsIT extends ESIntegTestCase { - public void testHotThreadsDontFail() throws ExecutionException, InterruptedException { - /** - * This test just checks if nothing crashes or gets stuck etc. - */ + public void testHotThreadsDontFail() throws InterruptedException { + // This test just checks if nothing crashes or gets stuck etc. createIndex("test"); final int iters = scaledRandomIntBetween(2, 20); final AtomicBoolean hasErrors = new AtomicBoolean(false); for (int i = 0; i < iters; i++) { - final String type; - NodesHotThreadsRequestBuilder nodesHotThreadsRequestBuilder = clusterAdmin().prepareNodesHotThreads(); + final NodesHotThreadsRequest request = new NodesHotThreadsRequest(); if (randomBoolean()) { TimeValue timeValue = new TimeValue(rarely() ? randomIntBetween(500, 5000) : randomIntBetween(20, 500)); - nodesHotThreadsRequestBuilder.setInterval(timeValue); + request.interval(timeValue); } if (randomBoolean()) { - nodesHotThreadsRequestBuilder.setThreads(rarely() ? randomIntBetween(500, 5000) : randomIntBetween(1, 500)); + request.threads(rarely() ? 
randomIntBetween(500, 5000) : randomIntBetween(1, 500)); } - nodesHotThreadsRequestBuilder.setIgnoreIdleThreads(randomBoolean()); + request.ignoreIdleThreads(randomBoolean()); if (randomBoolean()) { - type = switch (randomIntBetween(0, 3)) { - case 3 -> "mem"; - case 2 -> "cpu"; - case 1 -> "wait"; - default -> "block"; - }; - assertThat(type, notNullValue()); - nodesHotThreadsRequestBuilder.setType(HotThreads.ReportType.of(type)); - } else { - type = null; + request.type(HotThreads.ReportType.of(randomFrom("block", "mem", "cpu", "wait"))); } final CountDownLatch latch = new CountDownLatch(1); - nodesHotThreadsRequestBuilder.execute(new ActionListener<NodesHotThreadsResponse>() { + client().execute(TransportNodesHotThreadsAction.TYPE, request, new ActionListener<>() { @Override public void onResponse(NodesHotThreadsResponse nodeHotThreads) { boolean success = false; @@ -83,7 +72,6 @@ public void onResponse(NodesHotThreadsResponse nodeHotThreads) { assertThat(nodesMap.size(), equalTo(cluster().size())); for (NodeHotThreads ht : nodeHotThreads.getNodes()) { assertNotNull(ht.getHotThreads()); - // logger.info(ht.getHotThreads()); } success = true; } finally { @@ -120,66 +108,80 @@ public void onFailure(Exception e) { 3L ); } - latch.await(); + safeAwait(latch); assertThat(hasErrors.get(), is(false)); } } - public void testIgnoreIdleThreads() throws ExecutionException, InterruptedException { + public void testIgnoreIdleThreads() { assumeTrue("no support for hot_threads on FreeBSD", Constants.FREE_BSD == false); - // First time, don't ignore idle threads: - NodesHotThreadsRequestBuilder builder = clusterAdmin().prepareNodesHotThreads(); - builder.setIgnoreIdleThreads(false); - builder.setThreads(Integer.MAX_VALUE); - NodesHotThreadsResponse response = builder.execute().get(); - final Matcher<String> containsCachedTimeThreadRunMethod = containsString( "org.elasticsearch.threadpool.ThreadPool$CachedTimeThread.run" ); - int totSizeAll = 0; - for (NodeHotThreads node : response.getNodesMap().values()) { - totSizeAll += node.getHotThreads().length(); - assertThat(node.getHotThreads(), containsCachedTimeThreadRunMethod); - } + // First time, don't ignore idle threads: + final var totSizeAll = safeAwait( + SubscribableListener.newForked( + l -> client().execute( + TransportNodesHotThreadsAction.TYPE, + new NodesHotThreadsRequest().ignoreIdleThreads(false).threads(Integer.MAX_VALUE), + l.map(response -> { + int length = 0; + for (NodeHotThreads node : response.getNodesMap().values()) { + length += node.getHotThreads().length(); + assertThat(node.getHotThreads(), containsCachedTimeThreadRunMethod); + } + return length; + }) + ) + ) + ); // Second time, do ignore idle threads: - builder = clusterAdmin().prepareNodesHotThreads(); - builder.setThreads(Integer.MAX_VALUE); - + final var request = new NodesHotThreadsRequest().threads(Integer.MAX_VALUE); // Make sure default is true: - assertEquals(true, builder.request().ignoreIdleThreads()); - response = builder.execute().get(); - - int totSizeIgnoreIdle = 0; - for (NodeHotThreads node : response.getNodesMap().values()) { - totSizeIgnoreIdle += node.getHotThreads().length(); - assertThat(node.getHotThreads(), not(containsCachedTimeThreadRunMethod)); - } + assertTrue(request.ignoreIdleThreads()); + final var totSizeIgnoreIdle = safeAwait( + SubscribableListener.newForked(l -> client().execute(TransportNodesHotThreadsAction.TYPE, request, l.map(response -> { + int length = 0; + for (NodeHotThreads node : response.getNodesMap().values()) { + length += node.getHotThreads().length(); +
assertThat(node.getHotThreads(), not(containsCachedTimeThreadRunMethod)); + } + return length; + }))) + ); // The filtered stacks should be smaller than unfiltered ones: assertThat(totSizeIgnoreIdle, lessThan(totSizeAll)); } - public void testTimestampAndParams() throws ExecutionException, InterruptedException { - - NodesHotThreadsResponse response = clusterAdmin().prepareNodesHotThreads().execute().get(); - - if (Constants.FREE_BSD) { - for (NodeHotThreads node : response.getNodesMap().values()) { - String result = node.getHotThreads(); - assertTrue(result.indexOf("hot_threads is not supported") != -1); - } - } else { - for (NodeHotThreads node : response.getNodesMap().values()) { - String result = node.getHotThreads(); - assertTrue(result.indexOf("Hot threads at") != -1); - assertTrue(result.indexOf("interval=500ms") != -1); - assertTrue(result.indexOf("busiestThreads=3") != -1); - assertTrue(result.indexOf("ignoreIdleThreads=true") != -1); - } - } + public void testTimestampAndParams() { + safeAwait( + SubscribableListener.newForked( + l -> client().execute(TransportNodesHotThreadsAction.TYPE, new NodesHotThreadsRequest(), l.map(response -> { + if (Constants.FREE_BSD) { + for (NodeHotThreads node : response.getNodesMap().values()) { + assertThat(node.getHotThreads(), containsString("hot_threads is not supported")); + } + } else { + for (NodeHotThreads node : response.getNodesMap().values()) { + assertThat( + node.getHotThreads(), + allOf( + containsString("Hot threads at"), + containsString("interval=500ms"), + containsString("busiestThreads=3"), + containsString("ignoreIdleThreads=true") + ) + ); + } + } + return null; + })) + ) + ); } @TestLogging(reason = "testing logging at various levels", value = "org.elasticsearch.action.admin.HotThreadsIT:TRACE") diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java index 4aa3598608fb6..954ef3d6d7887 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java @@ -10,7 +10,9 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequest; import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; +import org.elasticsearch.action.admin.cluster.node.reload.TransportNodesReloadSecureSettingsAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.KeyStoreWrapper; @@ -58,6 +60,25 @@ public static void disableInFips() { ); } + private static void executeReloadSecureSettings( + String[] nodeIds, + SecureString password, + ActionListener<NodesReloadSecureSettingsResponse> listener + ) { + final var request = new NodesReloadSecureSettingsRequest(); + try { + request.nodesIds(nodeIds); + request.setSecureStorePassword(password); + client().execute(TransportNodesReloadSecureSettingsAction.TYPE, request, listener); + } finally { + request.decRef(); + } + } + + private static SecureString emptyPassword() { + return randomBoolean() ?
new SecureString(new char[0]) : null; + } + public void testMissingKeystoreFile() throws Exception { final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class).findFirst().get(); @@ -67,36 +88,32 @@ public void testMissingKeystoreFile() throws Exception { Files.deleteIfExists(KeyStoreWrapper.keystorePath(environment.configFile())); final int initialReloadCount = mockReloadablePlugin.getReloadCount(); final CountDownLatch latch = new CountDownLatch(1); - final SecureString emptyPassword = randomBoolean() ? new SecureString(new char[0]) : null; - clusterAdmin().prepareReloadSecureSettings() - .setSecureStorePassword(emptyPassword) - .setNodesIds(Strings.EMPTY_ARRAY) - .execute(new ActionListener<NodesReloadSecureSettingsResponse>() { - @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - try { - assertThat(nodesReloadResponse, notNullValue()); - final Map<String, NodesReloadSecureSettingsResponse.NodeResponse> nodesMap = nodesReloadResponse.getNodesMap(); - assertThat(nodesMap.size(), equalTo(cluster().size())); - for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { - assertThat(nodeResponse.reloadException(), notNullValue()); - assertThat(nodeResponse.reloadException(), instanceOf(IllegalStateException.class)); - assertThat(nodeResponse.reloadException().getMessage(), containsString("Keystore is missing")); - } - } catch (final AssertionError e) { - reloadSettingsError.set(e); - } finally { - latch.countDown(); + executeReloadSecureSettings(Strings.EMPTY_ARRAY, emptyPassword(), new ActionListener<>() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map<String, NodesReloadSecureSettingsResponse.NodeResponse> nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), notNullValue()); + assertThat(nodeResponse.reloadException(), instanceOf(IllegalStateException.class)); + assertThat(nodeResponse.reloadException().getMessage(), containsString("Keystore is missing")); } - } - - @Override - public void onFailure(Exception e) { - reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { latch.countDown(); } - }); - latch.await(); + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + safeAwait(latch); if (reloadSettingsError.get() != null) { throw reloadSettingsError.get(); } @@ -118,34 +135,30 @@ public void testInvalidKeystoreFile() throws Exception { Files.copy(keystore, KeyStoreWrapper.keystorePath(environment.configFile()), StandardCopyOption.REPLACE_EXISTING); } final CountDownLatch latch = new CountDownLatch(1); - final SecureString emptyPassword = randomBoolean() ?
new SecureString(new char[0]) : null; - clusterAdmin().prepareReloadSecureSettings() - .setSecureStorePassword(emptyPassword) - .setNodesIds(Strings.EMPTY_ARRAY) - .execute(new ActionListener<NodesReloadSecureSettingsResponse>() { - @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - try { - assertThat(nodesReloadResponse, notNullValue()); - final Map<String, NodesReloadSecureSettingsResponse.NodeResponse> nodesMap = nodesReloadResponse.getNodesMap(); - assertThat(nodesMap.size(), equalTo(cluster().size())); - for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { - assertThat(nodeResponse.reloadException(), notNullValue()); - } - } catch (final AssertionError e) { - reloadSettingsError.set(e); - } finally { - latch.countDown(); + executeReloadSecureSettings(Strings.EMPTY_ARRAY, emptyPassword(), new ActionListener<>() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map<String, NodesReloadSecureSettingsResponse.NodeResponse> nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), notNullValue()); } - } - - @Override - public void onFailure(Exception e) { - reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { latch.countDown(); } - }); - latch.await(); + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + safeAwait(latch); if (reloadSettingsError.get() != null) { throw reloadSettingsError.get(); } @@ -162,31 +175,27 @@ public void testReloadAllNodesWithPasswordWithoutTLSFails() throws Exception { final char[] password = randomAlphaOfLength(12).toCharArray(); writeEmptyKeystore(environment, password); final CountDownLatch latch = new CountDownLatch(1); - clusterAdmin().prepareReloadSecureSettings() - // No filter should try to hit all nodes - .setNodesIds(Strings.EMPTY_ARRAY) - .setSecureStorePassword(new SecureString(password)) - .execute(new ActionListener<NodesReloadSecureSettingsResponse>() { - @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - reloadSettingsError.set(new AssertionError("Nodes request succeeded when it should have failed", null)); - latch.countDown(); - } + executeReloadSecureSettings(Strings.EMPTY_ARRAY, new SecureString(password), new ActionListener<>() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + reloadSettingsError.set(new AssertionError("Nodes request succeeded when it should have failed", null)); + latch.countDown(); + } - @Override - public void onFailure(Exception e) { - try { - assertThat(e, instanceOf(ElasticsearchException.class)); - assertThat( - e.getMessage(), - containsString("Secure settings cannot be updated cluster wide when TLS for the transport layer is not enabled") - ); - } finally { - latch.countDown(); - } + @Override + public void onFailure(Exception e) { + try { + assertThat(e, instanceOf(ElasticsearchException.class)); + assertThat( + e.getMessage(), + containsString("Secure settings cannot be updated cluster wide when TLS for the transport layer is not enabled") + ); + } finally { + latch.countDown(); } - }); - latch.await(); + } + }); + safeAwait(latch); if (reloadSettingsError.get() != null) { throw
reloadSettingsError.get(); } @@ -200,33 +209,30 @@ public void testReloadLocalNodeWithPasswordWithoutTLSSucceeds() throws Exception final char[] password = randomAlphaOfLength(12).toCharArray(); writeEmptyKeystore(environment, password); final CountDownLatch latch = new CountDownLatch(1); - clusterAdmin().prepareReloadSecureSettings() - .setNodesIds("_local") - .setSecureStorePassword(new SecureString(password)) - .execute(new ActionListener<NodesReloadSecureSettingsResponse>() { - @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - try { - assertThat(nodesReloadResponse, notNullValue()); - final Map<String, NodesReloadSecureSettingsResponse.NodeResponse> nodesMap = nodesReloadResponse.getNodesMap(); - assertThat(nodesMap.size(), equalTo(1)); - assertThat(nodesReloadResponse.getNodes().size(), equalTo(1)); - final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse = nodesReloadResponse.getNodes().get(0); - assertThat(nodeResponse.reloadException(), nullValue()); - } catch (final AssertionError e) { - reloadSettingsError.set(e); - } finally { - latch.countDown(); - } - } - - @Override - public void onFailure(Exception e) { - reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + executeReloadSecureSettings(new String[] { "_local" }, new SecureString(password), new ActionListener<>() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map<String, NodesReloadSecureSettingsResponse.NodeResponse> nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(1)); + assertThat(nodesReloadResponse.getNodes().size(), equalTo(1)); + final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse = nodesReloadResponse.getNodes().get(0); + assertThat(nodeResponse.reloadException(), nullValue()); + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { latch.countDown(); } - }); - latch.await(); + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + safeAwait(latch); if (reloadSettingsError.get() != null) { throw reloadSettingsError.get(); } @@ -241,10 +247,10 @@ public void testWrongKeystorePassword() throws Exception { // "some" keystore should be present in this case writeEmptyKeystore(environment, new char[0]); final CountDownLatch latch = new CountDownLatch(1); - clusterAdmin().prepareReloadSecureSettings() - .setNodesIds("_local") - .setSecureStorePassword(new SecureString(new char[] { 'W', 'r', 'o', 'n', 'g' })) - .execute(new ActionListener<NodesReloadSecureSettingsResponse>() { + executeReloadSecureSettings( + new String[] { "_local" }, + new SecureString(new char[] { 'W', 'r', 'o', 'n', 'g' }), + new ActionListener<>() { @Override public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { try { @@ -267,8 +273,9 @@ public void onFailure(Exception e) { reloadSettingsError.set(new AssertionError("Nodes request failed", e)); latch.countDown(); } - }); - latch.await(); + } + ); + safeAwait(latch); if (reloadSettingsError.get() != null) { throw reloadSettingsError.get(); } @@ -297,35 +304,31 @@ public void testMisbehavingPlugin() throws Exception { Settings.builder().put(environment.settings()).setSecureSettings(secureSettings).build() ).toString(); final CountDownLatch latch = new CountDownLatch(1); - final SecureString emptyPassword = randomBoolean() ?
new SecureString(new char[0]) : null; - clusterAdmin().prepareReloadSecureSettings() - .setSecureStorePassword(emptyPassword) - .setNodesIds(Strings.EMPTY_ARRAY) - .execute(new ActionListener() { - @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - try { - assertThat(nodesReloadResponse, notNullValue()); - final Map nodesMap = nodesReloadResponse.getNodesMap(); - assertThat(nodesMap.size(), equalTo(cluster().size())); - for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { - assertThat(nodeResponse.reloadException(), notNullValue()); - assertThat(nodeResponse.reloadException().getMessage(), containsString("If shouldThrow I throw")); - } - } catch (final AssertionError e) { - reloadSettingsError.set(e); - } finally { - latch.countDown(); + executeReloadSecureSettings(Strings.EMPTY_ARRAY, emptyPassword(), new ActionListener<>() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), notNullValue()); + assertThat(nodeResponse.reloadException().getMessage(), containsString("If shouldThrow I throw")); } - } - - @Override - public void onFailure(Exception e) { - reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { latch.countDown(); } - }); - latch.await(); + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + safeAwait(latch); if (reloadSettingsError.get() != null) { throw reloadSettingsError.get(); } @@ -364,10 +367,7 @@ public void testInvalidKeyInSettings() throws Exception { } PlainActionFuture actionFuture = new PlainActionFuture<>(); - clusterAdmin().prepareReloadSecureSettings() - .setSecureStorePassword(new SecureString(new char[0])) - .setNodesIds(Strings.EMPTY_ARRAY) - .execute(actionFuture); + executeReloadSecureSettings(Strings.EMPTY_ARRAY, new SecureString(new char[0]), actionFuture); actionFuture.get().getNodes().forEach(nodeResponse -> assertThat(nodeResponse.reloadException(), nullValue())); @@ -378,10 +378,7 @@ public void testInvalidKeyInSettings() throws Exception { } actionFuture = new PlainActionFuture<>(); - clusterAdmin().prepareReloadSecureSettings() - .setSecureStorePassword(new SecureString(new char[0])) - .setNodesIds(Strings.EMPTY_ARRAY) - .execute(actionFuture); + executeReloadSecureSettings(Strings.EMPTY_ARRAY, new SecureString(new char[0]), actionFuture); actionFuture.get() .getNodes() @@ -404,33 +401,30 @@ private void successfulReloadCall() throws InterruptedException { final AtomicReference reloadSettingsError = new AtomicReference<>(); final CountDownLatch latch = new CountDownLatch(1); final SecureString emptyPassword = randomBoolean() ? 
new SecureString(new char[0]) : null; - clusterAdmin().prepareReloadSecureSettings() - .setSecureStorePassword(emptyPassword) - .setNodesIds(Strings.EMPTY_ARRAY) - .execute(new ActionListener() { - @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - try { - assertThat(nodesReloadResponse, notNullValue()); - final Map nodesMap = nodesReloadResponse.getNodesMap(); - assertThat(nodesMap.size(), equalTo(cluster().size())); - for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { - assertThat(nodeResponse.reloadException(), nullValue()); - } - } catch (final AssertionError e) { - reloadSettingsError.set(e); - } finally { - latch.countDown(); + executeReloadSecureSettings(Strings.EMPTY_ARRAY, new SecureString(new char[0]), new ActionListener<>() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), nullValue()); } - } - - @Override - public void onFailure(Exception e) { - reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { latch.countDown(); } - }); - latch.await(); + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + safeAwait(latch); if (reloadSettingsError.get() != null) { throw reloadSettingsError.get(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index 07c6ba4945eaa..b20f658a01510 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -272,7 +272,7 @@ public void testFailedToStartChildTaskAfterCancelled() throws Exception { TestRequest subRequest = generateTestRequest(nodes, 0, between(0, 1), false); beforeSendLatches.get(subRequest).countDown(); mainAction.startSubTask(taskId, subRequest, future); - TaskCancelledException te = expectThrows(TaskCancelledException.class, future::actionGet); + TaskCancelledException te = expectThrows(TaskCancelledException.class, future); assertThat(te.getMessage(), equalTo("parent task was cancelled [by user request]")); allowEntireRequest(rootRequest); waitForRootTask(rootTaskFuture, false); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 1a230154b27bf..21497b2e6fcfb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -406,7 +406,7 @@ public void testSearchTaskHeaderLimit() { headers.put("Custom-Task-Header", randomAlphaOfLengthBetween(maxSize, maxSize + 100)); IllegalArgumentException ex 
= expectThrows( IllegalArgumentException.class, - () -> client().filterWithHeader(headers).admin().cluster().prepareListTasks().get() + client().filterWithHeader(headers).admin().cluster().prepareListTasks() ); assertThat(ex.getMessage(), startsWith("Request exceeded the maximum size of task headers ")); } @@ -506,7 +506,7 @@ public void testTasksCancellation() throws Exception { CancelTasksResponse cancelTasksResponse = clusterAdmin().prepareCancelTasks().setActions(TEST_TASK_ACTION.name()).get(); assertEquals(1, cancelTasksResponse.getTasks().size()); - expectThrows(TaskCancelledException.class, future::actionGet); + expectThrows(TaskCancelledException.class, future); logger.info("--> checking that test tasks are not running"); assertEquals(0, clusterAdmin().prepareListTasks().setActions(TEST_TASK_ACTION.name() + "*").get().getTasks().size()); @@ -640,7 +640,7 @@ public void testGetTaskWaitForTimeout() throws Exception { waitForTimeoutTestCase(id -> { Exception e = expectThrows( Exception.class, - () -> clusterAdmin().prepareGetTask(id).setWaitForCompletion(true).setTimeout(timeValueMillis(100)).get() + clusterAdmin().prepareGetTask(id).setWaitForCompletion(true).setTimeout(timeValueMillis(100)) ); return singleton(e); }); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoIT.java index cdbc19611eb24..395f8e5c67642 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoIT.java @@ -38,9 +38,7 @@ public void testRemoteClusterClientRole() { final String nodeWithoutRemoteClientRole = localCluster.startNode(NodeRoles.onlyRoles(Set.of(DiscoveryNodeRole.DATA_ROLE))); final IllegalArgumentException error = expectThrows( IllegalArgumentException.class, - () -> localCluster.client(nodeWithoutRemoteClientRole) - .execute(TransportRemoteInfoAction.TYPE, new RemoteInfoRequest()) - .actionGet() + localCluster.client(nodeWithoutRemoteClientRole).execute(TransportRemoteInfoAction.TYPE, new RemoteInfoRequest()) ); assertThat( error.getMessage(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java index 93fc17a9a02eb..1fda9c67a0beb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -207,7 +207,7 @@ public void testValuesSmokeScreen() throws IOException, ExecutionException, Inte ClusterStatsResponse response = clusterAdmin().prepareClusterStats().get(); String msg = response.toString(); assertThat(msg, response.getTimestamp(), greaterThan(946681200000L)); // 1 Jan 2000 - assertThat(msg, response.indicesStats.getStore().getSizeInBytes(), greaterThan(0L)); + assertThat(msg, response.indicesStats.getStore().sizeInBytes(), greaterThan(0L)); assertThat(msg, response.nodesStats.getFs().getTotal().getBytes(), greaterThan(0L)); assertThat(msg, response.nodesStats.getJvm().getVersions().size(), greaterThan(0)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java index cb508334f835e..955ec4a0bbc99 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java @@ -56,7 +56,7 @@ public void testListTasksValidation() { ActionRequestValidationException ex = expectThrows( ActionRequestValidationException.class, - () -> clusterAdmin().prepareListTasks().setDescriptions("*").get() + clusterAdmin().prepareListTasks().setDescriptions("*") ); assertThat(ex.getMessage(), containsString("matching on descriptions is not available when [detailed] is false")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java index 595788b1eb9f5..eaf8948348684 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java @@ -37,7 +37,7 @@ public void testPendingTasksWithIndexBlocks() { )) { try { enableIndexBlock("test", blockSetting); - PendingClusterTasksResponse response = clusterAdmin().preparePendingClusterTasks().get(); + PendingClusterTasksResponse response = getClusterPendingTasks(); assertNotNull(response.pendingTasks()); } finally { disableIndexBlock("test", blockSetting); @@ -53,7 +53,7 @@ public void testPendingTasksWithClusterReadOnlyBlock() { try { setClusterReadOnly(true); - PendingClusterTasksResponse response = clusterAdmin().preparePendingClusterTasks().get(); + PendingClusterTasksResponse response = getClusterPendingTasks(); assertNotNull(response.pendingTasks()); } finally { setClusterReadOnly(false); @@ -80,7 +80,7 @@ public boolean validateClusterForming() { } }); - assertNotNull(clusterAdmin().preparePendingClusterTasks().get().pendingTasks()); + assertNotNull(getClusterPendingTasks().pendingTasks()); // starting one more node allows the cluster to recover internalCluster().startNode(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java index 63239ec2419f9..7f6bb0239b730 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java @@ -96,7 +96,7 @@ public void testNotAllowed() { final String origin = randomFrom("", "not-allowed"); final IndicesAliasesRequest request = new IndicesAliasesRequest().origin(origin); request.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("index").alias("alias")); - final Exception e = expectThrows(IllegalStateException.class, () -> client().admin().indices().aliases(request).actionGet()); + final Exception e = expectThrows(IllegalStateException.class, client().admin().indices().aliases(request)); assertThat(e, hasToString(containsString("origin [" + origin + "] not allowed for index [index]"))); } @@ -113,7 +113,7 @@ public void testSomeAllowed() { final IndicesAliasesRequest request = new IndicesAliasesRequest().origin(origin); 
         request.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("foo").alias("alias"));
         request.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("bar").alias("alias"));
-        final Exception e = expectThrows(IllegalStateException.class, () -> client().admin().indices().aliases(request).actionGet());
+        final Exception e = expectThrows(IllegalStateException.class, client().admin().indices().aliases(request));
         final String index = "foo_allowed".equals(origin) ? "bar" : "foo";
         assertThat(e, hasToString(containsString("origin [" + origin + "] not allowed for index [" + index + "]")));
         assertTrue(client().admin().indices().getAliases(new GetAliasesRequest("alias")).actionGet().getAliases().isEmpty());
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java
index c0d62ba54621a..dc00c36470de2 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java
@@ -13,8 +13,8 @@
 import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
 import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
 import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
-import org.elasticsearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction;
-import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComposableIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.cluster.metadata.AliasMetadata;
 import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
@@ -153,13 +153,8 @@ public void testSystemIndicesAutoCreatedAsHidden() throws Exception {
     public void testSystemIndicesAutoCreateRejectedWhenNotHidden() {
         CreateIndexRequest request = new CreateIndexRequest(UnmanagedSystemIndexTestPlugin.SYSTEM_INDEX_NAME);
         request.settings(Settings.builder().put(SETTING_INDEX_HIDDEN, false).build());
-        ExecutionException exception = expectThrows(
-            ExecutionException.class,
-            () -> client().execute(AutoCreateAction.INSTANCE, request).get()
-        );
-
         assertThat(
-            exception.getCause().getMessage(),
+            expectThrows(IllegalStateException.class, client().execute(AutoCreateAction.INSTANCE, request)).getMessage(),
             containsString("Cannot auto-create system index [.unmanaged-system-idx] with [index.hidden] set to 'false'")
         );
     }
@@ -221,8 +216,8 @@ private String autoCreateSystemAliasViaComposableTemplate(String indexName) thro
             .build();
         assertAcked(
             client().execute(
-                PutComposableIndexTemplateAction.INSTANCE,
-                new PutComposableIndexTemplateAction.Request("test-composable-template").indexTemplate(cit)
+                TransportPutComposableIndexTemplateAction.TYPE,
+                new TransportPutComposableIndexTemplateAction.Request("test-composable-template").indexTemplate(cit)
             )
         );
@@ -245,8 +240,8 @@ public void testAutoCreateSystemAliasViaComposableTemplate() throws Exception {
 
         assertAcked(
             client().execute(
-                DeleteComposableIndexTemplateAction.INSTANCE,
-                new DeleteComposableIndexTemplateAction.Request("test-composable-template")
+                TransportDeleteComposableIndexTemplateAction.TYPE,
+                new TransportDeleteComposableIndexTemplateAction.Request("test-composable-template")
             )
         );
     }
@@ -268,8 +263,8 @@ public void testAutoCreateSystemAliasViaComposableTemplateAllowsTemplates() thro
 
         assertAcked(
             client().execute(
-                DeleteComposableIndexTemplateAction.INSTANCE,
-                new DeleteComposableIndexTemplateAction.Request("test-composable-template")
+                TransportDeleteComposableIndexTemplateAction.TYPE,
+                new TransportDeleteComposableIndexTemplateAction.Request("test-composable-template")
             )
         );
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
index b4d0286b74077..7574cd0271c46 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.action.admin.indices.create;
 
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.UnavailableShardsException;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
 import org.elasticsearch.action.admin.indices.alias.Alias;
@@ -41,7 +42,6 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows;
 import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@@ -117,8 +117,7 @@ public void testEmptyNestedMappings() throws Exception {
     public void testMappingParamAndNestedMismatch() throws Exception {
         MapperParsingException e = expectThrows(
             MapperParsingException.class,
-            () -> prepareCreate("test").setMapping(XContentFactory.jsonBuilder().startObject().startObject("type2").endObject().endObject())
-                .get()
+            prepareCreate("test").setMapping(XContentFactory.jsonBuilder().startObject().startObject("type2").endObject().endObject())
         );
         assertThat(e.getMessage(), startsWith("Failed to parse mapping: Root mapping definition has unsupported parameters"));
     }
@@ -293,10 +292,10 @@ public void testFailureToCreateIndexCleansUpIndicesService() {
             .build();
         assertAcked(indicesAdmin().prepareCreate("test-idx-1").setSettings(settings).addAlias(new Alias("alias1").writeIndex(true)).get());
 
-        assertRequestBuilderThrows(
-            indicesAdmin().prepareCreate("test-idx-2").setSettings(settings).addAlias(new Alias("alias1").writeIndex(true)),
-            IllegalStateException.class
-        );
+        ActionRequestBuilder builder = indicesAdmin().prepareCreate("test-idx-2")
+            .setSettings(settings)
+            .addAlias(new Alias("alias1").writeIndex(true));
+        expectThrows(IllegalStateException.class, builder);
 
         IndicesService indicesService = internalCluster().getInstance(IndicesService.class, internalCluster().getMasterName());
         for (IndexService indexService : indicesService) {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java
index 1c075442d99e6..84a8027418321 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java
@@ -16,8 +16,8 @@
 import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
 import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest;
 import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
-import org.elasticsearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction;
-import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComposableIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.index.IndexRequest;
@@ -66,7 +66,12 @@ public void beforeEach() {
     @After
     public void afterEach() throws Exception {
         assertAcked(indicesAdmin().prepareDeleteTemplate("*").get());
-        assertAcked(client().execute(DeleteComposableIndexTemplateAction.INSTANCE, new DeleteComposableIndexTemplateAction.Request("*")));
+        assertAcked(
+            client().execute(
+                TransportDeleteComposableIndexTemplateAction.TYPE,
+                new TransportDeleteComposableIndexTemplateAction.Request("*")
+            )
+        );
     }
 
     @Override
@@ -210,8 +215,8 @@ private void createIndexWithComposableTemplates(String indexName, String primary
             .build();
         assertAcked(
             client().execute(
-                PutComposableIndexTemplateAction.INSTANCE,
-                new PutComposableIndexTemplateAction.Request("test-composable-template").indexTemplate(cit)
+                TransportPutComposableIndexTemplateAction.TYPE,
+                new TransportPutComposableIndexTemplateAction.Request("test-composable-template").indexTemplate(cit)
             )
         );
@@ -229,8 +234,8 @@ public void testCreateSystemAliasViaComposableTemplate() throws Exception {
 
         assertAcked(
             client().execute(
-                DeleteComposableIndexTemplateAction.INSTANCE,
-                new DeleteComposableIndexTemplateAction.Request("test-composable-template")
+                TransportDeleteComposableIndexTemplateAction.TYPE,
+                new TransportDeleteComposableIndexTemplateAction.Request("test-composable-template")
             )
         );
     }
@@ -256,8 +261,8 @@ public void testCreateSystemAliasViaComposableTemplateWithAllowsTemplates() thro
 
         assertAcked(
             client().execute(
-                DeleteComposableIndexTemplateAction.INSTANCE,
-                new DeleteComposableIndexTemplateAction.Request("test-composable-template")
+                TransportDeleteComposableIndexTemplateAction.TYPE,
+                new TransportDeleteComposableIndexTemplateAction.Request("test-composable-template")
             )
         );
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java
index 8f6026da835b6..aa4fee3a3f94d 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java
@@ -443,9 +443,7 @@ public void testCreateShrinkWithIndexSort() throws Exception {
         // check that index sort cannot be set on the target index
         IllegalArgumentException exc = expectThrows(
             IllegalArgumentException.class,
-            () -> indicesAdmin().prepareResizeIndex("source", "target")
-                .setSettings(indexSettings(2, 0).put("index.sort.field", "foo").build())
-                .get()
+            indicesAdmin().prepareResizeIndex("source", "target").setSettings(indexSettings(2, 0).put("index.sort.field", "foo").build())
         );
         assertThat(exc.getMessage(), containsString("can't override index sort when resizing an index"));
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java
index 56bbe135de66b..27fd54c39cc95 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java
@@ -461,10 +461,9 @@ public void testCreateSplitWithIndexSort() throws Exception {
         // check that index sort cannot be set on the target index
         IllegalArgumentException exc = expectThrows(
             IllegalArgumentException.class,
-            () -> indicesAdmin().prepareResizeIndex("source", "target")
+            indicesAdmin().prepareResizeIndex("source", "target")
                 .setResizeType(ResizeType.SPLIT)
                 .setSettings(indexSettings(4, 0).put("index.sort.field", "foo").build())
-                .get()
         );
         assertThat(exc.getMessage(), containsString("can't override index sort when resizing an index"));
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java
index 5df1ceea6bfce..3560b74189d1d 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java
@@ -65,10 +65,7 @@ public void testClusterBlockMessageHasIndexName() {
         createIndex("test");
         ensureGreen("test");
         updateIndexSettings(Settings.builder().put(IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE, true), "test");
-        ClusterBlockException e = expectThrows(
-            ClusterBlockException.class,
-            () -> prepareIndex("test").setId("1").setSource("foo", "bar").get()
-        );
+        ClusterBlockException e = expectThrows(ClusterBlockException.class, prepareIndex("test").setId("1").setSource("foo", "bar"));
         assertEquals(
             "index [test] blocked by: [TOO_MANY_REQUESTS/12/disk usage exceeded flood-stage watermark, "
                 + "index has read-only-allow-delete block];",
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java
index d6c337dec53b8..a0d437d8baa73 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java
@@ -66,12 +66,12 @@ public static class EngineTestPlugin extends Plugin implements EnginePlugin {
         public Optional getEngineFactory(IndexSettings indexSettings) {
             return Optional.of(config -> new InternalEngine(config) {
                 @Override
-                public void flush(boolean force, boolean waitIfOngoing, ActionListener listener) throws EngineException {
+                protected void flushHoldingLock(boolean force, boolean waitIfOngoing, ActionListener listener) {
                     final ShardId shardId = config.getShardId();
                     if (failOnFlushShards.contains(shardId)) {
                         listener.onFailure(new EngineException(shardId, "simulated IO"));
                     } else {
-                        super.flush(force, waitIfOngoing, listener);
+                        super.flushHoldingLock(force, waitIfOngoing, listener);
                     }
                 }
             });
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/mapping/put/ValidateMappingRequestPluginIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/mapping/put/ValidateMappingRequestPluginIT.java
index 9297cf9a60282..e94b0a6e0fb76 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/mapping/put/ValidateMappingRequestPluginIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/mapping/put/ValidateMappingRequestPluginIT.java
@@ -57,7 +57,7 @@ public void testValidateMappingRequest() {
         {
             String origin = randomFrom("", "3", "4", "5");
             PutMappingRequest request = new PutMappingRequest().indices("index_1").source("t1", "type=keyword").origin(origin);
-            Exception e = expectThrows(IllegalStateException.class, () -> indicesAdmin().putMapping(request).actionGet());
+            Exception e = expectThrows(IllegalStateException.class, indicesAdmin().putMapping(request));
            assertThat(e.getMessage(), equalTo("not allowed: index[index_1] origin[" + origin + "]"));
         }
         {
@@ -70,7 +70,7 @@
         {
             String origin = randomFrom("", "1", "4", "5");
             PutMappingRequest request = new PutMappingRequest().indices("index_2").source("t2", "type=keyword").origin(origin);
-            Exception e = expectThrows(IllegalStateException.class, () -> indicesAdmin().putMapping(request).actionGet());
+            Exception e = expectThrows(IllegalStateException.class, indicesAdmin().putMapping(request));
             assertThat(e.getMessage(), equalTo("not allowed: index[index_2] origin[" + origin + "]"));
         }
         {
@@ -83,7 +83,7 @@
         {
             String origin = randomFrom("", "1", "3", "4");
             PutMappingRequest request = new PutMappingRequest().indices("*").source("t3", "type=keyword").origin(origin);
-            Exception e = expectThrows(IllegalStateException.class, () -> indicesAdmin().putMapping(request).actionGet());
+            Exception e = expectThrows(IllegalStateException.class, indicesAdmin().putMapping(request));
             assertThat(e.getMessage(), containsString("not allowed:"));
         }
         {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java
index 7ae7fc5c4a180..a5c24ac600564 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java
@@ -15,8 +15,8 @@
 import org.elasticsearch.action.admin.indices.alias.Alias;
 import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
 import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
-import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
 import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder;
+import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
 import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.AliasMetadata;
@@ -53,6 +53,7 @@
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.everyItem;
 import static org.hamcrest.Matchers.is;
@@ -155,7 +156,7 @@ public void testRolloverWithNoWriteIndex() {
         }
         IllegalArgumentException exception = expectThrows(
             IllegalArgumentException.class,
-            () -> indicesAdmin().prepareRolloverIndex("alias").dryRun(randomBoolean()).get()
+            indicesAdmin().prepareRolloverIndex("alias").dryRun(randomBoolean())
         );
         assertThat(exception.getMessage(), equalTo("rollover target [alias] does not point to a write index"));
     }
@@ -272,6 +273,34 @@ public void testRolloverDryRun() throws Exception {
         assertNull(newIndex);
     }
 
+    public void testRolloverLazy() throws Exception {
+        if (randomBoolean()) {
+            PutIndexTemplateRequestBuilder putTemplate = indicesAdmin().preparePutTemplate("test_index")
+                .setPatterns(List.of("test_index-*"))
+                .setOrder(-1)
+                .setSettings(Settings.builder().put(AutoExpandReplicas.SETTING.getKey(), "0-all"));
+            assertAcked(putTemplate.get());
+        }
+        assertAcked(prepareCreate("test_index-1").addAlias(new Alias("test_alias")).get());
+        indexDoc("test_index-1", "1", "field", "value");
+        flush("test_index-1");
+        ensureGreen();
+
+        IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> {
+            RolloverConditions.Builder rolloverConditionsBuilder = RolloverConditions.newBuilder();
+            if (randomBoolean()) {
+                rolloverConditionsBuilder.addMaxIndexDocsCondition(1L);
+            }
+            indicesAdmin().prepareRolloverIndex("test_alias")
+                .dryRun(randomBoolean())
+                .lazy(true)
+                .setConditions(rolloverConditionsBuilder)
+                .get();
+        });
+        assertThat(exception.getMessage(), containsString("can be applied only on a data stream"));
+
+    }
+
     public void testRolloverConditionsNotMet() throws Exception {
         boolean explicitWriteIndex = randomBoolean();
         Alias testAlias = new Alias("test_alias");
@@ -590,7 +619,7 @@ public void testRejectIfAliasFoundInTemplate() throws Exception {
         ensureYellow("logs-write");
         final IllegalArgumentException error = expectThrows(
             IllegalArgumentException.class,
-            () -> indicesAdmin().prepareRolloverIndex("logs-write").get()
+            indicesAdmin().prepareRolloverIndex("logs-write")
         );
         assertThat(
             error.getMessage(),
@@ -777,14 +806,14 @@ public void testMultiThreadedRollover() throws Exception {
         });
 
         // We should *NOT* have a third index, it should have rolled over *exactly* once
-        expectThrows(Exception.class, () -> indicesAdmin().prepareGetIndex().addIndices(writeIndexPrefix + "000003").get());
+        expectThrows(Exception.class, indicesAdmin().prepareGetIndex().addIndices(writeIndexPrefix + "000003"));
     }
 
     public void testRolloverConcurrently() throws Exception {
         int numOfThreads = 5;
         int numberOfRolloversPerThread = 20;
 
-        var putTemplateRequest = new PutComposableIndexTemplateAction.Request("my-template");
+        var putTemplateRequest = new TransportPutComposableIndexTemplateAction.Request("my-template");
         var template = new Template(
             Settings.builder()
                 // Avoid index check, which gets randomly inserted by test framework. This slows down the test a bit.
@@ -797,7 +826,7 @@ public void testRolloverConcurrently() throws Exception {
         putTemplateRequest.indexTemplate(
             ComposableIndexTemplate.builder().indexPatterns(List.of("test-*")).template(template).priority(100L).build()
         );
-        assertAcked(client().execute(PutComposableIndexTemplateAction.INSTANCE, putTemplateRequest).actionGet());
+        assertAcked(client().execute(TransportPutComposableIndexTemplateAction.TYPE, putTemplateRequest).actionGet());
 
         final CyclicBarrier barrier = new CyclicBarrier(numOfThreads);
         final Thread[] threads = new Thread[numOfThreads];
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java
index 310f9394f60c1..1a070c8bd0de3 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java
@@ -31,6 +31,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 import java.util.function.Predicate;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@@ -48,9 +49,13 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Arrays.asList(MockFSIndexStore.TestPlugin.class);
     }
 
+    private static IndicesShardStoresResponse execute(IndicesShardStoresRequest request) {
+        return client().execute(TransportIndicesShardStoresAction.TYPE, request).actionGet(10, TimeUnit.SECONDS);
+    }
+
     public void testEmpty() {
         ensureGreen();
-        IndicesShardStoresResponse rsp = indicesAdmin().prepareShardStores().get();
+        IndicesShardStoresResponse rsp = execute(new IndicesShardStoresRequest());
         assertThat(rsp.getStoreStatuses().size(), equalTo(0));
     }
 
@@ -62,11 +67,11 @@ public void testBasic() throws Exception {
         ensureGreen(index);
 
         // no unallocated shards
-        IndicesShardStoresResponse response = indicesAdmin().prepareShardStores(index).get();
+        IndicesShardStoresResponse response = execute(new IndicesShardStoresRequest(index));
         assertThat(response.getStoreStatuses().size(), equalTo(0));
 
         // all shards
-        response = indicesAdmin().shardStores(new IndicesShardStoresRequest(index).shardStatuses("all")).get();
+        response = execute(new IndicesShardStoresRequest(index).shardStatuses("all"));
         assertThat(response.getStoreStatuses().containsKey(index), equalTo(true));
         Map<Integer, List<IndicesShardStoresResponse.StoreStatus>> shardStores = response.getStoreStatuses().get(index);
         assertThat(shardStores.size(), equalTo(2));
@@ -88,7 +93,7 @@ public void testBasic() throws Exception {
         assertNoTimeout(clusterAdmin().prepareHealth().setWaitForNodes("" + (num - 1)));
         ClusterState clusterState = clusterAdmin().prepareState().get().getState();
         List unassignedShards = clusterState.routingTable().index(index).shardsWithState(ShardRoutingState.UNASSIGNED);
-        response = indicesAdmin().shardStores(new IndicesShardStoresRequest(index)).get();
+        response = execute(new IndicesShardStoresRequest(index));
         assertThat(response.getStoreStatuses().containsKey(index), equalTo(true));
         Map<Integer, List<IndicesShardStoresResponse.StoreStatus>> shardStoresStatuses = response.getStoreStatuses().get(index);
         assertThat(shardStoresStatuses.size(), equalTo(unassignedShards.size()));
@@ -108,14 +113,17 @@ public void testIndices() throws Exception {
         String index1 = "test1";
         String index2 = "test2";
         internalCluster().ensureAtLeastNumDataNodes(2);
-        assertAcked(prepareCreate(index1).setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "2")));
-        assertAcked(prepareCreate(index2).setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "2")));
+        for (final var index : List.of(index1, index2)) {
+            final var settings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2);
+            if (randomBoolean()) {
+                settings.put(IndexMetadata.SETTING_INDEX_HIDDEN, randomBoolean());
+            }
+            assertAcked(prepareCreate(index).setSettings(settings));
+        }
         indexRandomData(index1);
         indexRandomData(index2);
         ensureGreen();
-        IndicesShardStoresResponse response = indicesAdmin().shardStores(
-            new IndicesShardStoresRequest(new String[] {}).shardStatuses("all")
-        ).get();
+        IndicesShardStoresResponse response = execute(new IndicesShardStoresRequest(new String[] {}).shardStatuses("all"));
         Map<String, Map<Integer, List<IndicesShardStoresResponse.StoreStatus>>> shardStatuses = response.getStoreStatuses();
         assertThat(shardStatuses.containsKey(index1), equalTo(true));
         assertThat(shardStatuses.containsKey(index2), equalTo(true));
@@ -123,7 +131,7 @@ public void testIndices() throws Exception {
         assertThat(shardStatuses.get(index1).size(), equalTo(2));
         assertThat(shardStatuses.get(index2).size(), equalTo(2));
 
         // ensure index filtering works
-        response = indicesAdmin().shardStores(new IndicesShardStoresRequest(index1).shardStatuses("all")).get();
+        response = execute(new IndicesShardStoresRequest(index1).shardStatuses("all"));
         shardStatuses = response.getStoreStatuses();
         assertThat(shardStatuses.containsKey(index1), equalTo(true));
         assertThat(shardStatuses.containsKey(index2), equalTo(false));
@@ -170,7 +178,7 @@ public void testCorruptedShards() throws Exception {
         }
         assertBusy(() -> {
             // IndicesClusterStateService#failAndRemoveShard() called asynchronously but we need it to have completed here.
-            IndicesShardStoresResponse rsp = indicesAdmin().prepareShardStores(index).setShardStatuses("all").get();
+            IndicesShardStoresResponse rsp = execute(new IndicesShardStoresRequest(index).shardStatuses("all"));
             Map<Integer, List<IndicesShardStoresResponse.StoreStatus>> shardStatuses = rsp.getStoreStatuses().get(index);
             assertNotNull(shardStatuses);
             assertThat(shardStatuses.size(), greaterThan(0));
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java
index 8bc9bac2543d3..38d5719287292 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java
@@ -108,7 +108,7 @@ public void testExternallySetAutoGeneratedTimestamp() {
             indexRequest.id("test");
         }
         assertThat(
-            expectThrows(IllegalArgumentException.class, () -> client().prepareBulk().add(indexRequest).get()).getMessage(),
+            expectThrows(IllegalArgumentException.class, client().prepareBulk().add(indexRequest)).getMessage(),
             containsString("autoGeneratedTimestamp should not be set externally")
         );
     }
@@ -119,7 +119,7 @@ public void testBulkWithGlobalDefaults() throws Exception {
         {
             BulkRequestBuilder bulkBuilder = client().prepareBulk();
             bulkBuilder.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON);
-            ActionRequestValidationException ex = expectThrows(ActionRequestValidationException.class, bulkBuilder::get);
+            ActionRequestValidationException ex = expectThrows(ActionRequestValidationException.class, bulkBuilder);
             assertThat(ex.validationErrors(), containsInAnyOrder("index is missing", "index is missing", "index is missing"));
         }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java
index 21bbd32e6bf26..0c1930c0cf925 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java
@@ -83,7 +83,7 @@ public void testBasic() {
         }
         refresh("test");
         String pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2));
-        assertResponse(prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), resp1 -> {
+        assertResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp1 -> {
             assertThat(resp1.pointInTimeId(), equalTo(pitId));
             assertHitCount(resp1, numDocs);
         });
@@ -99,13 +99,13 @@ public void testBasic() {
         if (randomBoolean()) {
             final int delDocCount = deletedDocs;
             assertNoFailuresAndResponse(
-                prepareSearch("test").setPreference(null).setQuery(new MatchAllQueryBuilder()),
+                prepareSearch("test").setQuery(new MatchAllQueryBuilder()),
                 resp2 -> assertHitCount(resp2, numDocs - delDocCount)
             );
         }
         try {
             assertNoFailuresAndResponse(
-                prepareSearch().setPreference(null).setQuery(new MatchAllQueryBuilder()).setPointInTime(new PointInTimeBuilder(pitId)),
+                prepareSearch().setQuery(new MatchAllQueryBuilder()).setPointInTime(new PointInTimeBuilder(pitId)),
                 resp3 -> {
                     assertHitCount(resp3, numDocs);
                     assertThat(resp3.pointInTimeId(), equalTo(pitId));
@@ -131,7 +131,7 @@ public void testMultipleIndices() {
         String pitId = openPointInTime(new String[] { "*" }, TimeValue.timeValueMinutes(2));
         try {
             int moreDocs = randomIntBetween(10, 50);
-            assertNoFailuresAndResponse(prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), resp -> {
+            assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> {
                 assertHitCount(resp, numDocs);
                 assertNotNull(resp.pointInTimeId());
                 assertThat(resp.pointInTimeId(), equalTo(pitId));
@@ -143,7 +143,7 @@ public void testMultipleIndices() {
                 refresh();
             });
             assertNoFailuresAndResponse(prepareSearch(), resp -> assertHitCount(resp, numDocs + moreDocs));
-            assertNoFailuresAndResponse(prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), resp -> {
+            assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> {
                 assertHitCount(resp, numDocs);
                 assertNotNull(resp.pointInTimeId());
                 assertThat(resp.pointInTimeId(), equalTo(pitId));
@@ -212,7 +212,7 @@ public void testRelocation() throws Exception {
         refresh();
         String pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2));
         try {
-            assertNoFailuresAndResponse(prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), resp -> {
+            assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> {
                 assertHitCount(resp, numDocs);
                 assertThat(resp.pointInTimeId(), equalTo(pitId));
             });
@@ -232,7 +232,7 @@ public void testRelocation() throws Exception {
                 }
                 refresh();
             }
-            assertNoFailuresAndResponse(prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), resp -> {
+            assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> {
                 assertHitCount(resp, numDocs);
                 assertThat(resp.pointInTimeId(), equalTo(pitId));
             });
@@ -245,7 +245,7 @@ public void testRelocation() throws Exception {
                     .collect(Collectors.toSet());
                 assertThat(assignedNodes, everyItem(not(in(excludedNodes))));
             }, 30, TimeUnit.SECONDS);
-            assertNoFailuresAndResponse(prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), resp -> {
+            assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> {
                 assertHitCount(resp, numDocs);
                 assertThat(resp.pointInTimeId(), equalTo(pitId));
             });
@@ -263,7 +263,7 @@ public void testPointInTimeNotFound() throws Exception {
         }
         refresh();
         String pit = openPointInTime(new String[] { "index" }, TimeValue.timeValueSeconds(5));
-        assertNoFailuresAndResponse(prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pit)), resp1 -> {
+        assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pit)), resp1 -> {
            assertHitCount(resp1, index1);
             if (rarely()) {
                 try {
@@ -280,7 +280,7 @@
         });
         SearchPhaseExecutionException e = expectThrows(
             SearchPhaseExecutionException.class,
-            () -> prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pit)).get()
+            prepareSearch().setPointInTime(new PointInTimeBuilder(pit))
         );
         for (ShardSearchFailure failure : e.shardFailures()) {
             assertThat(ExceptionsHelper.unwrapCause(failure.getCause()), instanceOf(SearchContextMissingException.class));
@@ -306,7 +306,7 @@ public void testIndexNotFound() {
         String pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueMinutes(2));
         try {
             assertNoFailuresAndResponse(
-                prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pit)),
+                prepareSearch().setPointInTime(new PointInTimeBuilder(pit)),
                 resp -> assertHitCount(resp, index1 + index2)
             );
             indicesAdmin().prepareDelete("index-1").get();
@@ -315,21 +315,15 @@
             }
 
             // Allow partial search result
-            assertResponse(
-                prepareSearch().setPreference(null).setAllowPartialSearchResults(true).setPointInTime(new PointInTimeBuilder(pit)),
-                resp -> {
-                    assertFailures(resp);
-                    assertHitCount(resp, index2);
-                }
-            );
+            assertResponse(prepareSearch().setAllowPartialSearchResults(true).setPointInTime(new PointInTimeBuilder(pit)), resp -> {
+                assertFailures(resp);
+                assertHitCount(resp, index2);
+            });
 
             // Do not allow partial search result
             expectThrows(
                 ElasticsearchException.class,
-                () -> prepareSearch().setPreference(null)
-                    .setAllowPartialSearchResults(false)
-                    .setPointInTime(new PointInTimeBuilder(pit))
-                    .get()
+                prepareSearch().setAllowPartialSearchResults(false).setPointInTime(new PointInTimeBuilder(pit))
             );
         } finally {
             closePointInTime(pit);
@@ -365,7 +359,6 @@ public void testCanMatch() throws Exception {
             assertResponse(
                 prepareSearch().setQuery(new RangeQueryBuilder("created_date").gte("2020-01-02").lte("2020-01-03"))
                     .setSearchType(SearchType.QUERY_THEN_FETCH)
-                    .setPreference(null)
                     .setPreFilterShardSize(randomIntBetween(2, 3))
                     .setMaxConcurrentShardRequests(randomIntBetween(1, 2))
                     .setPointInTime(new PointInTimeBuilder(pitId)),
@@ -422,20 +415,17 @@ public void testPartialResults() throws Exception {
         refresh();
         String pitId = openPointInTime(new String[] { "test-*" }, TimeValue.timeValueMinutes(2));
         try {
-            assertNoFailuresAndResponse(prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), resp -> {
+            assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> {
                 assertHitCount(resp, numDocs1 + numDocs2);
                 assertThat(resp.pointInTimeId(), equalTo(pitId));
             });
             internalCluster().restartNode(assignedNodeForIndex1);
-            assertResponse(
-                prepareSearch().setPreference(null).setAllowPartialSearchResults(true).setPointInTime(new PointInTimeBuilder(pitId)),
-                resp -> {
-                    assertFailures(resp);
-                    assertThat(resp.pointInTimeId(), equalTo(pitId));
-                    assertHitCount(resp, numDocs2);
-                }
-            );
+            assertResponse(prepareSearch().setAllowPartialSearchResults(true).setPointInTime(new PointInTimeBuilder(pitId)), resp -> {
+                assertFailures(resp);
+                assertThat(resp.pointInTimeId(), equalTo(pitId));
+                assertHitCount(resp, numDocs2);
+            });
         } finally {
             closePointInTime(pitId);
         }
@@ -486,10 +476,7 @@ public void testPITTiebreak() throws Exception {
     }
 
     public void testCloseInvalidPointInTime() {
-        expectThrows(
-            Exception.class,
-            () -> client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest("")).actionGet()
-        );
+        expectThrows(Exception.class, client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest("")));
         List tasks = clusterAdmin().prepareListTasks().setActions(TransportClosePointInTimeAction.TYPE.name()).get().getTasks();
         assertThat(tasks, empty());
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java
index 5bb21dc874747..dd71b82c106a8 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java
@@ -319,8 +319,7 @@ public void testWaitForRefreshIndexValidation() throws Exception {
 
         IllegalArgumentException e = expectThrows(
             IllegalArgumentException.class,
-            () -> prepareSearch("testFailedAlias").setWaitForCheckpoints(Collections.singletonMap("testFailedAlias", validCheckpoints))
-                .get()
+            prepareSearch("testFailedAlias").setWaitForCheckpoints(Collections.singletonMap("testFailedAlias", validCheckpoints))
         );
         assertThat(
             e.getMessage(),
@@ -332,7 +331,7 @@
 
         IllegalArgumentException e2 = expectThrows(
             IllegalArgumentException.class,
-            () -> prepareSearch("test1").setWaitForCheckpoints(Collections.singletonMap("test1", new long[2])).get()
+            prepareSearch("test1").setWaitForCheckpoints(Collections.singletonMap("test1", new long[2]))
         );
         assertThat(
             e2.getMessage(),
@@ -346,7 +345,7 @@
 
         IllegalArgumentException e3 = expectThrows(
             IllegalArgumentException.class,
-            () -> prepareSearch("testAlias").setWaitForCheckpoints(Collections.singletonMap("testAlias", new long[2])).get()
+            prepareSearch("testAlias").setWaitForCheckpoints(Collections.singletonMap("testAlias", new long[2]))
        );
         assertThat(
             e3.getMessage(),
@@ -360,7 +359,7 @@
 
         IllegalArgumentException e4 = expectThrows(
             IllegalArgumentException.class,
-            () -> prepareSearch("testAlias").setWaitForCheckpoints(Collections.singletonMap("test2", validCheckpoints)).get()
+            prepareSearch("testAlias").setWaitForCheckpoints(Collections.singletonMap("test2", validCheckpoints))
         );
         assertThat(
             e4.getMessage(),
@@ -383,7 +382,7 @@ public void testShardCountLimit() throws Exception {
 
         updateClusterSettings(Settings.builder().put(TransportSearchAction.SHARD_COUNT_LIMIT_SETTING.getKey(), numPrimaries1 - 1));
 
-        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> prepareSearch("test1").get());
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, prepareSearch("test1"));
         assertThat(
             e.getMessage(),
             containsString("Trying to query " + numPrimaries1 + " shards, which is over the limit of " + (numPrimaries1 - 1))
@@ -394,7 +393,7 @@
         // no exception
         prepareSearch("test1").get().decRef();
 
-        e = expectThrows(IllegalArgumentException.class, () -> prepareSearch("test1", "test2").get());
+        e = expectThrows(IllegalArgumentException.class, prepareSearch("test1", "test2"));
         assertThat(
             e.getMessage(),
             containsString(
@@ -478,7 +477,7 @@ public void onFailure(Exception e) {
             assertBusy(() -> {
                 Exception exc = expectThrows(
                     Exception.class,
-                    () -> client.prepareSearch("test").addAggregation(new TestAggregationBuilder("test")).get().decRef()
+                    client.prepareSearch("test").addAggregation(new TestAggregationBuilder("test"))
                 );
                 assertThat(exc.getCause().getMessage(), containsString("<reduce_aggs>"));
             });
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java
index d98de846bd9da..9661f4ebb966d 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java
@@ -43,7 +43,6 @@
 import java.util.concurrent.ExecutionException;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
@@ -420,7 +419,7 @@ public void testDuelESLucene() throws Exception {
 
         for (TestConfig test : testConfigs) {
             TermVectorsRequestBuilder request = getRequestForConfig(test);
             if (test.expectedException != null) {
-                assertRequestBuilderThrows(request, test.expectedException);
+                expectThrows(test.expectedException, request);
                 continue;
             }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java
index 8ede5dc5ef29f..2f10711db7371 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java
@@ -19,7 +19,6 @@
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
-import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.AliasMetadata;
 import org.elasticsearch.cluster.metadata.IndexAbstraction;
@@ -90,7 +89,7 @@ public void testAliases() throws Exception {
         logger.info("--> indexing against [alias1], should fail now");
         IllegalArgumentException exception = expectThrows(
             IllegalArgumentException.class,
-            () -> client().index(new IndexRequest("alias1").id("1").source(source("2", "test"), XContentType.JSON)).actionGet()
+            client().index(new IndexRequest("alias1").id("1").source(source("2", "test"), XContentType.JSON))
         );
         assertThat(
             exception.getMessage(),
@@ -124,7 +123,7 @@ public void testAliases() throws Exception {
         logger.info("--> indexing against [alias1], should fail now");
         exception = expectThrows(
             IllegalArgumentException.class,
-            () -> client().index(new IndexRequest("alias1").id("1").source(source("2", "test"), XContentType.JSON)).actionGet()
+            client().index(new IndexRequest("alias1").id("1").source(source("2", "test"), XContentType.JSON))
         );
         assertThat(
             exception.getMessage(),
@@ -136,7 +135,7 @@
         );
 
         logger.info("--> deleting against [alias1], should fail now");
-        exception = expectThrows(IllegalArgumentException.class, () -> client().delete(new DeleteRequest("alias1").id("1")).actionGet());
+        exception = expectThrows(IllegalArgumentException.class, client().delete(new DeleteRequest("alias1").id("1")));
         assertThat(
             exception.getMessage(),
             equalTo(
@@ -183,17 +182,11 @@ public void testFailedFilter() throws Exception {
         createIndex("test");
 
         // invalid filter, invalid json
-        Exception e = expectThrows(
-            IllegalArgumentException.class,
-            () -> indicesAdmin().prepareAliases().addAlias("test", "alias1", "abcde").get()
-        );
+        Exception e = expectThrows(IllegalArgumentException.class, indicesAdmin().prepareAliases().addAlias("test", "alias1", "abcde"));
         assertThat(e.getMessage(), equalTo("failed to parse filter for alias [alias1]"));
 
         // valid json , invalid filter
-        e = expectThrows(
-            IllegalArgumentException.class,
-            () -> indicesAdmin().prepareAliases().addAlias("test", "alias1", "{ \"test\": {} }").get()
-        );
+        e = expectThrows(IllegalArgumentException.class, indicesAdmin().prepareAliases().addAlias("test", "alias1", "{ \"test\": {} }"));
         assertThat(e.getMessage(), equalTo("failed to parse filter for alias [alias1]"));
     }
 
@@ -224,7 +217,7 @@ public void testEmptyFilter() throws Exception {
         logger.info("--> aliasing index [test] with [alias1] and empty filter");
         IllegalArgumentException iae = expectThrows(
             IllegalArgumentException.class,
-            () -> indicesAdmin().prepareAliases().addAlias("test", "alias1", "{}").get()
+            indicesAdmin().prepareAliases().addAlias("test", "alias1", "{}")
         );
         assertEquals("failed to parse filter for alias [alias1]", iae.getMessage());
     }
@@ -677,7 +670,7 @@ public void testDeleteAliases() throws Exception {
         assertFalse(indicesAdmin().prepareGetAliases("foo").setIndices("bar_bar").get().getAliases().isEmpty());
         IllegalArgumentException iae = expectThrows(
             IllegalArgumentException.class,
-            () -> indicesAdmin().prepareAliases().addAliasAction(AliasActions.remove().index("foo").alias("foo")).get()
+            indicesAdmin().prepareAliases().addAliasAction(AliasActions.remove().index("foo").alias("foo"))
        );
         assertEquals(
             "The provided expression [foo] matches an alias, specify the corresponding concrete indices instead.",
@@ -1090,7 +1083,7 @@ public void testAliasesCanBeAddedToIndicesOnly() throws Exception {
 
         IllegalArgumentException iae = expectThrows(
             IllegalArgumentException.class,
-            () -> indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("week_20").alias("tmp")).get()
+            indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("week_20").alias("tmp"))
         );
         assertEquals(
             "The provided expression [week_20] matches an alias, specify the corresponding concrete indices instead.",
@@ -1211,10 +1204,7 @@ public void testAliasActionRemoveIndex() throws InterruptedException, ExecutionE
             assertAcked(indicesAdmin().prepareAliases().addAlias("bar_bar", "foo"));
         });
 
-        IllegalArgumentException iae = expectThrows(
-            IllegalArgumentException.class,
-            () -> indicesAdmin().prepareAliases().removeIndex("foo").get()
-        );
+        IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, indicesAdmin().prepareAliases().removeIndex("foo"));
         assertEquals(
             "The provided expression [foo] matches an alias, specify the corresponding concrete indices instead.",
             iae.getMessage()
@@ -1249,11 +1239,10 @@
 
         assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index1).alias(alias)));
 
-        IllegalStateException ex = expectThrows(IllegalStateException.class, () -> {
-            AcknowledgedResponse res = indicesAdmin().prepareAliases()
-                .addAliasAction(AliasActions.add().index(index2).alias(alias).isHidden(true))
-                .get();
-        });
+        IllegalStateException ex = expectThrows(
+            IllegalStateException.class,
+            indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index2).alias(alias).isHidden(true))
+        );
         logger.error("exception: {}", ex.getMessage());
         assertThat(ex.getMessage(), containsString("has is_hidden set to true on indices"));
 
@@ -1261,18 +1250,18 @@
         assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index1).alias(alias).isHidden(false)));
         expectThrows(
             IllegalStateException.class,
-            () -> indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index2).alias(alias).isHidden(true)).get()
+            indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index2).alias(alias).isHidden(true))
         );
 
         assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.remove().index(index1).alias(alias)));
         assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index1).alias(alias).isHidden(true)));
         expectThrows(
             IllegalStateException.class,
-            () -> indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index2).alias(alias).isHidden(false)).get()
+            indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index2).alias(alias).isHidden(false))
        );
         expectThrows(
             IllegalStateException.class,
-            () -> indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index2).alias(alias)).get()
+            indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index2).alias(alias))
         );
 
         // Both visible
@@ -1359,7 +1348,7 @@ public void testCreateIndexAndAliasWithSameNameFails() {
         final String indexName = "index-name";
         final IllegalArgumentException iae = expectThrows(
             IllegalArgumentException.class,
-            () -> indicesAdmin().prepareCreate(indexName).addAlias(new Alias(indexName)).get()
+            indicesAdmin().prepareCreate(indexName).addAlias(new Alias(indexName))
         );
         assertEquals("alias name [" + indexName + "] self-conflicts with index name", iae.getMessage());
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java
index 6206f2357218c..136db24767d22 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java
@@ -12,7 +12,6 @@
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.DocWriteResponse;
 import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
-import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockRequestBuilder;
 import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockResponse;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import
org.elasticsearch.action.support.ActiveShardCount; @@ -194,10 +193,7 @@ public void testAddBlocksWhileExistingBlocks() { } public void testAddBlockToMissingIndex() { - IndexNotFoundException e = expectThrows( - IndexNotFoundException.class, - () -> indicesAdmin().prepareAddBlock(randomAddableBlock(), "test").get() - ); + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareAddBlock(randomAddableBlock(), "test")); assertThat(e.getMessage(), is("no such index [test]")); } @@ -205,7 +201,7 @@ public void testAddBlockToOneMissingIndex() { createIndex("test1"); final IndexNotFoundException e = expectThrows( IndexNotFoundException.class, - () -> indicesAdmin().prepareAddBlock(randomAddableBlock(), "test1", "test2").get() + indicesAdmin().prepareAddBlock(randomAddableBlock(), "test1", "test2") ); assertThat(e.getMessage(), is("no such index [test2]")); } @@ -224,7 +220,7 @@ public void testCloseOneMissingIndexIgnoreMissing() throws Exception { public void testAddBlockNoIndex() { final ActionRequestValidationException e = expectThrows( ActionRequestValidationException.class, - () -> indicesAdmin().prepareAddBlock(randomAddableBlock()).get() + indicesAdmin().prepareAddBlock(randomAddableBlock()) ); assertThat(e.getMessage(), containsString("index is missing")); } @@ -235,8 +231,10 @@ public void testAddBlockNullIndex() { public void testCannotAddReadOnlyAllowDeleteBlock() { createIndex("test1"); - final AddIndexBlockRequestBuilder request = indicesAdmin().prepareAddBlock(APIBlock.READ_ONLY_ALLOW_DELETE, "test1"); - final ActionRequestValidationException e = expectThrows(ActionRequestValidationException.class, request::get); + final ActionRequestValidationException e = expectThrows( + ActionRequestValidationException.class, + indicesAdmin().prepareAddBlock(APIBlock.READ_ONLY_ALLOW_DELETE, "test1") + ); assertThat(e.getMessage(), containsString("read_only_allow_delete block is for internal use only")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java index 214e3f73144d9..7a8accf8cc7ce 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java @@ -46,8 +46,8 @@ public void testSimpleLocalHealth() { .prepareHealth() .setLocal(true) .setWaitForEvents(Priority.LANGUID) - .setTimeout("30s") - .get("10s"); + .setTimeout(TimeValue.timeValueSeconds(30)) + .get(TimeValue.timeValueSeconds(10)); logger.info("--> got cluster health on [{}]", node); assertFalse("timed out on " + node, health.isTimedOut()); assertThat("health status on " + node, health.getStatus(), equalTo(ClusterHealthStatus.GREEN)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java index aa54e46389676..d21ec3e343943 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java @@ -344,14 +344,11 @@ public void testNoMasterActionsMetadataWriteMasterBlock() throws Exception { GetResponse getResponse = client(randomFrom(nodesWithShards)).prepareGet("test1", "1").get(); assertExists(getResponse); - expectThrows(Exception.class, () -> client(partitionedNode).prepareGet("test1", "1").get()); + 
expectThrows(Exception.class, client(partitionedNode).prepareGet("test1", "1")); assertHitCount(client(randomFrom(nodesWithShards)).prepareSearch("test1").setAllowPartialSearchResults(true).setSize(0), 1L); - expectThrows( - Exception.class, - () -> client(partitionedNode).prepareSearch("test1").setAllowPartialSearchResults(true).setSize(0).get() - ); + expectThrows(Exception.class, client(partitionedNode).prepareSearch("test1").setAllowPartialSearchResults(true).setSize(0)); TimeValue timeout = TimeValue.timeValueMillis(200); client(randomFrom(nodesWithShards)).prepareUpdate("test1", "1") @@ -361,10 +358,7 @@ public void testNoMasterActionsMetadataWriteMasterBlock() throws Exception { expectThrows( Exception.class, - () -> client(partitionedNode).prepareUpdate("test1", "1") - .setDoc(Requests.INDEX_CONTENT_TYPE, "field", "value2") - .setTimeout(timeout) - .get() + client(partitionedNode).prepareUpdate("test1", "1").setDoc(Requests.INDEX_CONTENT_TYPE, "field", "value2").setTimeout(timeout) ); client(randomFrom(nodesWithShards)).prepareIndex("test1") @@ -376,30 +370,27 @@ public void testNoMasterActionsMetadataWriteMasterBlock() throws Exception { // dynamic mapping updates fail expectThrows( MasterNotDiscoveredException.class, - () -> client(randomFrom(nodesWithShards)).prepareIndex("test1") + client(randomFrom(nodesWithShards)).prepareIndex("test1") .setId("1") .setSource(XContentFactory.jsonBuilder().startObject().field("new_field", "value").endObject()) .setTimeout(timeout) - .get() ); // dynamic index creation fails expectThrows( MasterNotDiscoveredException.class, - () -> client(randomFrom(nodesWithShards)).prepareIndex("test2") + client(randomFrom(nodesWithShards)).prepareIndex("test2") .setId("1") .setSource(XContentFactory.jsonBuilder().startObject().endObject()) .setTimeout(timeout) - .get() ); expectThrows( Exception.class, - () -> client(partitionedNode).prepareIndex("test1") + client(partitionedNode).prepareIndex("test1") .setId("1") .setSource(XContentFactory.jsonBuilder().startObject().endObject()) .setTimeout(timeout) - .get() ); internalCluster().clearDisruptionScheme(true); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java index c4f06cc90fdf3..e6ea4823e86f0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java @@ -253,7 +253,7 @@ public void testForceAwarenessSettingValidation() { final IllegalArgumentException illegalArgumentException = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(prefix + "nonsense", "foo")).get() + clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(prefix + "nonsense", "foo")) ); assertThat(illegalArgumentException.getMessage(), containsString("[cluster.routing.allocation.awareness.force.]")); assertThat(illegalArgumentException.getCause(), instanceOf(SettingsException.class)); @@ -262,9 +262,7 @@ public void testForceAwarenessSettingValidation() { assertThat( expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(prefix + "attr.not_values", "foo")) - .get() + 
clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(prefix + "attr.not_values", "foo")) ).getMessage(), containsString("[cluster.routing.allocation.awareness.force.attr.not_values]") ); @@ -272,9 +270,7 @@ public void testForceAwarenessSettingValidation() { assertThat( expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(prefix + "attr.values.junk", "foo")) - .get() + clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(prefix + "attr.values.junk", "foo")) ).getMessage(), containsString("[cluster.routing.allocation.awareness.force.attr.values.junk]") ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java index 33719df372fb1..ae79c388aa104 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java @@ -192,9 +192,8 @@ public void testInvalidIPFilterClusterSettings() { ); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings() .setPersistentSettings(Settings.builder().put(filterSetting.getKey() + ipKey, "192.168.1.1.")) - .get() ); assertEquals("invalid IP address [192.168.1.1.] for [" + filterSetting.getKey() + ipKey + "]", e.getMessage()); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index b65e715b454dc..e7a7a6f2ba727 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -10,7 +10,9 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequestBuilder; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.support.ActiveShardCount; @@ -207,9 +209,7 @@ public void testFailedAllocationOfStalePrimaryToDataNodeWithNoData() throws Exce logger.info("--> force allocation of stale copy to node that does not have shard copy"); Throwable iae = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareReroute() - .add(new AllocateStalePrimaryAllocationCommand("test", 0, dataNodeWithNoShardCopy, true)) - .get() + clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand("test", 0, dataNodeWithNoShardCopy, true)) ); assertThat(iae.getMessage(), equalTo("No data for shard [0] of index [test] found on any node")); @@ -248,10 +248,10 @@ public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { boolean useStaleReplica = randomBoolean(); // if true, use stale replica, otherwise a completely empty copy logger.info("--> explicitly promote old primary shard"); final 
String idxName = "test"; - Map> storeStatuses = indicesAdmin().prepareShardStores(idxName) - .get() - .getStoreStatuses() - .get(idxName); + Map> storeStatuses = client().execute( + TransportIndicesShardStoresAction.TYPE, + new IndicesShardStoresRequest(idxName) + ).get().getStoreStatuses().get(idxName); ClusterRerouteRequestBuilder rerouteBuilder = clusterAdmin().prepareReroute(); for (Map.Entry> shardStoreStatuses : storeStatuses.entrySet()) { int shardId = shardStoreStatuses.getKey(); @@ -333,7 +333,7 @@ public void testForceStaleReplicaToBePromotedToPrimaryOnWrongNode() throws Excep final int shardId = 0; final List nodeNames = new ArrayList<>(Arrays.asList(internalCluster().getNodeNames())); nodeNames.remove(master); - indicesAdmin().prepareShardStores(idxName) + client().execute(TransportIndicesShardStoresAction.TYPE, new IndicesShardStoresRequest(idxName)) .get() .getStoreStatuses() .get(idxName) @@ -343,9 +343,7 @@ public void testForceStaleReplicaToBePromotedToPrimaryOnWrongNode() throws Excep final String nodeWithoutData = nodeNames.get(0); Throwable iae = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareReroute() - .add(new AllocateStalePrimaryAllocationCommand(idxName, shardId, nodeWithoutData, true)) - .get() + clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(idxName, shardId, nodeWithoutData, true)) ); assertThat( iae.getMessage(), @@ -363,9 +361,7 @@ public void testForceStaleReplicaToBePromotedForGreenIndex() { final int shardId = 0; IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareReroute() - .add(new AllocateStalePrimaryAllocationCommand(idxName, shardId, nodeWithoutData, true)) - .get() + clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(idxName, shardId, nodeWithoutData, true)) ); assertThat(iae.getMessage(), equalTo("[allocate_stale_primary] primary [" + idxName + "][" + shardId + "] is already assigned")); } @@ -376,7 +372,7 @@ public void testForceStaleReplicaToBePromotedForMissingIndex() { final String idxName = "test"; IndexNotFoundException ex = expectThrows( IndexNotFoundException.class, - () -> clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(idxName, 0, dataNode, true)).get() + clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(idxName, 0, dataNode, true)) ); assertThat(ex.getIndex().getName(), equalTo(idxName)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java index 873f8083f4e0c..fde465346d4be 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java @@ -357,7 +357,7 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) assertTrue(controlSources.isEmpty()); controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5", "6", "7", "8", "9", "10")); - PendingClusterTasksResponse response = internalCluster().coordOnlyNodeClient().admin().cluster().preparePendingClusterTasks().get(); + PendingClusterTasksResponse response = getClusterPendingTasks(internalCluster().coordOnlyNodeClient()); assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(10)); assertThat(response.pendingTasks().get(0).getSource().string(), equalTo("1")); 
assertThat(response.pendingTasks().get(0).isExecuting(), equalTo(true)); @@ -419,7 +419,7 @@ public void onFailure(Exception e) { } assertTrue(controlSources.isEmpty()); - response = internalCluster().coordOnlyNodeClient().admin().cluster().preparePendingClusterTasks().get(); + response = getClusterPendingTasks(internalCluster().coordOnlyNodeClient()); assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(5)); controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5")); for (PendingClusterTask task : response.pendingTasks()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java index 9818b0a89bc8e..a142d594fe06e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java @@ -369,21 +369,23 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO assertTrue(state.getMetadata().persistentSettings().getAsBoolean("archived.this.is.unknown", false)); // cannot remove read only block due to archived settings - final IllegalArgumentException e1 = expectThrows(IllegalArgumentException.class, () -> { + { Settings.Builder builder = Settings.builder(); clearOrSetFalse(builder, readOnly, Metadata.SETTING_READ_ONLY_SETTING); clearOrSetFalse(builder, readOnlyAllowDelete, Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING); - clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder).setTransientSettings(builder).get(); - }); - assertTrue(e1.getMessage().contains("unknown setting [archived.this.is.unknown]")); + final IllegalArgumentException e1 = expectThrows( + IllegalArgumentException.class, + clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder).setTransientSettings(builder) + ); + assertTrue(e1.getMessage().contains("unknown setting [archived.this.is.unknown]")); + } // fail to clear archived settings with non-archived settings final ClusterBlockException e2 = expectThrows( ClusterBlockException.class, - () -> clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings() .setPersistentSettings(Settings.builder().putNull("cluster.routing.allocation.enable")) .setTransientSettings(Settings.builder().putNull("archived.*")) - .get() ); if (readOnly) { assertTrue(e2.getMessage().contains("cluster read-only (api)")); @@ -395,7 +397,7 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO // fail to clear archived settings due to cluster read only block final ClusterBlockException e3 = expectThrows( ClusterBlockException.class, - () -> clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().putNull("archived.*")).get() + clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().putNull("archived.*")) ); if (readOnly) { assertTrue(e3.getMessage().contains("cluster read-only (api)")); @@ -404,8 +406,8 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO assertTrue(e3.getMessage().contains("cluster read-only / allow delete (api)")); } - // fail to clear archived settings with adding cluster block - final ClusterBlockException e4 = expectThrows(ClusterBlockException.class, () -> { + { + // fail to clear archived settings with adding cluster block Settings.Builder builder = Settings.builder().putNull("archived.*"); if 
(randomBoolean()) { builder.put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), "true"); @@ -415,27 +417,33 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO } else { builder.put(Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), "true"); } - clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder).get(); - }); - if (readOnly) { - assertTrue(e4.getMessage().contains("cluster read-only (api)")); - } - if (readOnlyAllowDelete) { - assertTrue(e4.getMessage().contains("cluster read-only / allow delete (api)")); + final ClusterBlockException e4 = expectThrows( + ClusterBlockException.class, + clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder) + ); + if (readOnly) { + assertTrue(e4.getMessage().contains("cluster read-only (api)")); + } + if (readOnlyAllowDelete) { + assertTrue(e4.getMessage().contains("cluster read-only / allow delete (api)")); + } } - // fail to set archived settings to non-null value even with clearing blocks together - final ClusterBlockException e5 = expectThrows(ClusterBlockException.class, () -> { + { + // fail to set archived settings to non-null value even with clearing blocks together Settings.Builder builder = Settings.builder().put("archived.this.is.unknown", "false"); clearOrSetFalse(builder, readOnly, Metadata.SETTING_READ_ONLY_SETTING); clearOrSetFalse(builder, readOnlyAllowDelete, Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING); - clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder).get(); - }); - if (readOnly) { - assertTrue(e5.getMessage().contains("cluster read-only (api)")); - } - if (readOnlyAllowDelete) { - assertTrue(e5.getMessage().contains("cluster read-only / allow delete (api)")); + final ClusterBlockException e5 = expectThrows( + ClusterBlockException.class, + clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder) + ); + if (readOnly) { + assertTrue(e5.getMessage().contains("cluster read-only (api)")); + } + if (readOnlyAllowDelete) { + assertTrue(e5.getMessage().contains("cluster read-only / allow delete (api)")); + } } // we can clear read-only block with archived settings together @@ -536,7 +544,7 @@ private void testLoggerLevelUpdate(final BiConsumer - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> throwBuilder.get()); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, throwBuilder); assertEquals("Unknown level constant [BOOM].", e.getMessage()); try { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java index 2db8474993d31..2bc6856479ab7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java @@ -123,10 +123,7 @@ public void testIndexCreationOverLimitFromTemplate() { .setSettings(indexSettings(counts.getFailingIndexShards(), counts.getFailingIndexReplicas())) ); - final IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> indicesAdmin().prepareCreate("should-fail").get() - ); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, indicesAdmin().prepareCreate("should-fail")); verifyException(dataNodes, counts, e); ClusterState clusterState = clusterAdmin().prepareState().get().getState(); assertFalse(clusterState.getMetadata().hasIndex("should-fail")); diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 4aabf0ac66a32..a0efb81c18668 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -109,7 +109,7 @@ static ConflictMode randomMode() { public void testAckedIndexing() throws Exception { final int seconds = (TEST_NIGHTLY && rarely()) == false ? 1 : 5; - final String timeout = seconds + "s"; + final TimeValue timeout = TimeValue.timeValueSeconds(seconds); final List<String> nodes = startCluster(rarely() ? 5 : 3); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeRepurposeCommandIT.java b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeRepurposeCommandIT.java index 2a5295caf31b2..e6839044e2100 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeRepurposeCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeRepurposeCommandIT.java @@ -81,7 +81,7 @@ public void testRepurpose() throws Exception { internalCluster().startCoordinatingOnlyNode(dataNodeDataPathSettings); assertTrue(indexExists(indexName)); - expectThrows(NoShardAvailableActionException.class, () -> client().prepareGet(indexName, "1").get()); + expectThrows(NoShardAvailableActionException.class, client().prepareGet(indexName, "1")); logger.info("--> Restarting and repurposing other node"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 600219da3d90f..5f3b854b74fb4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -414,7 +414,7 @@ public void testRecoverBrokenIndexMetadata() throws Exception { assertEquals(IndexMetadata.State.CLOSE, state.getMetadata().index(metadata.getIndex()).getState()); assertEquals("boolean", state.getMetadata().index(metadata.getIndex()).getSettings().get("archived.index.similarity.BM25.type")); // try to open it with the broken setting - fail again! - ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> indicesAdmin().prepareOpen("test").get()); + ElasticsearchException ex = expectThrows(ElasticsearchException.class, indicesAdmin().prepareOpen("test")); assertEquals(ex.getMessage(), "Failed to verify index " + metadata.getIndex()); assertNotNull(ex.getCause()); assertEquals(IllegalArgumentException.class, ex.getCause().getClass()); @@ -480,7 +480,7 @@ public void testRecoverMissingAnalyzer() throws Exception { indicesAdmin().prepareClose("test").get(); // try to open it with the broken setting - fail again! 
- ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> indicesAdmin().prepareOpen("test").get()); + ElasticsearchException ex = expectThrows(ElasticsearchException.class, indicesAdmin().prepareOpen("test")); assertEquals(ex.getMessage(), "Failed to verify index " + metadata.getIndex()); assertNotNull(ex.getCause()); assertEquals(MapperParsingException.class, ex.getCause().getClass()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayServiceIT.java index 66a05a6f96b00..6ac91bd483614 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayServiceIT.java @@ -27,6 +27,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Predicate; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class GatewayServiceIT extends ESIntegTestCase { @@ -66,7 +67,7 @@ public void beforeAllocation(RoutingAllocation allocation) { } @Override - public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) {} + public void afterPrimariesBeforeReplicas(RoutingAllocation allocation, Predicate<ShardRouting> isRelevantShardPredicate) {} @Override public void allocateUnassigned( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java index 116a53f5dbfae..27e63e5614744 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java @@ -14,7 +14,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; @@ -97,7 +96,7 @@ void syncFlush(String syncId) throws IOException { // make sure that background merges won't happen; otherwise, IndexWriter#hasUncommittedChanges can become true again forceMerge(false, 1, false, UUIDs.randomBase64UUID()); assertNotNull(indexWriter); - try (ReleasableLock ignored = readLock.acquire()) { + try (var ignored = acquireEnsureOpenRef()) { assertThat(getTranslogStats().getUncommittedOperations(), equalTo(0)); Map<String, String> userData = new HashMap<>(getLastCommittedSegmentInfos().userData); SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(userData.entrySet()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java index 3a12856fb92b5..d4fe2fcb4d4c1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java @@ -205,10 +205,7 @@ public void testGetWithAliasPointingToMultipleIndices() { DocWriteResponse indexResponse = prepareIndex("index1").setId("id").setSource(Collections.singletonMap("foo", "bar")).get(); assertThat(indexResponse.status().getStatus(), equalTo(RestStatus.CREATED.getStatus())); -
IllegalArgumentException exception = expectThrows( - IllegalArgumentException.class, - () -> client().prepareGet("alias1", "_alias_id").get() - ); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, client().prepareGet("alias1", "_alias_id")); assertThat(exception.getMessage(), endsWith("can't execute a single index op")); } @@ -550,13 +547,13 @@ public void testGetFieldsNonLeafField() throws Exception { IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, - () -> client().prepareGet(indexOrAlias(), "1").setStoredFields("field1").get() + client().prepareGet(indexOrAlias(), "1").setStoredFields("field1") ); assertThat(exc.getMessage(), equalTo("field [field1] isn't a leaf field")); flush(); - exc = expectThrows(IllegalArgumentException.class, () -> client().prepareGet(indexOrAlias(), "1").setStoredFields("field1").get()); + exc = expectThrows(IllegalArgumentException.class, client().prepareGet(indexOrAlias(), "1").setStoredFields("field1")); assertThat(exc.getMessage(), equalTo("field [field1] isn't a leaf field")); } @@ -822,7 +819,7 @@ void indexSingleDocumentWithStringFieldsGeneratedFromText(boolean stored, boolea } public void testGetRemoteIndex() { - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> client().prepareGet("cluster:index", "id").get()); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, client().prepareGet("cluster:index", "id")); assertEquals( "Cross-cluster calls are not supported in this context but remote indices were requested: [cluster:index]", iae.getMessage() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java index a31c3a08b8a4f..798f0e9bfb09f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java @@ -27,13 +27,11 @@ import java.util.List; import java.util.Map; import java.util.NoSuchElementException; -import java.util.concurrent.ExecutionException; import java.util.stream.Stream; import static org.elasticsearch.common.util.CollectionUtils.appendToCopy; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.instanceOf; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class GetHealthActionIT extends ESIntegTestCase { @@ -172,16 +170,10 @@ public void testGetHealth() throws Exception { testIndicator(client, ilmIndicatorStatus, true); // Next, test that if we ask for a nonexistent indicator, we get an exception - { - ExecutionException exception = expectThrows( - ExecutionException.class, - () -> client.execute( - GetHealthAction.INSTANCE, - new GetHealthAction.Request(NONEXISTENT_INDICATOR_NAME, randomBoolean(), 1000) - ).get() - ); - assertThat(exception.getCause(), instanceOf(ResourceNotFoundException.class)); - } + expectThrows( + ResourceNotFoundException.class, + client.execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(NONEXISTENT_INDICATOR_NAME, randomBoolean(), 1000)) + ); // Check health api stats { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java index 443d0c384a058..216d5e25218e3 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java @@ -73,7 +73,7 @@ public void testFinalPipelineCantChangeDestination() { final IllegalStateException e = expectThrows( IllegalStateException.class, - () -> prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() + prepareIndex("index").setId("1").setSource(Map.of("field", "value")) ); assertThat( e, @@ -93,7 +93,7 @@ public void testFinalPipelineCantRerouteDestination() { final IllegalStateException e = expectThrows( IllegalStateException.class, - () -> prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() + prepareIndex("index").setId("1").setSource(Map.of("field", "value")) ); assertThat( e, @@ -224,10 +224,7 @@ public void testAvoidIndexingLoop() { IllegalStateException exception = expectThrows( IllegalStateException.class, - () -> prepareIndex("index").setId("1") - .setSource(Map.of("dest", "index")) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get() + prepareIndex("index").setId("1").setSource(Map.of("dest", "index")).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) ); assertThat( exception.getMessage(), @@ -242,7 +239,7 @@ public void testFinalPipeline() { // this asserts that the final_pipeline was used, without us having to actually create the pipeline etc. final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() + prepareIndex("index").setId("1").setSource(Map.of("field", "value")) ); assertThat(e, hasToString(containsString("pipeline with id [final_pipeline] does not exist"))); } @@ -369,7 +366,7 @@ public void testHighOrderFinalPipelinePreferred() throws IOException { // this asserts that the high_order_final_pipeline was selected, without us having to actually create the pipeline etc. 
final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() + prepareIndex("index").setId("1").setSource(Map.of("field", "value")) ); assertThat(e, hasToString(containsString("pipeline with id [high_order_final_pipeline] does not exist"))); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java index 14d9cf9e56eae..f225674215bb2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java @@ -117,10 +117,9 @@ public void testGlobalTemplatesDoNotApply() { public void testGlobalTemplateCannotMakeIndexHidden() { InvalidIndexTemplateException invalidIndexTemplateException = expectThrows( InvalidIndexTemplateException.class, - () -> indicesAdmin().preparePutTemplate("a_global_template") + indicesAdmin().preparePutTemplate("a_global_template") .setPatterns(List.of("*")) .setSettings(Settings.builder().put("index.hidden", randomBoolean()).build()) - .get() ); assertThat(invalidIndexTemplateException.getMessage(), containsString("global templates may not specify the setting index.hidden")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/IndexSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/IndexSortIT.java index fae08f8d5577e..aca13ecb3b0e7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/IndexSortIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/IndexSortIT.java @@ -82,25 +82,22 @@ public void testIndexSort() { public void testInvalidIndexSort() { IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, - () -> prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).putList("index.sort.field", "invalid_field")) + prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).putList("index.sort.field", "invalid_field")) .setMapping(TEST_MAPPING) - .get() ); assertThat(exc.getMessage(), containsString("unknown index sort field:[invalid_field]")); exc = expectThrows( IllegalArgumentException.class, - () -> prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).putList("index.sort.field", "numeric")) + prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).putList("index.sort.field", "numeric")) .setMapping(TEST_MAPPING) - .get() ); assertThat(exc.getMessage(), containsString("docvalues not found for index sort field:[numeric]")); exc = expectThrows( IllegalArgumentException.class, - () -> prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).putList("index.sort.field", "keyword")) + prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).putList("index.sort.field", "keyword")) .setMapping(TEST_MAPPING) - .get() ); assertThat(exc.getMessage(), containsString("docvalues not found for index sort field:[keyword]")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java index ee165d1870571..acfc38ca12f89 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java @@ -102,10 +102,7 @@ 
public void testMaxDocsLimit() throws Exception { indexingResult = indexDocs(rejectedRequests, between(1, 8)); assertThat(indexingResult.numFailures, equalTo(rejectedRequests)); assertThat(indexingResult.numSuccess, equalTo(0)); - final IllegalArgumentException deleteError = expectThrows( - IllegalArgumentException.class, - () -> client().prepareDelete("test", "any-id").get() - ); + final IllegalArgumentException deleteError = expectThrows(IllegalArgumentException.class, client().prepareDelete("test", "any-id")); assertThat(deleteError.getMessage(), containsString("Number of documents in the index can't exceed [" + maxDocs.get() + "]")); indicesAdmin().prepareRefresh("test").get(); assertNoFailuresAndResponse( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index 868540ac3e3f8..4c1c564bdc734 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -23,7 +22,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.GeoBoundingBoxQueryBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.plugins.Plugin; @@ -184,10 +182,10 @@ public void onFailure(Exception e) { }); masterBlockedLatch.await(); - final IndexRequestBuilder indexRequestBuilder = prepareIndex("index").setId("2").setSource("nested3", Map.of("foo", "bar")); try { assertThat( - expectThrows(IllegalArgumentException.class, () -> indexRequestBuilder.get(TimeValue.timeValueSeconds(10))).getMessage(), + expectThrows(IllegalArgumentException.class, prepareIndex("index").setId("2").setSource("nested3", Map.of("foo", "bar"))) + .getMessage(), Matchers.containsString("Limit of nested fields [2] has been exceeded") ); } finally { @@ -219,9 +217,8 @@ public void onFailure(Exception e) { }); masterBlockedLatch.await(); - final IndexRequestBuilder indexRequestBuilder = prepareIndex("index").setId("2").setSource("field2", "value2"); try { - Exception e = expectThrows(DocumentParsingException.class, () -> indexRequestBuilder.get(TimeValue.timeValueSeconds(10))); + Exception e = expectThrows(DocumentParsingException.class, prepareIndex("index").setId("2").setSource("field2", "value2")); assertThat(e.getMessage(), Matchers.containsString("failed to parse")); assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); assertThat( @@ -263,9 +260,11 @@ public void testTotalFieldsLimitWithRuntimeFields() { { // introduction of a new object with 2 new sub-fields fails - final IndexRequestBuilder indexRequestBuilder = prepareIndex("index1").setId("1") - .setSource("field3", "value3", "my_object2", Map.of("new_field1", "value1", "new_field2", "value2")); - Exception exc = expectThrows(DocumentParsingException.class, () -> indexRequestBuilder.get(TimeValue.timeValueSeconds(10))); + 
Exception exc = expectThrows( + DocumentParsingException.class, + prepareIndex("index1").setId("1") + .setSource("field3", "value3", "my_object2", Map.of("new_field1", "value1", "new_field2", "value2")) + ); assertThat(exc.getMessage(), Matchers.containsString("failed to parse")); assertThat(exc.getCause(), instanceOf(IllegalArgumentException.class)); assertThat( @@ -501,10 +500,7 @@ public void testDynamicRuntimeObjectFields() { assertHitCount(prepareSearch("test").setQuery(new MatchQueryBuilder("obj.runtime.one", "one")), 1); assertHitCount(prepareSearch("test").setQuery(new MatchQueryBuilder("obj.runtime.one.two", "1")), 1); - Exception exception = expectThrows( - DocumentParsingException.class, - () -> prepareIndex("test").setSource("obj.runtime", "value").get() - ); + Exception exception = expectThrows(DocumentParsingException.class, prepareIndex("test").setSource("obj.runtime", "value")); assertThat( exception.getMessage(), containsString("object mapping for [obj.runtime] tried to parse field [runtime] as object, but found a concrete value") @@ -547,7 +543,7 @@ public void testDynamicRuntimeObjectFields() { // a doc with the same field but a different type causes a conflict Exception e = expectThrows( DocumentParsingException.class, - () -> prepareIndex("test").setId("id").setSource("obj.runtime.dynamic.number", "string").get() + prepareIndex("test").setId("id").setSource("obj.runtime.dynamic.number", "string") ); assertThat( e.getMessage(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java index 52bb5159c9b7d..c0263e273354f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -95,6 +95,8 @@ import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting; import static org.elasticsearch.index.shard.IndexShardTestCase.getTranslog; import static org.elasticsearch.index.shard.IndexShardTestCase.recoverFromStore; +import static org.elasticsearch.test.LambdaMatchers.falseWith; +import static org.elasticsearch.test.LambdaMatchers.trueWith; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; @@ -167,13 +169,13 @@ public void testDurableFlagHasEffect() { } }; setDurability(shard, Translog.Durability.REQUEST); - assertFalse(needsSync.test(translog)); + assertThat(needsSync, falseWith(translog)); setDurability(shard, Translog.Durability.ASYNC); prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).get(); - assertTrue(needsSync.test(translog)); + assertThat(needsSync, trueWith(translog)); setDurability(shard, Translog.Durability.REQUEST); client().prepareDelete("test", "1").get(); - assertFalse(needsSync.test(translog)); + assertThat(needsSync, falseWith(translog)); setDurability(shard, Translog.Durability.ASYNC); client().prepareDelete("test", "2").get(); @@ -185,7 +187,7 @@ public void testDurableFlagHasEffect() { .add(client().prepareDelete("test", "1")) .get() ); - assertFalse(needsSync.test(translog)); + assertThat(needsSync, falseWith(translog)); setDurability(shard, Translog.Durability.ASYNC); assertNoFailures( @@ -195,7 +197,7 @@ public void testDurableFlagHasEffect() { .get() ); 
setDurability(shard, Translog.Durability.REQUEST); - assertTrue(needsSync.test(translog)); + assertThat(needsSync, trueWith(translog)); } private void setDurability(IndexShard shard, Translog.Durability durability) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java index ec79b53ccd174..a99f6c4340941 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -21,7 +21,9 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.nodes.BaseNodeResponse; @@ -182,11 +184,7 @@ public void testCorruptFileAndRecover() throws InterruptedException, IOException .waitForNoRelocatingShards(true) ).actionGet(); if (health.isTimedOut()) { - logger.info( - "cluster state:\n{}\n{}", - clusterAdmin().prepareState().get().getState(), - clusterAdmin().preparePendingClusterTasks().get() - ); + logger.info("cluster state:\n{}\n{}", clusterAdmin().prepareState().get().getState(), getClusterPendingTasks()); assertThat("timed out waiting for green state", health.isTimedOut(), equalTo(false)); } assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -295,11 +293,7 @@ public void testCorruptPrimaryNoReplica() throws ExecutionException, Interrupted if (response.getStatus() != ClusterHealthStatus.RED) { logger.info("Cluster turned red in busy loop: {}", didClusterTurnRed); - logger.info( - "cluster state:\n{}\n{}", - clusterAdmin().prepareState().get().getState(), - clusterAdmin().preparePendingClusterTasks().get() - ); + logger.info("cluster state:\n{}\n{}", clusterAdmin().prepareState().get().getState(), getClusterPendingTasks()); } assertThat(response.getStatus(), is(ClusterHealthStatus.RED)); ClusterState state = clusterAdmin().prepareState().get().getState(); @@ -628,7 +622,10 @@ public void testReplicaCorruption() throws Exception { final Index index = resolveIndex("test"); - final IndicesShardStoresResponse stores = indicesAdmin().prepareShardStores(index.getName()).get(); + final IndicesShardStoresResponse stores = client().execute( + TransportIndicesShardStoresAction.TYPE, + new IndicesShardStoresRequest(index.getName()) + ).get(); for (Map.Entry<Integer, List<IndicesShardStoresResponse.StoreStatus>> shards : stores.getStoreStatuses() .get(index.getName()) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java index d8d9ef47d4451..9618dcf761be9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java @@ -97,7 +97,7 @@ public void onAllNodesStopped() throws Exception { }); assertThat( -
expectThrows(SearchPhaseExecutionException.class, () -> prepareSearch("test").setQuery(matchAllQuery()).get()).getMessage(), + expectThrows(SearchPhaseExecutionException.class, prepareSearch("test").setQuery(matchAllQuery())).getMessage(), containsString("all shards failed") ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java index 5f1b1ab81b9da..bc2f0ec94f0ab 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java @@ -260,7 +260,7 @@ public void testInvalidIndexName() { } public void testDocumentWithBlankFieldName() { - Exception e = expectThrows(DocumentParsingException.class, () -> prepareIndex("test").setId("1").setSource("", "value1_2").get()); + Exception e = expectThrows(DocumentParsingException.class, prepareIndex("test").setId("1").setSource("", "value1_2")); assertThat(e.getMessage(), containsString("failed to parse")); assertThat(e.getCause().getMessage(), containsString("field name cannot be an empty string")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java index 658b9eadd772f..d1462ef8da3dc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java @@ -403,9 +403,9 @@ public void testAllMissingLenient() throws Exception { public void testAllMissingStrict() throws Exception { createIndex("test1"); - expectThrows(IndexNotFoundException.class, () -> prepareSearch("test2").setQuery(matchAllQuery()).get()); + expectThrows(IndexNotFoundException.class, prepareSearch("test2").setQuery(matchAllQuery())); - expectThrows(IndexNotFoundException.class, () -> prepareSearch("test2", "test3").setQuery(matchAllQuery()).get()); + expectThrows(IndexNotFoundException.class, prepareSearch("test2", "test3").setQuery(matchAllQuery())); // you should still be able to run empty searches without things blowing up prepareSearch().setQuery(matchAllQuery()).get().decRef(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java index 9733b2408f886..e0b13d727caa3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java @@ -77,9 +77,8 @@ public void testAnalyzeNumericField() throws IOException { assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setMapping("long", "type=long", "double", "type=double")); ensureGreen("test"); - expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareAnalyze(indexOrAlias(), "123").setField("long").get()); - - expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareAnalyze(indexOrAlias(), "123.0").setField("double").get()); + expectThrows(IllegalArgumentException.class, indicesAdmin().prepareAnalyze(indexOrAlias(), "123").setField("long")); + expectThrows(IllegalArgumentException.class, indicesAdmin().prepareAnalyze(indexOrAlias(), "123.0").setField("double")); } public void 
testAnalyzeWithNoIndex() throws Exception { @@ -280,7 +279,7 @@ public void testDetailAnalyzeWithMultiValues() throws Exception { public void testNonExistTokenizer() { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareAnalyze("this is a test").setAnalyzer("not_exist_analyzer").get() + indicesAdmin().prepareAnalyze("this is a test").setAnalyzer("not_exist_analyzer") ); assertThat(e.getMessage(), startsWith("failed to find global analyzer")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java index 1d41641d027a5..a328148180107 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java @@ -90,10 +90,7 @@ public void testRejectIllegalFlushParameters() { prepareIndex("test").setSource("{}", XContentType.JSON).get(); } assertThat( - expectThrows( - ValidationException.class, - () -> indicesAdmin().flush(new FlushRequest().force(true).waitIfOngoing(false)).actionGet() - ).getMessage(), + expectThrows(ValidationException.class, indicesAdmin().flush(new FlushRequest().force(true).waitIfOngoing(false))).getMessage(), containsString("wait_if_ongoing must be true for a force flush") ); assertThat(indicesAdmin().flush(new FlushRequest().force(true).waitIfOngoing(true)).actionGet().getShardFailures(), emptyArray()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java index 8d7311e4f7619..937addb473f8b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java @@ -64,9 +64,9 @@ public void testBWCMalformedDynamicTemplate() { MapperParsingException ex = expectThrows( MapperParsingException.class, - () -> prepareCreate("malformed_dynamic_template_8.0").setSettings( + prepareCreate("malformed_dynamic_template_8.0").setSettings( Settings.builder().put(indexSettings()).put("number_of_shards", 1).put("index.version.created", IndexVersion.current()) - ).setMapping(mapping).get() + ).setMapping(mapping) ); assertThat(ex.getMessage(), containsString("dynamic template [my_template] has invalid content")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java index 0f7ca38ca8f6b..984082ec65193 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java @@ -159,7 +159,9 @@ public void testSimpleGetFieldMappingsWithPretty() throws Exception { String responseStrings = Strings.toString(responseBuilder); XContentBuilder prettyJsonBuilder = XContentFactory.jsonBuilder().prettyPrint(); - prettyJsonBuilder.copyCurrentStructure(createParser(JsonXContent.jsonXContent, responseStrings)); + try (var parser = createParser(JsonXContent.jsonXContent, responseStrings)) { + prettyJsonBuilder.copyCurrentStructure(parser); + } assertThat(responseStrings, 
equalTo(Strings.toString(prettyJsonBuilder))); params.put("pretty", "false"); @@ -170,7 +172,9 @@ public void testSimpleGetFieldMappingsWithPretty() throws Exception { responseStrings = Strings.toString(responseBuilder); prettyJsonBuilder = XContentFactory.jsonBuilder().prettyPrint(); - prettyJsonBuilder.copyCurrentStructure(createParser(JsonXContent.jsonXContent, responseStrings)); + try (var parser = createParser(JsonXContent.jsonXContent, responseStrings)) { + prettyJsonBuilder.copyCurrentStructure(parser); + } assertThat(responseStrings, not(equalTo(Strings.toString(prettyJsonBuilder)))); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java index 7c0ef90ca8161..3582fa6930f54 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java @@ -8,12 +8,16 @@ package org.elasticsearch.indices.recovery; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.admin.indices.dangling.DanglingIndexInfo; import org.elasticsearch.action.admin.indices.dangling.delete.DeleteDanglingIndexRequest; +import org.elasticsearch.action.admin.indices.dangling.delete.TransportDeleteDanglingIndexAction; import org.elasticsearch.action.admin.indices.dangling.import_index.ImportDanglingIndexRequest; +import org.elasticsearch.action.admin.indices.dangling.import_index.TransportImportDanglingIndexAction; import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesRequest; import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesResponse; import org.elasticsearch.action.admin.indices.dangling.list.NodeListDanglingIndicesResponse; +import org.elasticsearch.action.admin.indices.dangling.list.TransportListDanglingIndicesAction; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -28,7 +32,9 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; @@ -95,7 +101,7 @@ public void testDanglingIndicesCanBeListed() throws Exception { final String stoppedNodeName = createDanglingIndices(INDEX_NAME); - final ListDanglingIndicesResponse response = clusterAdmin().listDanglingIndices(new ListDanglingIndicesRequest()).actionGet(); + final ListDanglingIndicesResponse response = executeListDanglingIndicesAction(); assertThat(response.status(), equalTo(RestStatus.OK)); final List<NodeListDanglingIndicesResponse> nodeResponses = response.getNodes(); @@ -123,27 +129,22 @@ public void testDanglingIndicesCanBeImported() throws Exception { final String danglingIndexUUID = findDanglingIndexForNode(stoppedNodeName, INDEX_NAME); - final ImportDanglingIndexRequest request = new ImportDanglingIndexRequest(danglingIndexUUID, true); - clusterAdmin().importDanglingIndex(request).get(); + importDanglingIndex(new ImportDanglingIndexRequest(danglingIndexUUID, true)); assertTrue("Expected dangling index " + INDEX_NAME + " to be recovered", indexExists(INDEX_NAME)); } /** - * Check that the when sending an import-dangling-indices request, the specified UUIDs are
validated as - * being dangling. + * Check that when sending an import-dangling-indices request, the specified UUIDs are validated as being dangling. */ public void testDanglingIndicesMustExistToBeImported() { internalCluster().startNodes(1, buildSettings(0, true)); final ImportDanglingIndexRequest request = new ImportDanglingIndexRequest("NonExistentUUID", true); - - final IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> clusterAdmin().importDanglingIndex(request).actionGet() + assertThat( + expectThrows(ExecutionException.class, IllegalArgumentException.class, () -> importDanglingIndex(request)).getMessage(), + containsString("No dangling index found for UUID [NonExistentUUID]") ); - - assertThat(e.getMessage(), containsString("No dangling index found for UUID [NonExistentUUID]")); } /** @@ -157,9 +158,10 @@ public void testMustAcceptDataLossToImportDanglingIndex() throws Exception { final ImportDanglingIndexRequest request = new ImportDanglingIndexRequest(danglingIndexUUID, false); - Exception e = expectThrows(Exception.class, () -> clusterAdmin().importDanglingIndex(request).actionGet()); - - assertThat(e.getMessage(), containsString("accept_data_loss must be set to true")); + assertThat( + expectThrows(Exception.class, () -> importDanglingIndex(request)).getMessage(), + containsString("accept_data_loss must be set to true") + ); } /** @@ -180,7 +182,7 @@ public void testDanglingIndexCanBeDeleted() throws Exception { final String stoppedNodeName = createDanglingIndices(INDEX_NAME, OTHER_INDEX_NAME); final String danglingIndexUUID = findDanglingIndexForNode(stoppedNodeName, INDEX_NAME); - clusterAdmin().deleteDanglingIndex(new DeleteDanglingIndexRequest(danglingIndexUUID, true)).actionGet(); + deleteDanglingIndex(new DeleteDanglingIndexRequest(danglingIndexUUID, true)); // The dangling index that we deleted ought to have been removed from disk. Check by // creating and deleting another index, which creates a new tombstone entry, which should @@ -231,7 +233,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { danglingIndices.set(results); // Try to delete the index - this request should succeed - clusterAdmin().deleteDanglingIndex(new DeleteDanglingIndexRequest(danglingIndices.get().get(0).getIndexUUID(), true)).actionGet(); + deleteDanglingIndex(new DeleteDanglingIndexRequest(danglingIndices.get().get(0).getIndexUUID(), true)); // The dangling index that we deleted ought to have been removed from disk. 
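(Aside on the DanglingIndicesIT hunks above and below: the tests stop calling `clusterAdmin().importDanglingIndex(...)`/`deleteDanglingIndex(...)`/`listDanglingIndices(...)` with an unbounded `actionGet()` and instead go through small static helpers that execute the transport action with a capped wait. A condensed restatement of that helper shape, taken from the definitions this diff adds further down in the same file; the 10-second timeout and the `fail(ex)` fallback are theirs, not ours:

```java
// Execute the transport action directly and bound the wait. Interruption or a
// timeout is a test-infrastructure problem, so it fails the test outright;
// ExecutionException is left to propagate so callers can assert on the cause.
private static void importDanglingIndex(ImportDanglingIndexRequest request) throws ExecutionException {
    try {
        client().execute(TransportImportDanglingIndexAction.TYPE, request).get(10, TimeUnit.SECONDS);
    } catch (InterruptedException | TimeoutException ex) {
        fail(ex);
    }
}
```

Letting `ExecutionException` escape is what drives the assertion changes in the surrounding hunks.)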
Check by // creating and deleting another index, which creates a new tombstone entry, which should @@ -252,12 +254,16 @@ public void testDeleteDanglingIndicesRequiresDataLossFlagToBeTrue() throws Excep final String stoppedNodeName = createDanglingIndices(INDEX_NAME, OTHER_INDEX_NAME); final String danglingIndexUUID = findDanglingIndexForNode(stoppedNodeName, INDEX_NAME); - Exception e = expectThrows( - Exception.class, - () -> clusterAdmin().deleteDanglingIndex(new DeleteDanglingIndexRequest(danglingIndexUUID, false)).actionGet() + assertThat( + ExceptionsHelper.unwrapCause( + expectThrows( + ExecutionException.class, + Exception.class, + () -> deleteDanglingIndex(new DeleteDanglingIndexRequest(danglingIndexUUID, false)) + ) + ).getMessage(), + containsString("accept_data_loss must be set to true") ); - - assertThat(e.getMessage(), containsString("accept_data_loss must be set to true")); } /** @@ -279,7 +285,7 @@ public void testDanglingIndicesImportedAndDeletedCannotBeReimported() throws Exc safeAwait(startLatch); while (isImporting.get()) { try { - clusterAdmin().importDanglingIndex(new ImportDanglingIndexRequest(danglingIndexUUID, true)).get(); + importDanglingIndex(new ImportDanglingIndexRequest(danglingIndexUUID, true)); } catch (Exception e) { // failures are expected } @@ -325,8 +331,8 @@ public void testDanglingIndicesImportedAndDeletedCannotBeReimported() throws Exc /** * Helper that fetches the current list of dangling indices. */ - private List<DanglingIndexInfo> listDanglingIndices() { - final ListDanglingIndicesResponse response = clusterAdmin().listDanglingIndices(new ListDanglingIndicesRequest()).actionGet(); + private static List<DanglingIndexInfo> listDanglingIndices() { + final ListDanglingIndicesResponse response = executeListDanglingIndicesAction(); assertThat(response.status(), equalTo(RestStatus.OK)); final List<NodeListDanglingIndicesResponse> nodeResponses = response.getNodes(); @@ -340,6 +346,30 @@ private List<DanglingIndexInfo> listDanglingIndices() { return results; } + private static ListDanglingIndicesResponse executeListDanglingIndicesAction() { + try { + return client().execute(TransportListDanglingIndicesAction.TYPE, new ListDanglingIndicesRequest()).get(10, TimeUnit.SECONDS); + } catch (Exception e) { + return fail(e); + } + } + + private static void importDanglingIndex(ImportDanglingIndexRequest request) throws ExecutionException { + try { + client().execute(TransportImportDanglingIndexAction.TYPE, request).get(10, TimeUnit.SECONDS); + } catch (InterruptedException | TimeoutException ex) { + fail(ex); + } + } + + private static void deleteDanglingIndex(DeleteDanglingIndexRequest request) throws ExecutionException { + try { + client().execute(TransportDeleteDanglingIndexAction.TYPE, request).get(10, TimeUnit.SECONDS); + } catch (InterruptedException | TimeoutException ex) { + fail(ex); + } + } + /** * Simple helper that creates one or more indices, and importantly, * checks that they are green before proceeding.
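(Continuing the aside: because the helpers surface failures wrapped in `ExecutionException`, the assertions in these hunks switch to two idioms. A short restatement of both, where `request` stands for whichever dangling-index request the test built; messages are the ones asserted above:

```java
// Idiom 1: the two-class expectThrows checks the outer exception type and the
// cause type, and hands back the wrapped cause for message assertions.
assertThat(
    expectThrows(ExecutionException.class, IllegalArgumentException.class, () -> importDanglingIndex(request)).getMessage(),
    containsString("No dangling index found for UUID [NonExistentUUID]")
);

// Idiom 2: when the cause type is not pinned down, unwrap it by hand.
assertThat(
    ExceptionsHelper.unwrapCause(
        expectThrows(ExecutionException.class, Exception.class, () -> deleteDanglingIndex(request))
    ).getMessage(),
    containsString("accept_data_loss must be set to true")
);
```
)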
This is important @@ -390,7 +420,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { private String findDanglingIndexForNode(String stoppedNodeName, String indexName) { String danglingIndexUUID = null; - final ListDanglingIndicesResponse response = clusterAdmin().listDanglingIndices(new ListDanglingIndicesRequest()).actionGet(); + final ListDanglingIndicesResponse response = executeListDanglingIndicesAction(); assertThat(response.status(), equalTo(RestStatus.OK)); final List<NodeListDanglingIndicesResponse> nodeResponses = response.getNodes(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java index 0fe5845e9ed32..779072272e59a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java @@ -8,22 +8,23 @@ package org.elasticsearch.indices.recovery; +import org.apache.logging.log4j.Level; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class IndexPrimaryRelocationIT extends ESIntegTestCase { @@ -71,20 +72,14 @@ public void run() { .setWaitForNoRelocatingShards(true) .get(); if (clusterHealthResponse.isTimedOut()) { - final String hotThreads = clusterAdmin().prepareNodesHotThreads() - .setIgnoreIdleThreads(false) - .get() - .getNodes() - .stream() - .map(NodeHotThreads::getHotThreads) - .collect(Collectors.joining("\n")); - final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); - logger.info( - "timed out for waiting for relocation iteration [{}] \ncluster state {} \nhot threads {}", - i, - clusterState, - hotThreads + HotThreads.logLocalHotThreads( + logger, + Level.INFO, + "timed out waiting for relocation iteration [" + i + "]", + ReferenceDocs.LOGGING ); + final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + logger.info("timed out for waiting for relocation iteration [{}] \ncluster state {}", i, clusterState); finished.set(true); indexingThread.join(); throw new AssertionError("timed out waiting for relocation iteration [" + i + "] "); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index d40d2e02415b1..bd400f9f0f6a1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++
b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -782,10 +782,13 @@ public Settings onNodeStopped(String nodeName) { * Tests shard recovery throttling on the target node. Node statistics should show throttling time on the target node, while no * throttling should be shown on the source node because the target will accept data more slowly than the source's throttling threshold. */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103204") public void testTargetThrottling() throws Exception { logger.info("--> starting node A with default settings"); - final String nodeA = internalCluster().startNode(); + final String nodeA = internalCluster().startNode( + Settings.builder() + // Use a high value so that when unthrottling recoveries we do not cause accidental throttling on the source node. + .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "200mb") + ); logger.info("--> creating index on node A"); ByteSizeValue shardSize = createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT_1, REPLICA_COUNT_0).getShards()[0].getStats() @@ -1745,12 +1748,12 @@ public void testReservesBytesDuringPeerRecoveryPhaseOne() throws Exception { .getNodes() .get(0) .getIndices(); - assertThat(nodeIndicesStats.getStore().getReservedSize().getBytes(), equalTo(0L)); + assertThat(nodeIndicesStats.getStore().reservedSizeInBytes(), equalTo(0L)); assertThat( nodeIndicesStats.getShardStats(clusterState.metadata().index(indexName).getIndex()) .stream() .flatMap(s -> Arrays.stream(s.getShards())) - .map(s -> s.getStats().getStore().getReservedSize().getBytes()) + .map(s -> s.getStats().getStore().reservedSizeInBytes()) .toList(), everyItem(equalTo(StoreStats.UNKNOWN_RESERVED_BYTES)) ); @@ -1766,8 +1769,7 @@ public void testReservesBytesDuringPeerRecoveryPhaseOne() throws Exception { .get(0) .getIndices() .getStore() - .getReservedSize() - .getBytes(), + .reservedSizeInBytes(), greaterThan(0L) ); } @@ -1785,7 +1787,7 @@ public void testReservesBytesDuringPeerRecoveryPhaseOne() throws Exception { .get() .getNodes() .stream() - .mapToLong(n -> n.getIndices().getStore().getReservedSize().getBytes()) + .mapToLong(n -> n.getIndices().getStore().reservedSizeInBytes()) .sum(), equalTo(0L) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/InternalSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/InternalSettingsIT.java index a0a070b3e0eec..3c62f859b3f31 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/InternalSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/InternalSettingsIT.java @@ -42,9 +42,7 @@ public void testUpdateInternalIndexSettingViaSettingsAPI() { // we can not update the setting via the update settings API final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().put("index.internal", "internal-update")) - .get() + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.internal", "internal-update")) ); final String message = "can not update internal setting [index.internal]; this setting is managed via a dedicated API"; assertThat(e, hasToString(containsString(message))); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java index 3e8d34222b1e3..0ebd276511795 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java @@ -42,7 +42,7 @@ public void testUpdatePrivateIndexSettingViaSettingsAPI() { // we can not update the setting via the update settings API final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.private", "private-update")).get() + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.private", "private-update")) ); final String message = "can not update private setting [index.private]; this setting is managed by Elasticsearch"; assertThat(e, hasToString(containsString(message))); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java index 563e6e0761cb1..165b776f1ebc1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.indices.settings; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.cluster.ClusterState; @@ -39,7 +40,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -51,9 +51,8 @@ public void testInvalidUpdateOnClosedIndex() { assertAcked(indicesAdmin().prepareClose("test").get()); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test") + indicesAdmin().prepareUpdateSettings("test") .setSettings(Settings.builder().put("index.analysis.char_filter.invalid_char.type", "invalid")) - .get() ); assertEquals(exception.getMessage(), "Unknown char_filter type [invalid] for [invalid_char]"); } @@ -62,7 +61,7 @@ public void testInvalidDynamicUpdate() { createIndex("test"); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.dummy", "boom")).get() + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.dummy", "boom")) ); assertEquals(exception.getCause().getMessage(), "this setting goes boom"); IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); @@ -142,22 +141,21 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { public void testUpdateDependentClusterSettings() { IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - 
() -> clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")).get() + clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")) ); assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); iae = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings().setTransientSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")).get() + clusterAdmin().prepareUpdateSettings().setTransientSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")) ); assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); iae = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings() .setTransientSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")) .setPersistentSettings(Settings.builder().put("cluster.acc.test.user", "asdf")) - .get() ); assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); @@ -167,7 +165,7 @@ public void testUpdateDependentClusterSettings() { .get(); iae = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings().setTransientSettings(Settings.builder().putNull("cluster.acc.test.user")).get() + clusterAdmin().prepareUpdateSettings().setTransientSettings(Settings.builder().putNull("cluster.acc.test.user")) ); assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); clusterAdmin().prepareUpdateSettings() @@ -180,9 +178,7 @@ public void testUpdateDependentClusterSettings() { iae = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings() - .setPersistentSettings(Settings.builder().putNull("cluster.acc.test.user")) - .get() + clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().putNull("cluster.acc.test.user")) ); assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); @@ -193,7 +189,7 @@ public void testUpdateDependentClusterSettings() { public void testUpdateDependentIndexSettings() { IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> prepareCreate("test", Settings.builder().put("index.acc.test.pw", "asdf")).get() + prepareCreate("test", Settings.builder().put("index.acc.test.pw", "asdf")) ); assertEquals("missing required setting [index.acc.test.user] for setting [index.acc.test.pw]", iae.getMessage()); @@ -206,7 +202,7 @@ public void testUpdateDependentIndexSettings() { iae = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.acc.test.pw", "asdf")).get() + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.acc.test.pw", "asdf")) ); assertEquals("missing required setting [index.acc.test.user] for setting [index.acc.test.pw]", iae.getMessage()); @@ -219,7 +215,7 @@ public void testUpdateDependentIndexSettings() { // now try to remove it and make sure it fails iae = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().putNull("index.acc.test.user")).get() + 
indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().putNull("index.acc.test.user")) ); assertEquals("missing required setting [index.acc.test.user] for setting [index.acc.test.pw]", iae.getMessage()); @@ -290,23 +286,22 @@ public void testOpenCloseUpdateSettings() throws Exception { createIndex("test"); expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test") + indicesAdmin().prepareUpdateSettings("test") .setSettings( Settings.builder() .put("index.refresh_interval", -1) // this one can change .put("index.fielddata.cache", "none") ) // this one can't - .get() + ); expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test") + indicesAdmin().prepareUpdateSettings("test") .setSettings( Settings.builder() .put("index.refresh_interval", -1) // this one can change .put("index.final", "no") ) // this one can't - .get() ); IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertThat(indexMetadata.getSettings().get("index.refresh_interval"), nullValue()); @@ -360,13 +355,12 @@ public void testOpenCloseUpdateSettings() throws Exception { IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test") + indicesAdmin().prepareUpdateSettings("test") .setSettings( Settings.builder() .put("index.refresh_interval", -1) // this one can change .put("index.final", "no") ) // this one really can't - .get() ); assertThat(ex.getMessage(), containsString("final test setting [index.final], not updateable")); indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); @@ -384,10 +378,11 @@ public void testEngineGCDeletesSetting() throws Exception { prepareIndex("test").setId("1").setSource("f", 1).setVersionType(VersionType.EXTERNAL).setVersion(1).get(); client().prepareDelete("test", "1").setVersionType(VersionType.EXTERNAL).setVersion(2).get(); // delete is still in cache this should fail - assertRequestBuilderThrows( - prepareIndex("test").setId("1").setSource("f", 3).setVersionType(VersionType.EXTERNAL).setVersion(1), - VersionConflictEngineException.class - ); + ActionRequestBuilder builder = prepareIndex("test").setId("1") + .setSource("f", 3) + .setVersionType(VersionType.EXTERNAL) + .setVersion(1); + expectThrows(VersionConflictEngineException.class, builder); assertAcked(indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.gc_deletes", 0))); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java index 2b07f36551279..6b1aafe2f9b17 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java @@ -37,7 +37,7 @@ public void testCloseAllRequiresName() { IllegalStateException illegalStateException = expectThrows( IllegalStateException.class, - () -> indicesAdmin().prepareClose("test_no_close").get() + indicesAdmin().prepareClose("test_no_close") ); assertEquals( illegalStateException.getMessage(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java index 2ef7dc560b768..77cdc2e99977d 
100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -79,16 +79,13 @@ public Settings indexSettings() { } public void testCloseMissingIndex() { - IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareClose("test").get()); + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareClose("test")); assertThat(e.getMessage(), is("no such index [test]")); } public void testCloseOneMissingIndex() { createIndex("test1"); - final IndexNotFoundException e = expectThrows( - IndexNotFoundException.class, - () -> indicesAdmin().prepareClose("test1", "test2").get() - ); + final IndexNotFoundException e = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareClose("test1", "test2")); assertThat(e.getMessage(), is("no such index [test2]")); } @@ -99,17 +96,14 @@ public void testCloseOneMissingIndexIgnoreMissing() throws Exception { } public void testCloseNoIndex() { - final ActionRequestValidationException e = expectThrows( - ActionRequestValidationException.class, - () -> indicesAdmin().prepareClose().get() - ); + final ActionRequestValidationException e = expectThrows(ActionRequestValidationException.class, indicesAdmin().prepareClose()); assertThat(e.getMessage(), containsString("index is missing")); } public void testCloseNullIndex() { final ActionRequestValidationException e = expectThrows( ActionRequestValidationException.class, - () -> indicesAdmin().prepareClose((String[]) null).get() + indicesAdmin().prepareClose((String[]) null) ); assertThat(e.getMessage(), containsString("index is missing")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java index 021515eb4cbcc..61bb48b7f7583 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java @@ -62,7 +62,7 @@ public void testSimpleCloseOpen() { } public void testSimpleOpenMissingIndex() { - Exception e = expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareOpen("test1").get()); + Exception e = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareOpen("test1")); assertThat(e.getMessage(), is("no such index [test1]")); } @@ -71,7 +71,7 @@ public void testOpenOneMissingIndex() { createIndex("test1"); ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - Exception e = expectThrows(IndexNotFoundException.class, () -> client.admin().indices().prepareOpen("test1", "test2").get()); + Exception e = expectThrows(IndexNotFoundException.class, client.admin().indices().prepareOpen("test1", "test2")); assertThat(e.getMessage(), is("no such index [test2]")); } @@ -162,12 +162,12 @@ public void testCloseOpenAllWildcard() { } public void testOpenNoIndex() { - Exception e = expectThrows(ActionRequestValidationException.class, () -> indicesAdmin().prepareOpen().get()); + Exception e = expectThrows(ActionRequestValidationException.class, indicesAdmin().prepareOpen()); assertThat(e.getMessage(), containsString("index is missing")); } public void testOpenNullIndex() { - Exception e = 
expectThrows(ActionRequestValidationException.class, () -> indicesAdmin().prepareOpen((String[]) null).get()); + Exception e = expectThrows(ActionRequestValidationException.class, indicesAdmin().prepareOpen((String[]) null)); assertThat(e.getMessage(), containsString("index is missing")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java index 0e385768fc256..c97548942fc7d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java @@ -9,7 +9,7 @@ package org.elasticsearch.indices.template; import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.Template; @@ -50,8 +50,8 @@ public void testComponentTemplatesCanBeUpdatedAfterRestart() throws Exception { .metadata(Collections.singletonMap("egg", "bread")) .build(); client().execute( - PutComposableIndexTemplateAction.INSTANCE, - new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(cit) + TransportPutComposableIndexTemplateAction.TYPE, + new TransportPutComposableIndexTemplateAction.Request("my-it").indexTemplate(cit) ).get(); internalCluster().fullRestart(); @@ -85,8 +85,8 @@ public void testComponentTemplatesCanBeUpdatedAfterRestart() throws Exception { .metadata(Collections.singletonMap("egg", "bread")) .build(); client().execute( - PutComposableIndexTemplateAction.INSTANCE, - new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(cit2) + TransportPutComposableIndexTemplateAction.TYPE, + new TransportPutComposableIndexTemplateAction.Request("my-it").indexTemplate(cit2) ).get(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index 25cdd413aec2b..1e1333f376e9f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -7,12 +7,12 @@ */ package org.elasticsearch.indices.template; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; @@ -123,27 +123,25 @@ public void testSimpleIndexTemplateTests() throws Exception { .get(); // test create param - 
assertRequestBuilderThrows( - indicesAdmin().preparePutTemplate("template_2") - .setPatterns(Collections.singletonList("test*")) - .setSettings(indexSettings()) - .setCreate(true) - .setOrder(1) - .setMapping( - XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("field2") - .field("type", "text") - .field("store", false) - .endObject() - .endObject() - .endObject() - .endObject() - ), - IllegalArgumentException.class - ); + ActionRequestBuilder<?, ?> builder = indicesAdmin().preparePutTemplate("template_2") + .setPatterns(Collections.singletonList("test*")) + .setSettings(indexSettings()) + .setCreate(true) + .setOrder(1) + .setMapping( + XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("field2") + .field("type", "text") + .field("store", false) + .endObject() + .endObject() + .endObject() + .endObject() + ); + expectThrows(IllegalArgumentException.class, builder); response = indicesAdmin().prepareGetTemplates().get(); assertThat(response.getIndexTemplates(), hasSize(2)); @@ -432,10 +430,9 @@ public void testBrokenMapping() throws Exception { MapperParsingException e = expectThrows( MapperParsingException.class, - () -> indicesAdmin().preparePutTemplate("template_1") + indicesAdmin().preparePutTemplate("template_1") .setPatterns(Collections.singletonList("te*")) .setMapping("{\"foo\": \"abcde\"}", XContentType.JSON) - .get() ); assertThat(e.getMessage(), containsString("Failed to parse mapping")); @@ -453,10 +450,9 @@ public void testInvalidSettings() throws Exception { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().preparePutTemplate("template_1") + indicesAdmin().preparePutTemplate("template_1") .setPatterns(Collections.singletonList("te*")) .setSettings(Settings.builder().put("does_not_exist", "test")) - .get() ); assertEquals( "unknown setting [index.does_not_exist] please check that any required plugins are" @@ -625,11 +621,12 @@ public void testAliasInvalidFilterValidJson() throws Exception { public void testAliasInvalidFilterInvalidJson() throws Exception { // invalid json: put index template fails - PutIndexTemplateRequestBuilder putIndexTemplateRequestBuilder = indicesAdmin().preparePutTemplate("template_1") - .setPatterns(Collections.singletonList("te*")) - .addAlias(new Alias("invalid_alias").filter("abcde")); - - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> putIndexTemplateRequestBuilder.get()); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + indicesAdmin().preparePutTemplate("template_1") + .setPatterns(Collections.singletonList("te*")) + .addAlias(new Alias("invalid_alias").filter("abcde")) + ) assertThat(e.getMessage(), equalTo("failed to parse filter for alias [invalid_alias]")); GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates("template_1").get(); @@ -646,20 +643,22 @@ public void testAliasNameExistingIndex() throws Exception { } public void testAliasEmptyName() throws Exception { - PutIndexTemplateRequestBuilder putIndexTemplateRequestBuilder = indicesAdmin().preparePutTemplate("template_1") - .setPatterns(Collections.singletonList("te*")) - .addAlias(new Alias(" ").indexRouting("1,2,3")); - - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> putIndexTemplateRequestBuilder.get()); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, +
indicesAdmin().preparePutTemplate("template_1") + .setPatterns(Collections.singletonList("te*")) + .addAlias(new Alias(" ").indexRouting("1,2,3")) + ); assertThat(e.getMessage(), equalTo("alias name is required")); } public void testAliasWithMultipleIndexRoutings() throws Exception { - PutIndexTemplateRequestBuilder putIndexTemplateRequestBuilder = indicesAdmin().preparePutTemplate("template_1") - .setPatterns(Collections.singletonList("te*")) - .addAlias(new Alias("alias").indexRouting("1,2,3")); - - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> putIndexTemplateRequestBuilder.get()); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + indicesAdmin().preparePutTemplate("template_1") + .setPatterns(Collections.singletonList("te*")) + .addAlias(new Alias("alias").indexRouting("1,2,3")) + ); assertThat(e.getMessage(), equalTo("alias [alias] has several index routing values associated with it")); } @@ -768,7 +767,7 @@ public void testCombineTemplates() throws Exception { // put template using custom_1 analyzer MapperParsingException e = expectThrows( MapperParsingException.class, - () -> indicesAdmin().preparePutTemplate("template_2") + indicesAdmin().preparePutTemplate("template_2") .setPatterns(Collections.singletonList("test*")) .setCreate(true) .setOrder(1) @@ -785,7 +784,6 @@ public void testCombineTemplates() throws Exception { .endObject() .endObject() ) - .get() ); assertThat(e.getMessage(), containsString("analyzer [custom_1] has not been configured in mappings")); @@ -873,10 +871,9 @@ public void testPartitionedTemplate() throws Exception { // provide more partitions than shards IllegalArgumentException eBadSettings = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().preparePutTemplate("template_1") + indicesAdmin().preparePutTemplate("template_1") .setPatterns(Collections.singletonList("te*")) .setSettings(Settings.builder().put("index.number_of_shards", "5").put("index.routing_partition_size", "6")) - .get() ); assertThat( eBadSettings.getMessage(), @@ -886,11 +883,10 @@ public void testPartitionedTemplate() throws Exception { // provide an invalid mapping for a partitioned index IllegalArgumentException eBadMapping = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().preparePutTemplate("template_2") + indicesAdmin().preparePutTemplate("template_2") .setPatterns(Collections.singletonList("te*")) .setMapping("{\"_doc\":{\"_routing\":{\"required\":false}}}", XContentType.JSON) .setSettings(Settings.builder().put("index.number_of_shards", "6").put("index.routing_partition_size", "3")) - .get() ); assertThat(eBadMapping.getMessage(), containsString("must have routing required for partitioned index")); @@ -908,8 +904,7 @@ public void testPartitionedTemplate() throws Exception { // create an index with too few shards IllegalArgumentException eBadIndex = expectThrows( IllegalArgumentException.class, - () -> prepareCreate("test_bad", Settings.builder().put("index.number_of_shards", 5).put("index.number_of_routing_shards", 5)) - .get() + prepareCreate("test_bad", Settings.builder().put("index.number_of_shards", 5).put("index.number_of_routing_shards", 5)) ); assertThat( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java index afc39cd6b4d7e..9fd7aaabaf2f5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java @@ -264,7 +264,7 @@ public void testPutWithPipelineFactoryError() throws Exception { .endObject() ); PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id2", source, XContentType.JSON); - Exception e = expectThrows(ElasticsearchParseException.class, () -> clusterAdmin().putPipeline(putPipelineRequest).actionGet()); + Exception e = expectThrows(ElasticsearchParseException.class, clusterAdmin().putPipeline(putPipelineRequest)); assertThat(e.getMessage(), equalTo("processor [test] doesn't support one or more provided configuration parameters [unused]")); GetPipelineResponse response = clusterAdmin().prepareGetPipeline("_id2").get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java index fc0efca802370..a3c43de39218d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java @@ -10,8 +10,8 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.ingest.PutPipelineAction; import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.action.ingest.ReservedPipelineAction; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; @@ -177,7 +177,7 @@ private void assertPipelinesSaveOK(CountDownLatch savedClusterState, AtomicLong + "[[my_ingest_pipeline] set as read-only by [file_settings]]", expectThrows( IllegalArgumentException.class, - () -> client().execute(PutPipelineAction.INSTANCE, sampleRestRequest("my_ingest_pipeline")).actionGet() + client().execute(PutPipelineTransportAction.TYPE, sampleRestRequest("my_ingest_pipeline")) ).getMessage() ); } @@ -221,7 +221,7 @@ private void assertPipelinesNotSaved(CountDownLatch savedClusterState, AtomicLon assertTrue(awaitSuccessful); // This should succeed, nothing was reserved - client().execute(PutPipelineAction.INSTANCE, sampleRestRequest("my_ingest_pipeline_bad")).get(); + client().execute(PutPipelineTransportAction.TYPE, sampleRestRequest("my_ingest_pipeline_bad")).get(); } public void testErrorSaved() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java index 2a4174ba427af..ded319fd0848d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java @@ -143,7 +143,10 @@ public void testIngestStatsNamesAndTypes() throws IOException { builder.startObject(); response.toXContent(builder, new ToXContent.MapParams(Map.of())); builder.endObject(); - Map<String, Object> stats = createParser(JsonXContent.jsonXContent, Strings.toString(builder)).map(); + Map<String, Object> stats; + try (var parser = createParser(JsonXContent.jsonXContent, Strings.toString(builder))) { + stats = parser.map(); + } int setProcessorCount = path(stats, "nodes.ingest.processor_stats.set.count"); assertThat(setProcessorCount, equalTo(3)); diff --git
a/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java index 626a1573a66db..a49fadb0c4b5b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java @@ -37,8 +37,8 @@ public void testDeleteIndexIsRejected() throws Exception { // Special "match none" pattern succeeds, since non-destructive assertAcked(indicesAdmin().prepareDelete("*", "-*").get()); - expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareDelete("i*").get()); - expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareDelete("_all").get()); + expectThrows(IllegalArgumentException.class, indicesAdmin().prepareDelete("i*")); + expectThrows(IllegalArgumentException.class, indicesAdmin().prepareDelete("_all")); } public void testDeleteIndexDefaultBehaviour() throws Exception { @@ -67,8 +67,8 @@ public void testCloseIndexIsRejected() throws Exception { // Special "match none" pattern succeeds, since non-destructive assertAcked(indicesAdmin().prepareClose("*", "-*").get()); - expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareClose("i*").get()); - expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareClose("_all").get()); + expectThrows(IllegalArgumentException.class, indicesAdmin().prepareClose("i*")); + expectThrows(IllegalArgumentException.class, indicesAdmin().prepareClose("_all")); } public void testCloseIndexDefaultBehaviour() throws Exception { @@ -99,8 +99,8 @@ public void testOpenIndexIsRejected() throws Exception { // Special "match none" pattern succeeds, since non-destructive assertAcked(indicesAdmin().prepareOpen("*", "-*").get()); - expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareOpen("i*").get()); - expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareOpen("_all").get()); + expectThrows(IllegalArgumentException.class, indicesAdmin().prepareOpen("i*")); + expectThrows(IllegalArgumentException.class, indicesAdmin().prepareOpen("_all")); } public void testOpenIndexDefaultBehaviour() throws Exception { @@ -133,8 +133,8 @@ public void testAddIndexBlockIsRejected() throws Exception { // Special "match none" pattern succeeds, since non-destructive assertAcked(indicesAdmin().prepareAddBlock(WRITE, "*", "-*").get()); - expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareAddBlock(WRITE, "i*").get()); - expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareAddBlock(WRITE, "_all").get()); + expectThrows(IllegalArgumentException.class, indicesAdmin().prepareAddBlock(WRITE, "i*")); + expectThrows(IllegalArgumentException.class, indicesAdmin().prepareAddBlock(WRITE, "_all")); } public void testAddIndexBlockDefaultBehaviour() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java index 450b27eb0db8b..3c06a4c084e04 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java @@ -103,10 +103,7 @@ private void assertMasterNode(Client client, String node) { } private void 
expectMasterNotFound() { - expectThrows( - MasterNotDiscoveredException.class, - () -> clusterAdmin().prepareState().setMasterNodeTimeout("100ms").get().getState().nodes().getMasterNodeId() - ); + expectThrows(MasterNotDiscoveredException.class, clusterAdmin().prepareState().setMasterNodeTimeout("100ms")); } public void testReadinessDuringRestarts() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java index f77cc9ce20020..4b9e4e0fa0932 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java @@ -64,7 +64,7 @@ public void testGetShardSnapshotFromUnknownRepoReturnsAnError() throws Exception ); } } else { - expectThrows(RepositoryException.class, responseFuture::actionGet); + expectThrows(RepositoryException.class, responseFuture); } disableRepoConsistencyCheck("This test checks an empty repository"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java index 730cdba059a69..f931eb717457d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java @@ -106,7 +106,7 @@ public void testCreateInvalidRepository() throws Exception { // verification should fail with some node has InvalidRepository final var expectedException = expectThrows( RepositoryVerificationException.class, - () -> clusterAdmin().prepareVerifyRepository(repositoryName).get() + clusterAdmin().prepareVerifyRepository(repositoryName) ); for (Throwable suppressed : expectedException.getSuppressed()) { Throwable outerCause = suppressed.getCause(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreSizeLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreSizeLimitIT.java index ffe6133e034bc..efc534043ac1e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreSizeLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreSizeLimitIT.java @@ -35,7 +35,7 @@ public void testBlobStoreSizeIsLimited() throws Exception { ); final List<String> snapshotNames = createNSnapshots(repoName, maxSnapshots); final ActionFuture<CreateSnapshotResponse> failingSnapshotFuture = startFullSnapshot(repoName, "failing-snapshot"); - final SnapshotException snapshotException = expectThrows(SnapshotException.class, failingSnapshotFuture::actionGet); + final SnapshotException snapshotException = expectThrows(SnapshotException.class, failingSnapshotFuture); assertThat(snapshotException.getRepositoryName(), equalTo(repoName)); assertThat(snapshotException.getSnapshotName(), equalTo("failing-snapshot")); assertThat(snapshotException.getCause(), instanceOf(RepositoryException.class)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java index 895cd3d2a01e7..4ce92610eff17 100644 ---
a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java @@ -13,7 +13,7 @@ import org.elasticsearch.action.admin.indices.template.get.GetComponentTemplateAction; import org.elasticsearch.action.admin.indices.template.get.GetComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.reservedstate.ReservedComposableIndexTemplateAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -416,14 +416,14 @@ private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLo assertTrue( expectThrows( IllegalArgumentException.class, - () -> client().execute(PutComponentTemplateAction.INSTANCE, sampleComponentRestRequest("component_template1")).actionGet() + client().execute(PutComponentTemplateAction.INSTANCE, sampleComponentRestRequest("component_template1")) ).getMessage().contains("[[component_template:component_template1] set as read-only by [file_settings]]") ); assertTrue( expectThrows( IllegalArgumentException.class, - () -> client().execute(PutComposableIndexTemplateAction.INSTANCE, sampleIndexTemplateRestRequest("template_1")).actionGet() + client().execute(TransportPutComposableIndexTemplateAction.TYPE, sampleIndexTemplateRestRequest("template_1")) ).getMessage().contains("[[composable_index_template:template_1] set as read-only by [file_settings]]") ); } @@ -485,8 +485,7 @@ private void assertComponentAndIndexTemplateDelete(CountDownLatch savedClusterSt assertTrue( expectThrows( IllegalArgumentException.class, - () -> client().execute(PutComposableIndexTemplateAction.INSTANCE, sampleIndexTemplateRestRequest("template_other")) - .actionGet() + client().execute(TransportPutComposableIndexTemplateAction.TYPE, sampleIndexTemplateRestRequest("template_other")) ).getMessage() .contains( "with errors: [[component_template:runtime_component_template, " @@ -495,20 +494,21 @@ private void assertComponentAndIndexTemplateDelete(CountDownLatch savedClusterSt ); // this will work now, we are saving template without components - client().execute(PutComposableIndexTemplateAction.INSTANCE, sampleIndexTemplateRestRequestNoComponents("template_other")).get(); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, sampleIndexTemplateRestRequestNoComponents("template_other")) + .get(); // the rest are still locked assertTrue( expectThrows( IllegalArgumentException.class, - () -> client().execute(PutComponentTemplateAction.INSTANCE, sampleComponentRestRequest("component_template1")).actionGet() + client().execute(PutComponentTemplateAction.INSTANCE, sampleComponentRestRequest("component_template1")) ).getMessage().contains("[[component_template:component_template1] set as read-only by [file_settings]]") ); assertTrue( expectThrows( IllegalArgumentException.class, - () -> client().execute(PutComposableIndexTemplateAction.INSTANCE, sampleIndexTemplateRestRequest("template_1")).actionGet() + client().execute(TransportPutComposableIndexTemplateAction.TYPE, sampleIndexTemplateRestRequest("template_1")) 
).getMessage().contains("[[composable_index_template:template_1] set as read-only by [file_settings]]") ); } @@ -606,7 +606,7 @@ private void assertClusterStateNotSaved(CountDownLatch savedClusterState, Atomic assertTrue(response.indexTemplates().isEmpty()); // This should succeed, nothing was reserved - client().execute(PutComposableIndexTemplateAction.INSTANCE, sampleIndexTemplateRestRequestNoComponents("err_template")).get(); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, sampleIndexTemplateRestRequestNoComponents("err_template")).get(); } public void testErrorSaved() throws Exception { @@ -645,7 +645,7 @@ private PutComponentTemplateAction.Request sampleComponentRestRequest(String nam } } - private PutComposableIndexTemplateAction.Request sampleIndexTemplateRestRequest(String name) throws Exception { + private TransportPutComposableIndexTemplateAction.Request sampleIndexTemplateRestRequest(String name) throws Exception { var json = """ { "index_patterns": ["te*", "bar*"], @@ -683,11 +683,11 @@ private PutComposableIndexTemplateAction.Request sampleIndexTemplateRestRequest( var bis = new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8)); var parser = JSON.xContent().createParser(XContentParserConfiguration.EMPTY, bis) ) { - return new PutComposableIndexTemplateAction.Request(name).indexTemplate(ComposableIndexTemplate.parse(parser)); + return new TransportPutComposableIndexTemplateAction.Request(name).indexTemplate(ComposableIndexTemplate.parse(parser)); } } - private PutComposableIndexTemplateAction.Request sampleIndexTemplateRestRequestNoComponents(String name) throws Exception { + private TransportPutComposableIndexTemplateAction.Request sampleIndexTemplateRestRequestNoComponents(String name) throws Exception { var json = """ { "index_patterns": ["aa*", "vv*"], @@ -724,7 +724,7 @@ private PutComposableIndexTemplateAction.Request sampleIndexTemplateRestRequestN var bis = new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8)); var parser = JSON.xContent().createParser(XContentParserConfiguration.EMPTY, bis) ) { - return new PutComposableIndexTemplateAction.Request(name).indexTemplate(ComposableIndexTemplate.parse(parser)); + return new TransportPutComposableIndexTemplateAction.Request(name).indexTemplate(ComposableIndexTemplate.parse(parser)); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java index 9d6a53d8bc818..fa5d8d93c9e45 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java @@ -149,10 +149,8 @@ private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLo "Failed to process request " + "[org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest/unset] " + "with errors: [[repo] set as read-only by [file_settings]]", - expectThrows( - IllegalArgumentException.class, - () -> client().execute(TransportPutRepositoryAction.TYPE, sampleRestRequest("repo")).actionGet() - ).getMessage() + expectThrows(IllegalArgumentException.class, client().execute(TransportPutRepositoryAction.TYPE, sampleRestRequest("repo"))) + .getMessage() ); } @@ -206,7 +204,7 @@ private void assertClusterStateNotSaved(CountDownLatch savedClusterState, Atomic "[err-repo] missing", 
expectThrows( RepositoryMissingException.class, - () -> client().execute(GetRepositoriesAction.INSTANCE, new GetRepositoriesRequest(new String[] { "err-repo" })).actionGet() + client().execute(GetRepositoriesAction.INSTANCE, new GetRepositoriesRequest(new String[] { "err-repo" })) ).getMessage() ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java index adac5b3482107..619e7c9d9edec 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java @@ -54,9 +54,9 @@ public void testBasics() { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> { clusterAdmin().preparePutStoredScript().setId("id#").setContent(new BytesArray(Strings.format(""" + clusterAdmin().preparePutStoredScript().setId("id#").setContent(new BytesArray(Strings.format(""" {"script": {"lang": "%s", "source": "1"} } - """, LANG)), XContentType.JSON).get(); } + """, LANG)), XContentType.JSON) ); assertEquals("Validation Failed: 1: id cannot contain '#' for stored script;", e.getMessage()); } @@ -64,9 +64,9 @@ public void testBasics() { public void testMaxScriptSize() { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> { clusterAdmin().preparePutStoredScript().setId("foobar").setContent(new BytesArray(Strings.format(""" + clusterAdmin().preparePutStoredScript().setId("foobar").setContent(new BytesArray(Strings.format(""" {"script": { "lang": "%s", "source":"0123456789abcdef"} }\ - """, LANG)), XContentType.JSON).get(); } + """, LANG)), XContentType.JSON) ); assertEquals("exceeded max allowed stored script size in bytes [64] with size [65] for script [foobar]", e.getMessage()); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java index aaf218e3579be..58c260fb70dfb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java @@ -237,6 +237,7 @@ public void testCancelMultiSearch() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99929") public void testCancelFailedSearchWhenPartialResultDisallowed() throws Exception { // Have at least two nodes so that we have parallel execution of two request guaranteed even if max concurrent requests per node // are limited to 1 @@ -249,11 +250,10 @@ public void testCancelFailedSearchWhenPartialResultDisallowed() throws Exception Thread searchThread = new Thread(() -> { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH) + prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH) .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", SEARCH_BLOCK_SCRIPT_NAME, Collections.emptyMap()))) .setAllowPartialSearchResults(false) .setSize(1000) - .get() ); assertThat(e.getMessage(), containsString("Partial shards failure")); }); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchTimeoutIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchTimeoutIT.java index ecf839bff5e4c..702d4a99df2f1 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchTimeoutIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchTimeoutIT.java @@ -94,10 +94,9 @@ public void testPartialResultsIntolerantTimeout() throws Exception { ElasticsearchException ex = expectThrows( ElasticsearchException.class, - () -> prepareSearch("test").setTimeout(new TimeValue(10, TimeUnit.MILLISECONDS)) + prepareSearch("test").setTimeout(new TimeValue(10, TimeUnit.MILLISECONDS)) .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", SCRIPT_NAME, Collections.emptyMap()))) .setAllowPartialSearchResults(false) // this line causes timeouts to report failures - .get() ); assertTrue(ex.toString().contains("Time exceeded")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java index 0a726fcec5a88..0b92372652597 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java @@ -762,9 +762,8 @@ public void testRangeWithFormatStringValue() throws Exception { // providing numeric input without format should throw an exception ElasticsearchException e = expectThrows( ElasticsearchException.class, - () -> prepareSearch(indexName).setSize(0) + prepareSearch(indexName).setSize(0) .addAggregation(dateRange("date_range").field("date").addRange(1000000, 3000000).addRange(3000000, 4000000)) - .get() ); assertThat(e.getDetailedMessage(), containsString("failed to parse date field [1000000] with format [strict_hour_minute_second]")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java index 1500c203ea4db..668b9d79c49a8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java @@ -262,12 +262,7 @@ private void getMultiSortDocs(List builders) throws IOExcep public void testSizeIsZero() { IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, - () -> prepareSearch("high_card_idx").addAggregation( - new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME) - .minDocCount(randomInt(1)) - .size(0) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ).get() + () -> new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME).minDocCount(randomInt(1)).size(0) ); assertThat(exception.getMessage(), containsString("[size] must be greater than 0. 
Found [0] in [terms]")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java index e5d13627e1da0..421c1475eb5bc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java @@ -1231,17 +1231,17 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound public void testInvalidBounds() { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("empty_bucket_idx").addAggregation( + prepareSearch("empty_bucket_idx").addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).hardBounds(new DoubleBounds(0.0, 10.0)).extendedBounds(3, 20) - ).get() + ) ); assertThat(e.toString(), containsString("Extended bounds have to be inside hard bounds, hard bounds")); e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("empty_bucket_idx").addAggregation( + prepareSearch("empty_bucket_idx").addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).hardBounds(new DoubleBounds(3.0, null)).extendedBounds(0, 20) - ).get() + ) ); assertThat(e.toString(), containsString("Extended bounds have to be inside hard bounds, hard bounds")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java index d263c14fe4710..3f5d8e441dc44 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -182,7 +182,6 @@ public void testNullValuesField() throws Exception { .numberOfSignificantValueDigits(sigDigits) .field("value") ) - .get() ); assertThat(e.getMessage(), equalTo("[values] must not be null: [percentile_ranks]")); } @@ -198,7 +197,6 @@ public void testEmptyValuesField() throws Exception { .numberOfSignificantValueDigits(sigDigits) .field("value") ) - .get() ); assertThat(e.getMessage(), equalTo("[values] must not be an empty array: [percentile_ranks]")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index 0ab26e1d9a049..d40264d9facf0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -1246,12 +1245,13 @@ public void testConflictingAggAndScriptParams() { Script combineScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap()); Script reduceScript = new Script(ScriptType.INLINE, 
CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap()); - SearchRequestBuilder builder = prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) - ); - - SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, builder::get); + SearchPhaseExecutionException ex = expectThrows( + SearchPhaseExecutionException.class, + prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) + ) + ); assertThat(ex.getCause().getMessage(), containsString("Parameter name \"param1\" used in both aggregation and script parameters")); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index f1a4c9e5bd7a5..47c443a58eeda 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -126,7 +126,6 @@ public void testNullValuesField() throws Exception { IllegalArgumentException.class, () -> prepareSearch("idx").setQuery(matchAllQuery()) .addAggregation(percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.TDIGEST).field("value")) - .get() ); assertThat(e.getMessage(), equalTo("[values] must not be null: [percentile_ranks]")); } @@ -137,7 +136,6 @@ public void testEmptyValuesField() throws Exception { IllegalArgumentException.class, () -> prepareSearch("idx").setQuery(matchAllQuery()) .addAggregation(percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.TDIGEST).field("value")) - .get() ); assertThat(e.getMessage(), equalTo("[values] must not be an empty array: [percentile_ranks]")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index d878dc981b17f..6cf274cb69fb3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -999,11 +999,11 @@ public void testTooHighResultWindow() throws Exception { Exception e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("idx").addAggregation( + prepareSearch("idx").addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(TERMS_AGGS_FIELD) .subAggregation(topHits("hits").from(100).size(10).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC))) - ).get() + ) ); assertThat( e.getCause().getMessage(), @@ -1011,11 +1011,11 @@ public void testTooHighResultWindow() throws Exception { ); e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("idx").addAggregation( + prepareSearch("idx").addAggregation( terms("terms").executionHint(randomExecutionHint()) .field(TERMS_AGGS_FIELD) .subAggregation(topHits("hits").from(10).size(100).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC))) - ).get() + ) ); assertThat( e.getCause().getMessage(), diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java index 16a570b6cd2fd..dc612d6bad5ce 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java @@ -657,10 +657,10 @@ public void testSingleBucketPathAgg() throws Exception { .field("lang", CustomScriptPlugin.NAME) .endObject() .endObject(); - BucketScriptPipelineAggregationBuilder bucketScriptAgg = BucketScriptPipelineAggregationBuilder.PARSER.parse( - createParser(content), - "seriesArithmetic" - ); + BucketScriptPipelineAggregationBuilder bucketScriptAgg; + try (var parser = createParser(content)) { + bucketScriptAgg = BucketScriptPipelineAggregationBuilder.PARSER.parse(parser, "seriesArithmetic"); + } assertNoFailuresAndResponse( prepareSearch("idx", "idx_unmapped").addAggregation( @@ -703,10 +703,10 @@ public void testArrayBucketPathAgg() throws Exception { .field("lang", CustomScriptPlugin.NAME) .endObject() .endObject(); - BucketScriptPipelineAggregationBuilder bucketScriptAgg = BucketScriptPipelineAggregationBuilder.PARSER.parse( - createParser(content), - "seriesArithmetic" - ); + BucketScriptPipelineAggregationBuilder bucketScriptAgg; + try (var parser = createParser(content)) { + bucketScriptAgg = BucketScriptPipelineAggregationBuilder.PARSER.parse(parser, "seriesArithmetic"); + } assertNoFailuresAndResponse( prepareSearch("idx", "idx_unmapped").addAggregation( @@ -761,10 +761,10 @@ public void testObjectBucketPathAgg() throws Exception { .field("lang", CustomScriptPlugin.NAME) .endObject() .endObject(); - BucketScriptPipelineAggregationBuilder bucketScriptAgg = BucketScriptPipelineAggregationBuilder.PARSER.parse( - createParser(content), - "seriesArithmetic" - ); + BucketScriptPipelineAggregationBuilder bucketScriptAgg; + try (var parser = createParser(content)) { + bucketScriptAgg = BucketScriptPipelineAggregationBuilder.PARSER.parse(parser, "seriesArithmetic"); + } assertNoFailuresAndResponse( prepareSearch("idx", "idx_unmapped").addAggregation( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java index bc518eb6c1294..6562c485b9204 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java @@ -162,7 +162,7 @@ public void testBadSigmaAsSubAgg() throws Exception { .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) ) .subAggregation(extendedStatsBucket("extended_stats_bucket", "histo>sum").sigma(-1.0)) - ).get() + ) ); Throwable cause = ExceptionsHelper.unwrapCause(ex); if (cause == null) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java index e4bb11247d230..1525496176418 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java @@ -81,7 +81,7 @@ 
public void testDisallowPartialsWithRedState() throws Exception { SearchPhaseExecutionException ex = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch().setSize(0).setAllowPartialSearchResults(false).get() + prepareSearch().setSize(0).setAllowPartialSearchResults(false) ); assertThat(ex.getDetailedMessage(), containsString("Search rejected due to missing shard")); } @@ -90,7 +90,7 @@ public void testClusterDisallowPartialsWithRedState() throws Exception { buildRedIndex(cluster().numDataNodes() + 2); setClusterDefaultAllowPartialResults(false); - SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, () -> prepareSearch().setSize(0).get()); + SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, prepareSearch().setSize(0)); assertThat(ex.getDetailedMessage(), containsString("Search rejected due to missing shard")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java index 607c6596d15c9..b536db040e39f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java @@ -963,11 +963,11 @@ public void testTooHighResultWindow() throws Exception { Exception e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("index2").setQuery( + prepareSearch("index2").setQuery( nestedQuery("nested", matchQuery("nested.field", "value1"), ScoreMode.Avg).innerHit( new InnerHitBuilder().setFrom(100).setSize(10).setName("_name") ) - ).get() + ) ); assertThat( e.getCause().getMessage(), @@ -975,11 +975,11 @@ public void testTooHighResultWindow() throws Exception { ); e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("index2").setQuery( + prepareSearch("index2").setQuery( nestedQuery("nested", matchQuery("nested.field", "value1"), ScoreMode.Avg).innerHit( new InnerHitBuilder().setFrom(10).setSize(100).setName("_name") ) - ).get() + ) ); assertThat( e.getCause().getMessage(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/CCSFieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/CCSFieldCapabilitiesIT.java index 9da7ca4961f9a..afc62323ca544 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/CCSFieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/CCSFieldCapabilitiesIT.java @@ -81,7 +81,7 @@ public void testFailuresFromRemote() { // if we only query the remote we should get back an exception only ex = expectThrows( IllegalArgumentException.class, - () -> client().prepareFieldCaps("remote_cluster:*").setFields("*").setIndexFilter(new ExceptionOnRewriteQueryBuilder()).get() + client().prepareFieldCaps("remote_cluster:*").setFields("*").setIndexFilter(new ExceptionOnRewriteQueryBuilder()) ); assertEquals("I throw because I choose to.", ex.getMessage()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java index 6139d6875d5ae..282e29866a699 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -426,10 +426,7 @@ public void testFailures() throws InterruptedException { // if all requested indices failed, we fail the request by throwing the exception IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, - () -> client().prepareFieldCaps("index1-error", "index2-error") - .setFields("*") - .setIndexFilter(new ExceptionOnRewriteQueryBuilder()) - .get() + client().prepareFieldCaps("index1-error", "index2-error").setFields("*").setIndexFilter(new ExceptionOnRewriteQueryBuilder()) ); assertEquals("I throw because I choose to.", ex.getMessage()); } @@ -499,7 +496,7 @@ public void testNoActiveCopy() throws Exception { { final ElasticsearchException ex = expectThrows( ElasticsearchException.class, - () -> client().prepareFieldCaps("log-index-*").setFields("*").get() + client().prepareFieldCaps("log-index-*").setFields("*") ); assertThat(ex.getMessage(), equalTo("index [log-index-inactive] has no active shard copy")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java index eff2e8d3653c5..dcbf4996358d7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java @@ -659,14 +659,14 @@ public void testExceptionThrownIfScaleLE0() throws Exception { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> client().search( + client().search( new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) .source( searchSource().query( functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "2013-05-28", "-1d")) ) ) - ).actionGet() + ) ); assertThat(e.getMessage(), is("all shards failed")); } @@ -987,7 +987,7 @@ public void testParsingExceptionIfFieldDoesNotExist() throws Exception { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> client().search( + client().search( new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) .source( searchSource().size(numDocs) @@ -997,7 +997,7 @@ public void testParsingExceptionIfFieldDoesNotExist() throws Exception { ) ) ) - ).actionGet() + ) ); assertThat(e.getMessage(), is("all shards failed")); } @@ -1028,7 +1028,7 @@ public void testParsingExceptionIfFieldTypeDoesNotMatch() throws Exception { // so, we indexed a string field, but now we try to score a num field SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> client().search( + client().search( new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) .source( searchSource().query( @@ -1037,7 +1037,7 @@ public void testParsingExceptionIfFieldTypeDoesNotMatch() throws Exception { ) ) ) - ).actionGet() + ) ); assertThat(e.getMessage(), is("all shards failed")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java index c67bdf82b5c2c..110ac76849e0b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java @@ -815,21 +815,19 @@ public void testRescorePhaseWithInvalidSort() throws Exception { Exception exc = expectThrows( Exception.class, - () -> prepareSearch().addSort(SortBuilders.fieldSort("number")) + prepareSearch().addSort(SortBuilders.fieldSort("number")) .setTrackScores(true) .addRescorer(new QueryRescorerBuilder(matchAllQuery()), 50) - .get() ); assertNotNull(exc.getCause()); assertThat(exc.getCause().getMessage(), containsString("Cannot use [sort] option in conjunction with [rescore].")); exc = expectThrows( Exception.class, - () -> prepareSearch().addSort(SortBuilders.fieldSort("number")) + prepareSearch().addSort(SortBuilders.fieldSort("number")) .addSort(SortBuilders.scoreSort()) .setTrackScores(true) .addRescorer(new QueryRescorerBuilder(matchAllQuery()), 50) - .get() ); assertNotNull(exc.getCause()); assertThat(exc.getCause().getMessage(), containsString("Cannot use [sort] option in conjunction with [rescore].")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java index d79bb903bdb6a..f632bbed134f0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.search.morelikethis; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; @@ -42,7 +43,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @@ -362,20 +362,18 @@ public void testNumericField() throws Exception { ); // Explicit list of fields including numeric fields -> fail - assertRequestBuilderThrows( - prepareSearch().setQuery( - new MoreLikeThisQueryBuilder(new String[] { "string_value", "int_value" }, null, new Item[] { new Item("test", "1") }) - .minTermFreq(1) - .minDocFreq(1) - ), - SearchPhaseExecutionException.class + ActionRequestBuilder builder5 = prepareSearch().setQuery( + new MoreLikeThisQueryBuilder(new String[] { "string_value", "int_value" }, null, new Item[] { new Item("test", "1") }) + .minTermFreq(1) + .minDocFreq(1) ); + expectThrows(SearchPhaseExecutionException.class, builder5); // mlt query with no field -> exception because _all is not enabled) - assertRequestBuilderThrows( - prepareSearch().setQuery(moreLikeThisQuery(new String[] { "index" }).minTermFreq(1).minDocFreq(1)), - SearchPhaseExecutionException.class + ActionRequestBuilder builder4 = prepareSearch().setQuery( + moreLikeThisQuery(new String[] { "index" }).minTermFreq(1).minDocFreq(1) ); + expectThrows(SearchPhaseExecutionException.class, 
builder4); // mlt query with string fields assertHitCount( @@ -386,18 +384,16 @@ public void testNumericField() throws Exception { ); // mlt query with at least a numeric field -> fail by default - assertRequestBuilderThrows( - prepareSearch().setQuery(moreLikeThisQuery(new String[] { "string_value", "int_value" }, new String[] { "index" }, null)), - SearchPhaseExecutionException.class + ActionRequestBuilder builder3 = prepareSearch().setQuery( + moreLikeThisQuery(new String[] { "string_value", "int_value" }, new String[] { "index" }, null) ); + expectThrows(SearchPhaseExecutionException.class, builder3); // mlt query with at least a numeric field -> fail by command - assertRequestBuilderThrows( - prepareSearch().setQuery( - moreLikeThisQuery(new String[] { "string_value", "int_value" }, new String[] { "index" }, null).failOnUnsupportedField(true) - ), - SearchPhaseExecutionException.class + ActionRequestBuilder builder2 = prepareSearch().setQuery( + moreLikeThisQuery(new String[] { "string_value", "int_value" }, new String[] { "index" }, null).failOnUnsupportedField(true) ); + expectThrows(SearchPhaseExecutionException.class, builder2); // mlt query with at least a numeric field but fail_on_unsupported_field set to false assertHitCount( @@ -410,22 +406,18 @@ public void testNumericField() throws Exception { ); // mlt field query on a numeric field -> failure by default - assertRequestBuilderThrows( - prepareSearch().setQuery( - moreLikeThisQuery(new String[] { "int_value" }, new String[] { "42" }, null).minTermFreq(1).minDocFreq(1) - ), - SearchPhaseExecutionException.class + ActionRequestBuilder builder1 = prepareSearch().setQuery( + moreLikeThisQuery(new String[] { "int_value" }, new String[] { "42" }, null).minTermFreq(1).minDocFreq(1) ); + expectThrows(SearchPhaseExecutionException.class, builder1); // mlt field query on a numeric field -> failure by command - assertRequestBuilderThrows( - prepareSearch().setQuery( - moreLikeThisQuery(new String[] { "int_value" }, new String[] { "42" }, null).minTermFreq(1) - .minDocFreq(1) - .failOnUnsupportedField(true) - ), - SearchPhaseExecutionException.class + ActionRequestBuilder builder = prepareSearch().setQuery( + moreLikeThisQuery(new String[] { "int_value" }, new String[] { "42" }, null).minTermFreq(1) + .minDocFreq(1) + .failOnUnsupportedField(true) ); + expectThrows(SearchPhaseExecutionException.class, builder); // mlt field query on a numeric field but fail_on_unsupported_field set to false assertHitCount( @@ -811,9 +803,9 @@ public void testWithMissingRouting() throws IOException { logger.info("Running moreLikeThis with one item without routing attribute"); SearchPhaseExecutionException exception = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch().setQuery( + prepareSearch().setQuery( new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1) - ).get() + ) ); Throwable cause = exception.getCause(); @@ -825,12 +817,12 @@ public void testWithMissingRouting() throws IOException { logger.info("Running moreLikeThis with one item with routing attribute and two items without routing attribute"); SearchPhaseExecutionException exception = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch().setQuery( + prepareSearch().setQuery( new MoreLikeThisQueryBuilder( null, new Item[] { new Item("test", "1").routing("1"), new Item("test", "2"), new Item("test", "3") } ).minTermFreq(1).minDocFreq(1) - ).get() + ) ); Throwable cause = exception.getCause(); diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java index d8787b6ef7b16..a7ce84f3cd02d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java @@ -186,7 +186,7 @@ public void testPhraseQueryOnFieldWithNoPositions() throws Exception { Exception exc = expectThrows( Exception.class, - () -> prepareSearch("test").setQuery(queryStringQuery("f4:\"eggplant parmesan\"").lenient(false)).get() + prepareSearch("test").setQuery(queryStringQuery("f4:\"eggplant parmesan\"").lenient(false)) ); IllegalStateException ise = (IllegalStateException) ExceptionsHelper.unwrap(exc, IllegalStateException.class); assertNotNull(ise); @@ -194,7 +194,7 @@ public void testPhraseQueryOnFieldWithNoPositions() throws Exception { } public void testBooleanStrictQuery() throws Exception { - Exception e = expectThrows(Exception.class, () -> prepareSearch("test").setQuery(queryStringQuery("foo").field("f_bool")).get()); + Exception e = expectThrows(Exception.class, prepareSearch("test").setQuery(queryStringQuery("foo").field("f_bool"))); assertThat( ExceptionsHelper.unwrap(e, IllegalArgumentException.class).getMessage(), containsString("Can't parse boolean value [foo], expected [true] or [false]") @@ -204,7 +204,7 @@ public void testBooleanStrictQuery() throws Exception { public void testAllFieldsWithSpecifiedLeniency() throws IOException { Exception e = expectThrows( Exception.class, - () -> prepareSearch("test").setQuery(queryStringQuery("f_date:[now-2D TO now]").lenient(false)).get() + prepareSearch("test").setQuery(queryStringQuery("f_date:[now-2D TO now]").lenient(false)) ); assertThat(e.getCause().getMessage(), containsString("unit [D] not supported for date math [-2D]")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java index 0a35c33673343..30eeb86ba44d3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java @@ -146,7 +146,7 @@ public void testDisallowExpensiveQueries() { ElasticsearchException e = expectThrows( ElasticsearchException.class, - () -> prepareSearch("test-index").setQuery(scriptScoreQuery(matchQuery("field1", "text0"), script)).get() + prepareSearch("test-index").setQuery(scriptScoreQuery(matchQuery("field1", "text0"), script)) ); assertEquals( "[script score] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java index ea2decff18cd0..384395bcb78e7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -350,7 +350,7 @@ public void testDateRangeInQueryString() { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch().setQuery(queryStringQuery("future:[now/D TO now+2M/d]").lenient(false)).get() + prepareSearch().setQuery(queryStringQuery("future:[now/D TO 
now+2M/d]").lenient(false)) ); assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat(e.toString(), containsString("unit [D] not supported for date math")); @@ -544,7 +544,7 @@ public void testMatchQueryNumeric() throws Exception { assertResponse(prepareSearch().setQuery(matchQuery("double", "2")), response -> { assertHitCount(response, 1L); assertFirstHit(response, hasId("2")); - expectThrows(SearchPhaseExecutionException.class, () -> prepareSearch().setQuery(matchQuery("double", "2 3 4")).get()); + expectThrows(SearchPhaseExecutionException.class, prepareSearch().setQuery(matchQuery("double", "2 3 4"))); }); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java index 449777580b691..7630ddb000140 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java @@ -525,7 +525,7 @@ public void testAllFieldsWithSpecifiedLeniency() throws Exception { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").setQuery(simpleQueryStringQuery("foo123").lenient(false)).get() + prepareSearch("test").setQuery(simpleQueryStringQuery("foo123").lenient(false)) ); assertThat(e.getDetailedMessage(), containsString("NumberFormatException: For input string: \"foo123\"")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java index 4c99becad055e..9f6ad69a4eed6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java @@ -242,7 +242,7 @@ public void testDisallowExpensiveQueries() { // Set search.allow_expensive_queries to "false" => assert failure ElasticsearchException e = expectThrows( ElasticsearchException.class, - () -> prepareSearch("test-index").setQuery(scriptQuery(script)).get() + prepareSearch("test-index").setQuery(scriptQuery(script)) ); assertEquals( "[script] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java index 28723a09355a9..7dcdb92ec5680 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java @@ -347,22 +347,14 @@ public void testClearIllegalScrollId() throws Exception { createIndex("idx"); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> client().prepareClearScroll().addScrollId("c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1").get() + client().prepareClearScroll().addScrollId("c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1") ); assertEquals("Cannot parse scroll id", e.getMessage()); - - e = expectThrows( - IllegalArgumentException.class, - // Fails during base64 decoding (Base64-encoded string must have at least four characters) - () -> client().prepareClearScroll().addScrollId("a").get() - ); + // Fails during base64 decoding (Base64-encoded 
string must have at least four characters) + e = expectThrows(IllegalArgumentException.class, client().prepareClearScroll().addScrollId("a")); assertEquals("Cannot parse scroll id", e.getMessage()); - - e = expectThrows( - IllegalArgumentException.class, - // Other invalid base64 - () -> client().prepareClearScroll().addScrollId("abcabc").get() - ); + // Other invalid base64 + e = expectThrows(IllegalArgumentException.class, client().prepareClearScroll().addScrollId("abcabc")); assertEquals("Cannot parse scroll id", e.getMessage()); } @@ -559,9 +551,8 @@ public void testCloseAndReopenOrDeleteWithActiveScroll() { public void testScrollInvalidDefaultKeepAlive() throws IOException { IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings() .setPersistentSettings(Settings.builder().put("search.max_keep_alive", "1m").put("search.default_keep_alive", "2m")) - .get() ); assertThat(exc.getMessage(), containsString("was (2m > 1m)")); @@ -571,9 +562,7 @@ public void testScrollInvalidDefaultKeepAlive() throws IOException { exc = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "3m")) - .get() + clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put("search.default_keep_alive", "3m")) ); assertThat(exc.getMessage(), containsString("was (3m > 2m)")); @@ -581,7 +570,7 @@ public void testScrollInvalidDefaultKeepAlive() throws IOException { exc = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put("search.max_keep_alive", "30s")).get() + clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put("search.max_keep_alive", "30s")) ); assertThat(exc.getMessage(), containsString("was (1m > 30s)")); } @@ -596,7 +585,7 @@ public void testInvalidScrollKeepAlive() throws IOException { Exception exc = expectThrows( Exception.class, - () -> prepareSearch().setQuery(matchAllQuery()).setSize(1).setScroll(TimeValue.timeValueHours(2)).get() + prepareSearch().setQuery(matchAllQuery()).setSize(1).setScroll(TimeValue.timeValueHours(2)) ); IllegalArgumentException illegalArgumentException = (IllegalArgumentException) ExceptionsHelper.unwrap( exc, @@ -611,7 +600,7 @@ public void testInvalidScrollKeepAlive() throws IOException { assertThat(searchResponse.getHits().getHits().length, equalTo(1)); Exception ex = expectThrows( Exception.class, - () -> client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueHours(3)).get() + client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueHours(3)) ); IllegalArgumentException iae = (IllegalArgumentException) ExceptionsHelper.unwrap(ex, IllegalArgumentException.class); assertNotNull(iae); @@ -699,7 +688,7 @@ public void testRestartDataNodesDuringScrollSearch() throws Exception { } SearchPhaseExecutionException error = expectThrows( SearchPhaseExecutionException.class, - () -> client().prepareSearchScroll(respFromDemoIndexScrollId).get() + client().prepareSearchScroll(respFromDemoIndexScrollId) ); for (ShardSearchFailure shardSearchFailure : error.shardFailures()) { assertThat(shardSearchFailure.getCause().getMessage(), containsString("No search context found for id [1]")); diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java index d76031d402af0..075740ae2c194 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java @@ -69,11 +69,10 @@ public void testsShouldFail() throws Exception { { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").addSort("field1", SortOrder.ASC) + prepareSearch("test").addSort("field1", SortOrder.ASC) .setQuery(matchAllQuery()) .searchAfter(new Object[] { 0 }) .setScroll("1m") - .get() ); assertTrue(e.shardFailures().length > 0); for (ShardSearchFailure failure : e.shardFailures()) { @@ -84,11 +83,7 @@ public void testsShouldFail() throws Exception { { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").addSort("field1", SortOrder.ASC) - .setQuery(matchAllQuery()) - .searchAfter(new Object[] { 0 }) - .setFrom(10) - .get() + prepareSearch("test").addSort("field1", SortOrder.ASC).setQuery(matchAllQuery()).searchAfter(new Object[] { 0 }).setFrom(10) ); assertTrue(e.shardFailures().length > 0); for (ShardSearchFailure failure : e.shardFailures()) { @@ -99,7 +94,7 @@ public void testsShouldFail() throws Exception { { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").setQuery(matchAllQuery()).searchAfter(new Object[] { 0.75f }).get() + prepareSearch("test").setQuery(matchAllQuery()).searchAfter(new Object[] { 0.75f }) ); assertTrue(e.shardFailures().length > 0); for (ShardSearchFailure failure : e.shardFailures()) { @@ -110,11 +105,10 @@ public void testsShouldFail() throws Exception { { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").addSort("field2", SortOrder.DESC) + prepareSearch("test").addSort("field2", SortOrder.DESC) .addSort("field1", SortOrder.ASC) .setQuery(matchAllQuery()) .searchAfter(new Object[] { 1 }) - .get() ); assertTrue(e.shardFailures().length > 0); for (ShardSearchFailure failure : e.shardFailures()) { @@ -125,10 +119,7 @@ public void testsShouldFail() throws Exception { { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").setQuery(matchAllQuery()) - .addSort("field1", SortOrder.ASC) - .searchAfter(new Object[] { 1, 2 }) - .get() + prepareSearch("test").setQuery(matchAllQuery()).addSort("field1", SortOrder.ASC).searchAfter(new Object[] { 1, 2 }) ); for (ShardSearchFailure failure : e.shardFailures()) { assertTrue(e.shardFailures().length > 0); @@ -139,10 +130,7 @@ public void testsShouldFail() throws Exception { { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").setQuery(matchAllQuery()) - .addSort("field1", SortOrder.ASC) - .searchAfter(new Object[] { "toto" }) - .get() + prepareSearch("test").setQuery(matchAllQuery()).addSort("field1", SortOrder.ASC).searchAfter(new Object[] { "toto" }) ); assertTrue(e.shardFailures().length > 0); for (ShardSearchFailure failure : e.shardFailures()) { @@ -473,7 +461,7 @@ public void testScrollAndSearchAfterWithBigIndex() { { OpenPointInTimeRequest openPITRequest = new 
OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(5)); pitID = client().execute(TransportOpenPointInTimeAction.TYPE, openPITRequest).actionGet().getPointInTimeId(); - SearchRequest searchRequest = new SearchRequest("test").source( + SearchRequest searchRequest = new SearchRequest().source( new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(pitID).setKeepAlive(TimeValue.timeValueMinutes(5))) .sort("timestamp") ); @@ -509,7 +497,7 @@ public void testScrollAndSearchAfterWithBigIndex() { { OpenPointInTimeRequest openPITRequest = new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(5)); pitID = client().execute(TransportOpenPointInTimeAction.TYPE, openPITRequest).actionGet().getPointInTimeId(); - SearchRequest searchRequest = new SearchRequest("test").source( + SearchRequest searchRequest = new SearchRequest().source( new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(pitID).setKeepAlive(TimeValue.timeValueMinutes(5))) .sort(SortBuilders.pitTiebreaker()) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java index cb13fca85541f..543f45b58279e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java @@ -64,13 +64,8 @@ protected Collection> nodePlugins() { } public void testSearchNullIndex() { - expectThrows(NullPointerException.class, () -> prepareSearch((String) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get()); - - expectThrows( - NullPointerException.class, - () -> prepareSearch((String[]) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get() - ); - + expectThrows(NullPointerException.class, () -> prepareSearch((String) null)); + expectThrows(NullPointerException.class, () -> prepareSearch((String[]) null)); } public void testSearchRandomPreference() throws InterruptedException, ExecutionException { @@ -459,10 +454,11 @@ public void testTermQueryBigInt() throws Exception { .get(); String queryJson = "{ \"field\" : { \"value\" : 80315953321748200608 } }"; - XContentParser parser = createParser(JsonXContent.jsonXContent, queryJson); - parser.nextToken(); - TermQueryBuilder query = TermQueryBuilder.fromXContent(parser); - assertHitCount(prepareSearch("idx").setQuery(query), 1); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, queryJson)) { + parser.nextToken(); + TermQueryBuilder query = TermQueryBuilder.fromXContent(parser); + assertHitCount(prepareSearch("idx").setQuery(query), 1); + } } public void testTooLongRegexInRegexpQuery() throws Exception { @@ -476,7 +472,7 @@ public void testTooLongRegexInRegexpQuery() throws Exception { } SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", regexp.toString())).get() + prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", regexp.toString())) ); assertThat( e.getRootCause().getMessage(), @@ -526,7 +522,7 @@ public void testStrictlyCountRequest() throws Exception { } private void assertWindowFails(SearchRequestBuilder search) { - SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); + SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, search); assertThat( e.toString(), 
containsString( @@ -539,7 +535,7 @@ private void assertWindowFails(SearchRequestBuilder search) { private void assertRescoreWindowFails(int windowSize) { SearchRequestBuilder search = prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(windowSize)); - SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); + SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, search); assertThat( e.toString(), containsString( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java index 93340bedbdae3..53bc8ba439c3e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java @@ -222,7 +222,7 @@ private void assertSearchSlicesWithPointInTime(String sliceField, String sortFie for (int id = 0; id < numSlice; id++) { int numSliceResults = 0; - SearchRequestBuilder request = prepareSearch("test").slice(new SliceBuilder(sliceField, id, numSlice)) + SearchRequestBuilder request = prepareSearch().slice(new SliceBuilder(sliceField, id, numSlice)) .setPointInTime(new PointInTimeBuilder(pointInTimeId)) .addSort(SortBuilders.fieldSort(sortField)) .setSize(randomIntBetween(10, 100)); @@ -261,10 +261,10 @@ public void testInvalidFields() throws Exception { setupIndex(0, 1); SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").setQuery(matchAllQuery()) + prepareSearch("test").setQuery(matchAllQuery()) .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) .slice(new SliceBuilder("invalid_random_int", 0, 10)) - .get() + ); Throwable rootCause = findRootCause(exc); assertThat(rootCause.getClass(), equalTo(IllegalArgumentException.class)); @@ -272,10 +272,9 @@ public void testInvalidFields() throws Exception { exc = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").setQuery(matchAllQuery()) + prepareSearch("test").setQuery(matchAllQuery()) .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) .slice(new SliceBuilder("invalid_random_kw", 0, 10)) - .get() ); rootCause = findRootCause(exc); assertThat(rootCause.getClass(), equalTo(IllegalArgumentException.class)); @@ -286,7 +285,7 @@ public void testInvalidQuery() throws Exception { setupIndex(0, 1); SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch().setQuery(matchAllQuery()).slice(new SliceBuilder("invalid_random_int", 0, 10)).get() + prepareSearch().setQuery(matchAllQuery()).slice(new SliceBuilder("invalid_random_int", 0, 10)) ); Throwable rootCause = findRootCause(exc); assertThat(rootCause.getClass(), equalTo(SearchException.class)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java index 21a784c16c8e9..7915637ca7ac5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java @@ -1654,7 +1654,7 @@ public void testNestedSort() throws IOException, InterruptedException, Execution { SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, - () -> 
prepareSearch().setQuery(matchAllQuery()) + prepareSearch().setQuery(matchAllQuery()) .addSort( SortBuilders.fieldSort("nested.bar.foo") .setNestedSort( @@ -1662,7 +1662,6 @@ public void testNestedSort() throws IOException, InterruptedException, Execution ) .order(SortOrder.DESC) ) - .get() ); assertThat(exc.toString(), containsString("max_children is only supported on top level of nested sort")); } @@ -1684,7 +1683,7 @@ public void testNestedSort() throws IOException, InterruptedException, Execution // missing nested path SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("nested.foo")).get() + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("nested.foo")) ); assertThat(exc.toString(), containsString("it is mandatory to set the [nested] context")); } @@ -2044,9 +2043,7 @@ public void testCastNumericTypeExceptions() throws Exception { for (String numericType : new String[] { "long", "double", "date", "date_nanos" }) { ElasticsearchException exc = expectThrows( ElasticsearchException.class, - () -> prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort(invalidField).setNumericType(numericType)) - .get() + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort(invalidField).setNumericType(numericType)) ); assertThat(exc.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat(exc.getDetailedMessage(), containsString("[numeric_type] option cannot be set on a non-numeric field")); @@ -2123,7 +2120,7 @@ public void testSortMixedFieldTypes() { { // mixing long and double types is not allowed SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("index_long", "index_double").addSort(new FieldSortBuilder("foo")).setSize(10).get() + prepareSearch("index_long", "index_double").addSort(new FieldSortBuilder("foo")).setSize(10) ); assertThat(exc.getCause().toString(), containsString(errMsg)); } @@ -2131,7 +2128,7 @@ public void testSortMixedFieldTypes() { { // mixing long and keyword types is not allowed SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("index_long", "index_keyword").addSort(new FieldSortBuilder("foo")).setSize(10).get() + prepareSearch("index_long", "index_keyword").addSort(new FieldSortBuilder("foo")).setSize(10) ); assertThat(exc.getCause().toString(), containsString(errMsg)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java index 4a10bf6cf8fab..bd0e7cef44d3f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java @@ -103,7 +103,7 @@ public void testInvalid() { { SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").setFetchSource(true).storedFields("_none_").get() + prepareSearch("test").setFetchSource(true).storedFields("_none_") ); Throwable rootCause = ExceptionsHelper.unwrap(exc, SearchException.class); assertNotNull(rootCause); @@ -113,7 +113,7 @@ public void testInvalid() { { SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, - () -> 
prepareSearch("test").storedFields("_none_").addFetchField("field").get() + prepareSearch("test").storedFields("_none_").addFetchField("field") ); Throwable rootCause = ExceptionsHelper.unwrap(exc, SearchException.class); assertNotNull(rootCause); @@ -123,14 +123,14 @@ public void testInvalid() { { IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, - () -> prepareSearch("test").storedFields("_none_", "field1").setVersion(true).get() + () -> prepareSearch("test").storedFields("_none_", "field1") ); assertThat(exc.getMessage(), equalTo("cannot combine _none_ with other fields")); } { IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, - () -> prepareSearch("test").storedFields("_none_").storedFields("field1").setVersion(true).get() + () -> prepareSearch("test").storedFields("_none_").storedFields("field1") ); assertThat(exc.getMessage(), equalTo("cannot combine _none_ with other fields")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index 9ca565cef7843..81659323e2471 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -425,7 +425,7 @@ public void testThatWeightMustBeAnInteger() throws Exception { Exception e = expectThrows( DocumentParsingException.class, - () -> prepareIndex(INDEX).setId("1") + prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -436,7 +436,6 @@ public void testThatWeightMustBeAnInteger() throws Exception { .endObject() .endObject() ) - .get() ); assertThat(e.getCause().getMessage(), equalTo("weight must be an integer, but was [2.5]")); } @@ -485,7 +484,7 @@ public void testThatWeightMustNotBeANonNumberString() throws Exception { Exception e = expectThrows( DocumentParsingException.class, - () -> prepareIndex(INDEX).setId("1") + prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -496,7 +495,6 @@ public void testThatWeightMustNotBeANonNumberString() throws Exception { .endObject() .endObject() ) - .get() ); assertThat(e.getCause().toString(), containsString("thisIsNotValid")); } @@ -508,7 +506,7 @@ public void testThatWeightAsStringMustBeInt() throws Exception { Exception e = expectThrows( DocumentParsingException.class, - () -> prepareIndex(INDEX).setId("1") + prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -519,7 +517,6 @@ public void testThatWeightAsStringMustBeInt() throws Exception { .endObject() .endObject() ) - .get() ); assertThat(e.getCause().toString(), containsString(weight)); } @@ -997,7 +994,7 @@ public void testThatSortingOnCompletionFieldReturnsUsefulException() throws Exce SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch(INDEX).addSort(new FieldSortBuilder(FIELD)).get() + prepareSearch(INDEX).addSort(new FieldSortBuilder(FIELD)) ); assertThat(e.status().getStatus(), is(400)); assertThat(e.toString(), containsString("Fielddata is not supported on field [" + FIELD + "] of type [completion]")); @@ -1336,7 +1333,7 @@ public void testReservedChars() throws IOException { String string = "foo" + (char) 0x00 + "bar"; Exception e = expectThrows( DocumentParsingException.class, - () -> 
prepareIndex(INDEX).setId("1") + prepareIndex(INDEX).setId("1") .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -1347,7 +1344,6 @@ public void testReservedChars() throws IOException { .endObject() .endObject() ) - .get() ); assertThat(e.getMessage(), containsString("failed to parse")); } @@ -1376,9 +1372,9 @@ public void testIssue5930() throws IOException { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch(INDEX).addAggregation( + prepareSearch(INDEX).addAggregation( AggregationBuilders.terms("suggest_agg").field(FIELD).collectMode(randomFrom(SubAggCollectionMode.values())) - ).get() + ) ); assertThat(e.toString(), containsString("Fielddata is not supported on field [" + FIELD + "] of type [completion]")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/SuggestSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/SuggestSearchIT.java index bade23e193e75..a7cf141eb7669 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/SuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/SuggestSearchIT.java @@ -53,7 +53,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSuggestion; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSuggestionPhraseCollateMatchExists; @@ -313,12 +312,12 @@ public void testUnmappedField() throws IOException, InterruptedException, Execut { SearchRequestBuilder searchBuilder = prepareSearch().setSize(0); searchBuilder.suggest(new SuggestBuilder().setGlobalText("tetsting sugestion").addSuggestion("did_you_mean", phraseSuggestion)); - assertRequestBuilderThrows(searchBuilder, SearchPhaseExecutionException.class); + expectThrows(SearchPhaseExecutionException.class, searchBuilder); } { SearchRequestBuilder searchBuilder = prepareSearch().setSize(0); searchBuilder.suggest(new SuggestBuilder().setGlobalText("tetsting sugestion").addSuggestion("did_you_mean", phraseSuggestion)); - assertRequestBuilderThrows(searchBuilder, SearchPhaseExecutionException.class); + expectThrows(SearchPhaseExecutionException.class, searchBuilder); } } @@ -838,7 +837,7 @@ public void testShardFailures() throws IOException, InterruptedException { new SuggestBuilder().setGlobalText("tetsting sugestion") .addSuggestion("did_you_mean", phraseSuggestion("fielddoesnotexist").maxErrors(5.0f)) ); - assertRequestBuilderThrows(request, SearchPhaseExecutionException.class); + expectThrows(SearchPhaseExecutionException.class, request); // When searching on a shard which does not hold yet any document of an existing type, we should not fail assertNoFailuresAndResponse( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java index 085fca2462984..ca06dcea88766 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java @@ -165,7 +165,7 @@ public void testClonePreventsSnapshotDelete() throws Exception { ConcurrentSnapshotExecutionException ex = expectThrows( ConcurrentSnapshotExecutionException.class, - () -> startDeleteSnapshot(repoName, sourceSnapshot).actionGet() + startDeleteSnapshot(repoName, sourceSnapshot) ); assertThat(ex.getMessage(), containsString("cannot delete snapshot while it is being cloned")); @@ -286,7 +286,7 @@ public void testDeletePreventsClone() throws Exception { ConcurrentSnapshotExecutionException ex = expectThrows( ConcurrentSnapshotExecutionException.class, - () -> startClone(repoName, sourceSnapshot, targetSnapshot, indexName).actionGet() + startClone(repoName, sourceSnapshot, targetSnapshot, indexName) ); assertThat(ex.getMessage(), containsString("cannot clone from snapshot that is being deleted")); @@ -401,10 +401,7 @@ public void testFailsOnCloneMissingIndices() { final String snapshotName = "snapshot"; createFullSnapshot(repoName, snapshotName); - expectThrows( - IndexNotFoundException.class, - () -> startClone(repoName, snapshotName, "target-snapshot", "does-not-exist").actionGet() - ); + expectThrows(IndexNotFoundException.class, startClone(repoName, snapshotName, "target-snapshot", "does-not-exist")); } public void testMasterFailoverDuringCloneStep2() throws Exception { @@ -426,7 +423,7 @@ public void testMasterFailoverDuringCloneStep2() throws Exception { final String masterNode = internalCluster().getMasterName(); waitForBlock(masterNode, repoName); internalCluster().restartNode(masterNode); - expectThrows(SnapshotException.class, cloneFuture::actionGet); + expectThrows(SnapshotException.class, cloneFuture); awaitNoMoreRunningOperations(); assertAllSnapshotsSuccessful(getRepositoryData(repoName), 2); @@ -451,7 +448,7 @@ public void testExceptionDuringShardClone() throws Exception { final String masterNode = internalCluster().getMasterName(); waitForBlock(masterNode, repoName); unblockNode(repoName, masterNode); - expectThrows(SnapshotException.class, cloneFuture::actionGet); + expectThrows(SnapshotException.class, cloneFuture); awaitNoMoreRunningOperations(); assertAllSnapshotsSuccessful(getRepositoryData(repoName), 1); assertAcked(startDeleteSnapshot(repoName, sourceSnapshot).get()); @@ -480,9 +477,7 @@ public void testDoesNotStartOnBrokenSourceSnapshot() throws Exception { final SnapshotException sne = expectThrows( SnapshotException.class, - () -> startClone(masterClient, repoName, sourceSnapshot, "target-snapshot", testIndex).actionGet( - TimeValue.timeValueSeconds(30L) - ) + startClone(masterClient, repoName, sourceSnapshot, "target-snapshot", testIndex) ); assertThat( sne.getMessage(), @@ -542,9 +537,7 @@ public void testSnapshotQueuedAfterCloneFromBrokenSourceSnapshot() throws Except ); final SnapshotException sne = expectThrows( SnapshotException.class, - () -> startClone(masterClient, repoName, sourceSnapshot, "target-snapshot", testIndex).actionGet( - TimeValue.timeValueSeconds(30L) - ) + startClone(masterClient, repoName, sourceSnapshot, "target-snapshot", testIndex) ); assertThat( sne.getMessage(), @@ -744,7 +737,7 @@ public void testRemoveFailedCloneFromCSWithoutIO() throws Exception { awaitNumberOfSnapshotsInProgress(1); waitForBlock(masterNode, repoName); unblockNode(repoName, masterNode); - expectThrows(SnapshotException.class, cloneFuture::actionGet); + expectThrows(SnapshotException.class, cloneFuture); awaitNoMoreRunningOperations(); 
assertAllSnapshotsSuccessful(getRepositoryData(repoName), 1); assertAcked(startDeleteSnapshot(repoName, sourceSnapshot).get()); @@ -787,7 +780,7 @@ public void testRemoveFailedCloneFromCSWithQueuedSnapshotInProgress() throws Exc waitForBlock(masterNode, repoName); unblockNode(repoName, masterNode); final ActionFuture<CreateSnapshotResponse> fullSnapshotFuture2 = startFullSnapshot(repoName, "full-snapshot-2"); - expectThrows(SnapshotException.class, cloneFuture::actionGet); + expectThrows(SnapshotException.class, cloneFuture); unblockNode(repoName, dataNode); awaitNoMoreRunningOperations(); assertSuccessful(fullSnapshotFuture1); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index df59ab18bef72..8d2e15f5027d5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -143,11 +143,7 @@ public void testRecreateCorruptedRepositoryDuringSnapshotsFails() throws Excepti logger.info("--> trying to create another snapshot in order for repository to be marked as corrupt"); final SnapshotException snapshotException = expectThrows( SnapshotException.class, - () -> clusterAdmin().prepareCreateSnapshot(repoName, "fast-snapshot2") - .setIndices(indexFast) - .setWaitForCompletion(true) - .execute() - .actionGet() + clusterAdmin().prepareCreateSnapshot(repoName, "fast-snapshot2").setIndices(indexFast).setWaitForCompletion(true) ); assertThat(snapshotException.getMessage(), containsString("failed to update snapshot in repository")); assertEquals(RepositoryData.CORRUPTED_REPO_GEN, getRepositoryMetadata(repoName).generation()); @@ -243,7 +239,7 @@ public void testBlockedRepoDoesNotBlockOtherRepos() throws Exception { clusterAdmin().prepareCreateSnapshot(otherRepoName, "snapshot").setIndices("does-not-exist-*").setWaitForCompletion(false).get(); unblockNode(blockedRepoName, internalCluster().getMasterName()); - expectThrows(SnapshotException.class, createSlowFuture::actionGet); + expectThrows(SnapshotException.class, createSlowFuture); assertBusy(() -> assertThat(currentSnapshots(otherRepoName), empty()), 30L, TimeUnit.SECONDS); } @@ -337,7 +333,7 @@ public void testSnapshotRunsAfterInProgressDelete() throws Exception { final ActionFuture<CreateSnapshotResponse> snapshotFuture = startFullSnapshot(repoName, "second-snapshot"); unblockNode(repoName, masterNode); - final UncategorizedExecutionException ex = expectThrows(UncategorizedExecutionException.class, deleteFuture::actionGet); + final UncategorizedExecutionException ex = expectThrows(UncategorizedExecutionException.class, deleteFuture); assertThat(ex.getRootCause(), instanceOf(IOException.class)); assertSuccessful(snapshotFuture); @@ -542,7 +538,7 @@ public void testMasterFailOverWithQueuedDeletes() throws Exception { assertThat(sme.getSnapshotName(), is(firstSnapshot)); } } - expectThrows(SnapshotException.class, snapshotThreeFuture::actionGet); + expectThrows(SnapshotException.class, snapshotThreeFuture); logger.info("--> verify that all snapshots are gone and no more work is left in the cluster state"); awaitNoMoreRunningOperations(); @@ -597,12 +593,12 @@ public void testQueuedDeletesWithFailures() throws Exception { awaitNDeletionsInProgress(2); unblockNode(repoName, masterNode); - expectThrows(UncategorizedExecutionException.class, firstDeleteFuture::actionGet); +
expectThrows(UncategorizedExecutionException.class, firstDeleteFuture); // Second delete works out cleanly since the repo is unblocked now assertThat(secondDeleteFuture.get().isAcknowledged(), is(true)); // Snapshot should have been aborted - final SnapshotException snapshotException = expectThrows(SnapshotException.class, snapshotFuture::actionGet); + final SnapshotException snapshotException = expectThrows(SnapshotException.class, snapshotFuture); assertThat(snapshotException.getMessage(), containsString(SnapshotsInProgress.ABORTED_FAILURE_TEXT)); assertThat(clusterAdmin().prepareGetSnapshots(repoName).get().getSnapshots(), empty()); @@ -629,7 +625,7 @@ public void testQueuedDeletesWithOverlap() throws Exception { // Second delete works out cleanly since the repo is unblocked now assertThat(secondDeleteFuture.get().isAcknowledged(), is(true)); // Snapshot should have been aborted - final SnapshotException snapshotException = expectThrows(SnapshotException.class, snapshotFuture::actionGet); + final SnapshotException snapshotException = expectThrows(SnapshotException.class, snapshotFuture); assertThat(snapshotException.getMessage(), containsString(SnapshotsInProgress.ABORTED_FAILURE_TEXT)); assertThat(clusterAdmin().prepareGetSnapshots(repoName).get().getSnapshots(), empty()); @@ -696,7 +692,7 @@ public void testQueuedOperationsOnMasterDisconnect() throws Exception { logger.info("--> make sure all failing requests get a response"); assertAcked(firstDeleteFuture.get()); assertAcked(secondDeleteFuture.get()); - expectThrows(SnapshotException.class, createThirdSnapshot::actionGet); + expectThrows(SnapshotException.class, createThirdSnapshot); awaitNoMoreRunningOperations(); } @@ -737,8 +733,8 @@ public void testQueuedOperationsOnMasterDisconnectAndRepoFailure() throws Except networkDisruption.stopDisrupting(); logger.info("--> make sure all failing requests get a response"); - expectThrows(SnapshotException.class, firstFailedSnapshotFuture::actionGet); - expectThrows(SnapshotException.class, secondFailedSnapshotFuture::actionGet); + expectThrows(SnapshotException.class, firstFailedSnapshotFuture); + expectThrows(SnapshotException.class, secondFailedSnapshotFuture); assertAcked(deleteFuture.get()); awaitNoMoreRunningOperations(); @@ -808,8 +804,8 @@ public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOver() throws E ensureStableCluster(3); awaitNoMoreRunningOperations(); - expectThrows(ElasticsearchException.class, snapshotThree::actionGet); - expectThrows(ElasticsearchException.class, snapshotFour::actionGet); + expectThrows(ElasticsearchException.class, snapshotThree); + expectThrows(ElasticsearchException.class, snapshotFour); } public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOver2() throws Exception { @@ -841,8 +837,8 @@ public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOver2() throws unblockNode(repoName, masterNode); networkDisruption.stopDisrupting(); awaitNoMoreRunningOperations(); - expectThrows(ElasticsearchException.class, snapshotThree::actionGet); - expectThrows(ElasticsearchException.class, snapshotFour::actionGet); + expectThrows(ElasticsearchException.class, snapshotThree); + expectThrows(ElasticsearchException.class, snapshotFour); } public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOverMultipleRepos() throws Exception { @@ -885,8 +881,8 @@ public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOverMultipleRep ensureStableCluster(3); awaitNoMoreRunningOperations(); - 
expectThrows(ElasticsearchException.class, snapshotThree::actionGet); - expectThrows(ElasticsearchException.class, snapshotFour::actionGet); + expectThrows(ElasticsearchException.class, snapshotThree); + expectThrows(ElasticsearchException.class, snapshotFour); assertAcked(deleteFuture.get()); try { createBlockedSnapshot.actionGet(); @@ -1031,7 +1027,7 @@ public void testQueuedOperationsAfterFinalizationFailure() throws Exception { unblockNode(repoName, masterName); - expectThrows(SnapshotException.class, snapshotThree::actionGet); + expectThrows(SnapshotException.class, snapshotThree); assertAcked(deleteSnapshotOne.get()); } @@ -1317,7 +1313,7 @@ public void testConcurrentOperationsLimit() throws Exception { final ConcurrentSnapshotExecutionException cse = expectThrows( ConcurrentSnapshotExecutionException.class, - () -> clusterAdmin().prepareCreateSnapshot(repoName, "expected-to-fail").get() + clusterAdmin().prepareCreateSnapshot(repoName, "expected-to-fail") ); assertThat( cse.getMessage(), @@ -1407,7 +1403,7 @@ public void testQueuedDeleteAfterFinalizationFailure() throws Exception { awaitNDeletionsInProgress(1); unblockNode(repoName, masterNode); assertAcked(deleteFuture.get()); - final SnapshotException sne = expectThrows(SnapshotException.class, snapshotFuture::actionGet); + final SnapshotException sne = expectThrows(SnapshotException.class, snapshotFuture); assertThat(sne.getCause().getMessage(), containsString("exception after block")); } @@ -1430,7 +1426,7 @@ public void testAbortNotStartedSnapshotWithoutIO() throws Exception { awaitNumberOfSnapshotsInProgress(2); assertAcked(startDeleteSnapshot(repoName, snapshotTwo).get()); - final SnapshotException sne = expectThrows(SnapshotException.class, createSnapshot2Future::actionGet); + final SnapshotException sne = expectThrows(SnapshotException.class, createSnapshot2Future); assertFalse(createSnapshot1Future.isDone()); unblockNode(repoName, dataNode); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java index e7bc6f13383d1..f507e27c6073e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.snapshots; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; @@ -48,7 +49,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -92,7 +92,7 @@ public void testRecreateCorruptedRepositoryUnblocksIt() throws Exception { startDeleteSnapshot(repoName, snapshot).get(); logger.info("--> make sure snapshot doesn't exist"); - expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareGetSnapshots(repoName).addSnapshots(snapshot).get()); + expectThrows(SnapshotMissingException.class, 
clusterAdmin().prepareGetSnapshots(repoName).addSnapshots(snapshot)); } public void testConcurrentlyChangeRepositoryContents() throws Exception { @@ -162,10 +162,7 @@ public void testConcurrentlyChangeRepositoryContents() throws Exception { startDeleteSnapshot(repoName, snapshot).get(); logger.info("--> make sure snapshot doesn't exist"); - expectThrows( - SnapshotMissingException.class, - () -> client.admin().cluster().prepareGetSnapshots(repoName).addSnapshots(snapshot).get() - ); + expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots(repoName).addSnapshots(snapshot)); } public void testFindDanglingLatestGeneration() throws Exception { @@ -232,7 +229,7 @@ public void testFindDanglingLatestGeneration() throws Exception { assertThat(getRepositoryData(repoName).getGenId(), is(beforeMoveGen + 2)); logger.info("--> make sure snapshot doesn't exist"); - expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareGetSnapshots(repoName).addSnapshots(snapshot).get()); + expectThrows(SnapshotMissingException.class, clusterAdmin().prepareGetSnapshots(repoName).addSnapshots(snapshot)); } public void testHandlingMissingRootLevelSnapshotMetadata() throws Exception { @@ -578,10 +575,7 @@ public void testDeleteSnapshotWithMissingIndexAndShardMetadata() throws Exceptio logger.info("--> make sure snapshot doesn't exist"); - expectThrows( - SnapshotMissingException.class, - () -> client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1").get() - ); + expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1")); for (String index : indices) { assertTrue(Files.notExists(indicesPath.resolve(indexIds.get(index).getId()))); @@ -618,10 +612,7 @@ public void testDeleteSnapshotWithMissingMetadata() throws Exception { startDeleteSnapshot("test-repo", "test-snap-1").get(); logger.info("--> make sure snapshot doesn't exist"); - expectThrows( - SnapshotMissingException.class, - () -> client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1").get() - ); + expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1")); } public void testDeleteSnapshotWithCorruptedSnapshotFile() throws Exception { @@ -662,10 +653,7 @@ public void testDeleteSnapshotWithCorruptedSnapshotFile() throws Exception { startDeleteSnapshot("test-repo", "test-snap-1").get(); logger.info("--> make sure snapshot doesn't exist"); - expectThrows( - SnapshotMissingException.class, - () -> client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1").get().getSnapshots() - ); + expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1")); logger.info("--> make sure that we can create the snapshot again"); createSnapshotResponse = client.admin() @@ -723,11 +711,9 @@ public void testDeleteSnapshotWithCorruptedGlobalState() throws Exception { assertThat(snapshotStatusResponse.getSnapshots().get(0).getSnapshot().getSnapshotId().getName(), equalTo("test-snap")); assertAcked(startDeleteSnapshot("test-repo", "test-snap").get()); - expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareGetSnapshots("test-repo").addSnapshots("test-snap").get()); - assertRequestBuilderThrows( - clusterAdmin().prepareSnapshotStatus("test-repo").addSnapshots("test-snap"), - SnapshotMissingException.class - ); + 
expectThrows(SnapshotMissingException.class, clusterAdmin().prepareGetSnapshots("test-repo").addSnapshots("test-snap")); + ActionRequestBuilder<?, ?> builder = clusterAdmin().prepareSnapshotStatus("test-repo").addSnapshots("test-snap"); + expectThrows(SnapshotMissingException.class, builder); createFullSnapshot("test-repo", "test-snap"); } @@ -797,14 +783,14 @@ private void assertRepositoryBlocked(String repo, String existingSnapshot) { logger.info("--> try to delete snapshot"); final RepositoryException ex = expectThrows( RepositoryException.class, - () -> clusterAdmin().prepareDeleteSnapshot(repo, existingSnapshot).get() + clusterAdmin().prepareDeleteSnapshot(repo, existingSnapshot) ); assertThat(ex.getMessage(), containsString("concurrent modification of the index-N file")); logger.info("--> try to create snapshot"); final RepositoryException ex2 = expectThrows( RepositoryException.class, - () -> clusterAdmin().prepareCreateSnapshot(repo, existingSnapshot).get() + clusterAdmin().prepareCreateSnapshot(repo, existingSnapshot) ); assertThat(ex2.getMessage(), containsString("The repository has been disabled to prevent data corruption")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java index 3a72ab792f571..ef8ae3cf1cffb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.snapshots; import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -30,7 +31,6 @@ import java.util.function.Function; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; @@ -65,7 +65,8 @@ public void testShouldNotRestoreRepositoryMetadata() { .get(); logger.info("make sure old repository wasn't restored"); - assertRequestBuilderThrows(clusterAdmin().prepareGetRepositories("test-repo-1"), RepositoryMissingException.class); + ActionRequestBuilder<?, ?> builder = clusterAdmin().prepareGetRepositories("test-repo-1"); + expectThrows(RepositoryMissingException.class, builder); assertThat(clusterAdmin().prepareGetRepositories("test-repo-2").get().repositories().size(), equalTo(1)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 089f6c09806cd..6ca3fccd1e292 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -196,7 +196,7 @@ public void testSnapshotWithStuckNode() throws Exception { } logger.info("--> making sure that snapshot no longer exists"); - expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get()); +
clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap")); logger.info("--> trigger repository cleanup"); clusterAdmin().prepareCleanupRepository("test-repo").get(); @@ -263,10 +263,9 @@ public void testRestoreIndexWithMissingShards() throws Exception { logger.info("--> start snapshot with default settings without a closed index - should fail"); final SnapshotException sne = expectThrows( SnapshotException.class, - () -> clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-1") + clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-1") .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed") .setWaitForCompletion(true) - .get() ); assertThat(sne.getMessage(), containsString("Indices don't have primary shards")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java index 1f86d4cb39ea4..c3dbfd03cae38 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java @@ -83,20 +83,16 @@ public void testResetSystemIndices() throws Exception { ); // verify that both indices are gone - Exception e1 = expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareGetIndex().addIndices(systemIndex1).get()); - + Exception e1 = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareGetIndex().addIndices(systemIndex1)); assertThat(e1.getMessage(), containsString("no such index")); - Exception e2 = expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareGetIndex().addIndices(associatedIndex).get()); - + Exception e2 = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareGetIndex().addIndices(associatedIndex)); assertThat(e2.getMessage(), containsString("no such index")); - Exception e3 = expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareGetIndex().addIndices(systemIndex2).get()); - + Exception e3 = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareGetIndex().addIndices(systemIndex2)); assertThat(e3.getMessage(), containsString("no such index")); GetIndexResponse response = indicesAdmin().prepareGetIndex().addIndices("my_index").get(); - assertThat(response.getIndices(), arrayContaining("my_index")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java index a5fe09c68f862..6b5b3826272ce 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java @@ -226,21 +226,17 @@ public void testPaginationRequiresVerboseListing() throws Exception { createNSnapshots(repoName, randomIntBetween(1, 5)); expectThrows( ActionRequestValidationException.class, - () -> clusterAdmin().prepareGetSnapshots(repoName) + clusterAdmin().prepareGetSnapshots(repoName) .setVerbose(false) .setSort(GetSnapshotsRequest.SortBy.DURATION) .setSize(GetSnapshotsRequest.NO_LIMIT) - .execute() - .actionGet() ); expectThrows( ActionRequestValidationException.class, - () -> clusterAdmin().prepareGetSnapshots(repoName) + clusterAdmin().prepareGetSnapshots(repoName) .setVerbose(false) .setSort(GetSnapshotsRequest.SortBy.START_TIME) .setSize(randomIntBetween(1, 100)) - .execute() - 
.actionGet() ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java index e6bae861e1d04..8fc6e9e2aa3d8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java @@ -127,7 +127,7 @@ public void testConcurrentDeleteFromOtherCluster() throws InterruptedException { final SnapshotException sne = expectThrows( SnapshotException.class, - () -> clusterAdmin().prepareCreateSnapshot(repoNameOnFirstCluster, "snap-4").setWaitForCompletion(true).get() + clusterAdmin().prepareCreateSnapshot(repoNameOnFirstCluster, "snap-4").setWaitForCompletion(true) ); assertThat(sne.getMessage(), containsString("failed to update snapshot in repository")); final RepositoryException cause = (RepositoryException) sne.getCause(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java index a6c8e0b08c9ed..6d36ce6924826 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -9,6 +9,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.delete.TransportDeleteSnapshotAction; @@ -48,7 +49,6 @@ import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.READONLY_SETTING_KEY; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -204,22 +204,27 @@ public void testRepositoryVerification() { Settings settings = Settings.builder().put("location", randomRepoPath()).put("random_control_io_exception_rate", 1.0).build(); Settings readonlySettings = Settings.builder().put(settings).put(READONLY_SETTING_KEY, true).build(); logger.info("--> creating repository that cannot write any files - should fail"); - assertRequestBuilderThrows( - client.admin().cluster().preparePutRepository("test-repo-1").setType("mock").setSettings(settings), - RepositoryVerificationException.class - ); + ActionRequestBuilder<?, ?> builder3 = client.admin() + .cluster() + .preparePutRepository("test-repo-1") + .setType("mock") + .setSettings(settings); + expectThrows(RepositoryVerificationException.class, builder3); logger.info("--> creating read-only repository that cannot read any files - should fail"); - assertRequestBuilderThrows( - client.admin().cluster().preparePutRepository("test-repo-2").setType("mock").setSettings(readonlySettings), - RepositoryVerificationException.class - ); + ActionRequestBuilder<?, ?> builder2 = client.admin() + .cluster() + .preparePutRepository("test-repo-2") + .setType("mock") + .setSettings(readonlySettings); + expectThrows(RepositoryVerificationException.class, builder2);
logger.info("--> creating repository that cannot write any files, but suppress verification - should be acked"); assertAcked(client.admin().cluster().preparePutRepository("test-repo-1").setType("mock").setSettings(settings).setVerify(false)); logger.info("--> verifying repository"); - assertRequestBuilderThrows(client.admin().cluster().prepareVerifyRepository("test-repo-1"), RepositoryVerificationException.class); + ActionRequestBuilder builder1 = client.admin().cluster().prepareVerifyRepository("test-repo-1"); + expectThrows(RepositoryVerificationException.class, builder1); logger.info("--> creating read-only repository that cannot read any files, but suppress verification - should be acked"); assertAcked( @@ -227,7 +232,8 @@ public void testRepositoryVerification() { ); logger.info("--> verifying repository"); - assertRequestBuilderThrows(client.admin().cluster().prepareVerifyRepository("test-repo-2"), RepositoryVerificationException.class); + ActionRequestBuilder builder = client.admin().cluster().prepareVerifyRepository("test-repo-2"); + expectThrows(RepositoryVerificationException.class, builder); Path location = randomRepoPath(); @@ -286,20 +292,14 @@ public void testRepositoryConflict() throws Exception { ); logger.info("--> try deleting the repository, should fail because the deletion of the snapshot is in progress"); - RepositoryConflictException e1 = expectThrows( - RepositoryConflictException.class, - () -> clusterAdmin().prepareDeleteRepository(repo).get() - ); + RepositoryConflictException e1 = expectThrows(RepositoryConflictException.class, clusterAdmin().prepareDeleteRepository(repo)); assertThat(e1.status(), equalTo(RestStatus.CONFLICT)); assertThat(e1.getMessage(), containsString("trying to modify or unregister repository that is currently used")); logger.info("--> try updating the repository, should fail because the deletion of the snapshot is in progress"); RepositoryConflictException e2 = expectThrows( RepositoryConflictException.class, - () -> clusterAdmin().preparePutRepository(repo) - .setType("mock") - .setSettings(Settings.builder().put("location", randomRepoPath())) - .get() + clusterAdmin().preparePutRepository(repo).setType("mock").setSettings(Settings.builder().put("location", randomRepoPath())) ); assertThat(e2.status(), equalTo(RestStatus.CONFLICT)); assertThat(e2.getMessage(), containsString("trying to modify or unregister repository that is currently used")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java index 0f0858982b4ad..d8bc9327a2edd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java @@ -37,7 +37,7 @@ public void testRepositoryThrottlingStats() throws Exception { IndexStats indexStats = indicesStats.getIndex("test-idx"); long totalSizeInBytes = 0; for (ShardStats shard : indexStats.getShards()) { - totalSizeInBytes += shard.getStats().getStore().getSizeInBytes(); + totalSizeInBytes += shard.getStats().getStore().sizeInBytes(); } logger.info("--> total shards size: {} bytes", totalSizeInBytes); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java index 20313767c0677..6c452103cc014 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; @@ -55,7 +56,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertIndexTemplateExists; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertIndexTemplateMissing; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -694,30 +694,26 @@ public void testChangeSettingsOnRestore() throws Exception { .build(); logger.info("--> try restoring while changing the number of shards - should fail"); - assertRequestBuilderThrows( - client.admin() - .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") - .setIgnoreIndexSettings("index.analysis.*") - .setIndexSettings(newIncorrectIndexSettings) - .setWaitForCompletion(true), - SnapshotRestoreException.class - ); + ActionRequestBuilder<?, ?> builder1 = client.admin() + .cluster() + .prepareRestoreSnapshot("test-repo", "test-snap") + .setIgnoreIndexSettings("index.analysis.*") + .setIndexSettings(newIncorrectIndexSettings) + .setWaitForCompletion(true); + expectThrows(SnapshotRestoreException.class, builder1); logger.info("--> try restoring while changing the number of replicas to a negative number - should fail"); Settings newIncorrectReplicasIndexSettings = Settings.builder() .put(newIndexSettings) .put(SETTING_NUMBER_OF_REPLICAS.substring(IndexMetadata.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, -1)) .build(); - assertRequestBuilderThrows( - client.admin() - .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") - .setIgnoreIndexSettings("index.analysis.*") - .setIndexSettings(newIncorrectReplicasIndexSettings) - .setWaitForCompletion(true), - IllegalArgumentException.class - ); + ActionRequestBuilder<?, ?> builder = client.admin() + .cluster() + .prepareRestoreSnapshot("test-repo", "test-snap") + .setIgnoreIndexSettings("index.analysis.*") + .setIndexSettings(newIncorrectReplicasIndexSettings) + .setWaitForCompletion(true); + expectThrows(IllegalArgumentException.class, builder); logger.info("--> restore index with correct settings from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() @@ -872,11 +868,10 @@ public void testForbidDisableSoftDeletesDuringRestore() throws Exception { createSnapshot("test-repo", "snapshot-0", Collections.singletonList("test-index")); final SnapshotRestoreException restoreError = expectThrows( SnapshotRestoreException.class, - () -> clusterAdmin().prepareRestoreSnapshot("test-repo", "snapshot-0") + clusterAdmin().prepareRestoreSnapshot("test-repo", "snapshot-0") .setIndexSettings(Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), false)) .setRenamePattern("test-index") .setRenameReplacement("new-index") - .get() );
assertThat(restoreError.getMessage(), containsString("cannot disable setting [index.soft_deletes.enabled] on restore")); } @@ -889,7 +884,7 @@ public void testFailOnAncientVersion() throws Exception { final String oldSnapshot = initWithSnapshotVersion(repoName, repoPath, oldVersion); final SnapshotRestoreException snapshotRestoreException = expectThrows( SnapshotRestoreException.class, - () -> clusterAdmin().prepareRestoreSnapshot(repoName, oldSnapshot).get() + clusterAdmin().prepareRestoreSnapshot(repoName, oldSnapshot) ); assertThat( snapshotRestoreException.getMessage(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index abc2bf8fb7219..ed070c3224aa2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -767,7 +767,7 @@ public void testUnallocatedShards() { logger.info("--> snapshot"); final SnapshotException sne = expectThrows( SnapshotException.class, - () -> clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get() + clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx") ); assertThat(sne.getMessage(), containsString("Indices don't have primary shards")); assertThat(getRepositoryData("test-repo"), is(RepositoryData.EMPTY)); @@ -1180,7 +1180,7 @@ public void testSnapshotStatus() throws Exception { // test that getting an unavailable snapshot status throws an exception if ignoreUnavailable is false on the request SnapshotMissingException ex = expectThrows( SnapshotMissingException.class, - () -> client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap-doesnt-exist").get() + client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap-doesnt-exist") ); assertEquals("[test-repo:test-snap-doesnt-exist] is missing", ex.getMessage()); // test that getting an unavailable snapshot status does not throw an exception if ignoreUnavailable is true on the request @@ -1453,7 +1453,7 @@ public void testDeleteSnapshotWhileRestoringFails() throws Exception { logger.info("--> try deleting the snapshot while the restore is in progress (should throw an error)"); ConcurrentSnapshotExecutionException e = expectThrows( ConcurrentSnapshotExecutionException.class, - () -> clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName).get() + clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName) ); assertEquals(repoName, e.getRepositoryName()); assertEquals(snapshotName, e.getSnapshotName()); @@ -1483,16 +1483,10 @@ public void testSnapshotName() throws Exception { createRepository("test-repo", "fs"); - expectThrows(InvalidSnapshotNameException.class, () -> client.admin().cluster().prepareCreateSnapshot("test-repo", "_foo").get()); - expectThrows( - SnapshotMissingException.class, - () -> client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("_foo").get() - ); - expectThrows(SnapshotMissingException.class, () -> client.admin().cluster().prepareDeleteSnapshot("test-repo", "_foo").get()); - expectThrows( - SnapshotMissingException.class, - () -> client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("_foo").get() - ); + expectThrows(InvalidSnapshotNameException.class, 
client.admin().cluster().prepareCreateSnapshot("test-repo", "_foo")); + expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("_foo")); + expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareDeleteSnapshot("test-repo", "_foo")); + expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("_foo")); } public void testListCorruptedSnapshot() throws Exception { @@ -1538,7 +1532,7 @@ public void testListCorruptedSnapshot() throws Exception { final SnapshotException ex = expectThrows( SnapshotException.class, - () -> client.admin().cluster().prepareGetSnapshots("test-repo").setIgnoreUnavailable(false).get() + client.admin().cluster().prepareGetSnapshots("test-repo").setIgnoreUnavailable(false) ); assertThat(ex.getRepositoryName(), equalTo("test-repo")); assertThat(ex.getSnapshotName(), equalTo("test-snap-2")); @@ -1580,7 +1574,7 @@ public void testRestoreSnapshotWithCorruptedGlobalState() throws Exception { SnapshotException ex = expectThrows( SnapshotException.class, - () -> clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName).setRestoreGlobalState(true).setWaitForCompletion(true).get() + clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName).setRestoreGlobalState(true).setWaitForCompletion(true) ); assertThat(ex.getRepositoryName(), equalTo(repoName)); assertThat(ex.getSnapshotName(), equalTo(snapshotName)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java index df2cf31e37470..c31eafa8444ad 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java @@ -70,12 +70,7 @@ public void testExceptionWhenRestoringPersistentSettings() { logger.info("--> restore snapshot"); final IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, - client.admin() - .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") - .setRestoreGlobalState(true) - .setWaitForCompletion(true) - .execute()::actionGet + client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true) ); assertEquals(BrokenSettingPlugin.EXCEPTION.getMessage(), ex.getMessage()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java new file mode 100644 index 0000000000000..922fd2ce4cb21 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java @@ -0,0 +1,540 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.snapshots; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; +import org.elasticsearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; +import org.elasticsearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; +import org.elasticsearch.action.admin.cluster.configuration.TransportClearVotingConfigExclusionsAction; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.elasticsearch.action.support.ActionTestUtils; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.SnapshotDeletionsInProgress; +import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; +import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.transport.MockTransportService; + +import java.util.Collection; +import java.util.Map; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.oneOf; + +public class SnapshotShutdownIT extends AbstractSnapshotIntegTestCase { + + private static final String REQUIRE_NODE_NAME_SETTING = IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._name"; + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), MockTransportService.TestPlugin.class); + } + + public void testRestartNodeDuringSnapshot() throws Exception { + // Marking a node for restart has no impact on snapshots (see #71333 for how to handle this case) + internalCluster().ensureAtLeastNumDataNodes(1); + final var originalNode = internalCluster().startDataOnlyNode(); + final var indexName = randomIdentifier(); + createIndexWithContent(indexName, indexSettings(1, 0).put(REQUIRE_NODE_NAME_SETTING, originalNode).build()); + + final var repoName = randomIdentifier(); + createRepository(repoName, "mock"); + + final var clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + final var snapshotFuture = startFullSnapshotBlockedOnDataNode(randomIdentifier(), repoName, originalNode); + final var snapshotCompletesWithoutPausingListener = ClusterServiceUtils.addTemporaryStateListener(clusterService,
state -> { + final var entriesForRepo = SnapshotsInProgress.get(state).forRepo(repoName); + if (entriesForRepo.isEmpty()) { + return true; + } + assertThat(entriesForRepo, hasSize(1)); + final var shardSnapshotStatuses = entriesForRepo.iterator().next().shards().values(); + assertThat(shardSnapshotStatuses, hasSize(1)); + assertThat( + shardSnapshotStatuses.iterator().next().state(), + oneOf(SnapshotsInProgress.ShardState.INIT, SnapshotsInProgress.ShardState.SUCCESS) + ); + return false; + }); + addUnassignedShardsWatcher(clusterService, indexName); + + PlainActionFuture.<Void, RuntimeException>get( + fut -> putShutdownMetadata( + clusterService, + SingleNodeShutdownMetadata.builder() + .setType(SingleNodeShutdownMetadata.Type.RESTART) + .setStartedAtMillis(clusterService.threadPool().absoluteTimeInMillis()) + .setReason("test"), + originalNode, + fut + ), + 10, + TimeUnit.SECONDS + ); + assertFalse(snapshotCompletesWithoutPausingListener.isDone()); + unblockAllDataNodes(repoName); // lets the shard snapshot continue so the snapshot can succeed + assertEquals(SnapshotState.SUCCESS, snapshotFuture.get(10, TimeUnit.SECONDS).getSnapshotInfo().state()); + safeAwait(snapshotCompletesWithoutPausingListener); + clearShutdownMetadata(clusterService); + } + + public void testRemoveNodeDuringSnapshot() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(1); + final var originalNode = internalCluster().startDataOnlyNode(); + final var indexName = randomIdentifier(); + createIndexWithContent(indexName, indexSettings(1, 0).put(REQUIRE_NODE_NAME_SETTING, originalNode).build()); + + final var repoName = randomIdentifier(); + createRepository(repoName, "mock"); + + final var clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + final var snapshotFuture = startFullSnapshotBlockedOnDataNode(randomIdentifier(), repoName, originalNode); + final var snapshotPausedListener = createSnapshotPausedListener(clusterService, repoName, indexName); + addUnassignedShardsWatcher(clusterService, indexName); + + updateIndexSettings(Settings.builder().putNull(REQUIRE_NODE_NAME_SETTING), indexName); + putShutdownForRemovalMetadata(originalNode, clusterService); + unblockAllDataNodes(repoName); // lets the shard snapshot abort, which frees up the shard so it can move + safeAwait(snapshotPausedListener); + + // snapshot completes when the node vacates even though it hasn't been removed yet + assertEquals(SnapshotState.SUCCESS, snapshotFuture.get(10, TimeUnit.SECONDS).getSnapshotInfo().state()); + + if (randomBoolean()) { + internalCluster().stopNode(originalNode); + } + + clearShutdownMetadata(clusterService); + } + + public void testRemoveNodeAndFailoverMasterDuringSnapshot() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(1); + final var originalNode = internalCluster().startDataOnlyNode(); + final var indexName = randomIdentifier(); + createIndexWithContent(indexName, indexSettings(1, 0).put(REQUIRE_NODE_NAME_SETTING, originalNode).build()); + + final var repoName = randomIdentifier(); + createRepository(repoName, "mock"); + + final var clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + final var snapshotFuture = startFullSnapshotBlockedOnDataNode(randomIdentifier(), repoName, originalNode); + final var snapshotPausedListener = createSnapshotPausedListener(clusterService, repoName, indexName); + addUnassignedShardsWatcher(clusterService, indexName); + + final var snapshotStatusUpdateBarrier = new CyclicBarrier(2); + final var masterName =
internalCluster().getMasterName(); + final var masterTransportService = MockTransportService.getInstance(masterName); + masterTransportService.addRequestHandlingBehavior( + SnapshotsService.UPDATE_SNAPSHOT_STATUS_ACTION_NAME, + (handler, request, channel, task) -> masterTransportService.getThreadPool().generic().execute(() -> { + safeAwait(snapshotStatusUpdateBarrier); + safeAwait(snapshotStatusUpdateBarrier); + try { + handler.messageReceived(request, channel, task); + } catch (Exception e) { + fail(e); + } + }) + ); + + updateIndexSettings(Settings.builder().putNull(REQUIRE_NODE_NAME_SETTING), indexName); + putShutdownForRemovalMetadata(originalNode, clusterService); + unblockAllDataNodes(repoName); // lets the shard snapshot abort, which frees up the shard so it can move + safeAwait(snapshotStatusUpdateBarrier); // wait for the data node to finish and then try and update the master + masterTransportService.clearAllRules(); // the shard might migrate to the old master, so let it process more updates + + if (internalCluster().numMasterNodes() == 1) { + internalCluster().startMasterOnlyNode(); + } + safeAwait( + SubscribableListener.<ActionResponse.Empty>newForked( + l -> client().execute( + TransportAddVotingConfigExclusionsAction.TYPE, + new AddVotingConfigExclusionsRequest(Strings.EMPTY_ARRAY, new String[] { masterName }, TimeValue.timeValueSeconds(10)), + l + ) + ) + ); + safeAwait( + ClusterServiceUtils.addTemporaryStateListener( + clusterService, + s -> s.nodes().getMasterNode() != null && s.nodes().getMasterNode().getName().equals(masterName) == false + ) + ); + + logger.info("--> new master elected, releasing blocked request"); + safeAwait(snapshotStatusUpdateBarrier); // let the old master try and update the state + logger.info("--> waiting for snapshot pause"); + safeAwait(snapshotPausedListener); + logger.info("--> snapshot was paused"); + + // snapshot API fails on master failover + assertThat( + asInstanceOf( + SnapshotException.class, + ExceptionsHelper.unwrapCause( + expectThrows(ExecutionException.class, RuntimeException.class, () -> snapshotFuture.get(10, TimeUnit.SECONDS)) + ) + ).getMessage(), + containsString("no longer master") + ); + + // but the snapshot itself completes + safeAwait(ClusterServiceUtils.addTemporaryStateListener(clusterService, state -> SnapshotsInProgress.get(state).isEmpty())); + + // flush master queue to ensure the completion is applied everywhere + safeAwait( + SubscribableListener.<ClusterHealthResponse>newForked( + l -> client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).execute(l) + ) + ); + + // and succeeds + final var snapshots = safeAwait( + SubscribableListener.<GetSnapshotsResponse>newForked( + l -> client().admin().cluster().getSnapshots(new GetSnapshotsRequest(repoName), l) + ) + ).getSnapshots(); + assertThat(snapshots, hasSize(1)); + assertEquals(SnapshotState.SUCCESS, snapshots.get(0).state()); + + if (randomBoolean()) { + internalCluster().stopNode(originalNode); + } + + safeAwait(SubscribableListener.<ActionResponse.Empty>newForked(l -> { + final var clearVotingConfigExclusionsRequest = new ClearVotingConfigExclusionsRequest(); + clearVotingConfigExclusionsRequest.setWaitForRemoval(false); + client().execute(TransportClearVotingConfigExclusionsAction.TYPE, clearVotingConfigExclusionsRequest, l); + })); + + clearShutdownMetadata(internalCluster().getCurrentMasterNodeInstance(ClusterService.class)); + } + + public void testRemoveNodeDuringSnapshotWithOtherRunningShardSnapshots() throws Exception { + // SnapshotInProgressAllocationDecider only considers snapshots having shards in INIT state, so
a single-shard snapshot such as the + // one in testRemoveNodeDuringSnapshot will be ignored when the shard is paused, permitting the shard movement. This test verifies + // that the shard is permitted to move even when the snapshot has other shards in INIT state. + + final var repoName = randomIdentifier(); + createRepository(repoName, "mock"); + + // create another index on another node which will be blocked (remain in state INIT) throughout + final var otherNode = internalCluster().startDataOnlyNode(); + final var otherIndex = randomIdentifier(); + createIndexWithContent(otherIndex, indexSettings(1, 0).put(REQUIRE_NODE_NAME_SETTING, otherNode).build()); + blockDataNode(repoName, otherNode); + + final var nodeForRemoval = internalCluster().startDataOnlyNode(); + final var indexName = randomIdentifier(); + createIndexWithContent(indexName, indexSettings(1, 0).put(REQUIRE_NODE_NAME_SETTING, nodeForRemoval).build()); + + final var clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + final var snapshotFuture = startFullSnapshotBlockedOnDataNode(randomIdentifier(), repoName, nodeForRemoval); + final var snapshotPausedListener = createSnapshotPausedListener(clusterService, repoName, indexName); + addUnassignedShardsWatcher(clusterService, indexName); + + waitForBlock(otherNode, repoName); + + putShutdownForRemovalMetadata(nodeForRemoval, clusterService); + unblockNode(repoName, nodeForRemoval); // lets the shard snapshot abort, which frees up the shard to move + safeAwait(snapshotPausedListener); + + // adjust the allocation filter so that the shard moves + updateIndexSettings(Settings.builder().putNull(REQUIRE_NODE_NAME_SETTING), indexName); + + // wait for the target shard snapshot to succeed + safeAwait( + ClusterServiceUtils.addTemporaryStateListener( + clusterService, + state -> SnapshotsInProgress.get(state) + .asStream() + .allMatch( + e -> e.shards() + .entrySet() + .stream() + .anyMatch( + shardEntry -> shardEntry.getKey().getIndexName().equals(indexName) + && switch (shardEntry.getValue().state()) { + case INIT, PAUSED_FOR_NODE_REMOVAL -> false; + case SUCCESS -> true; + case FAILED, ABORTED, MISSING, QUEUED, WAITING -> throw new AssertionError(shardEntry.toString()); + } + ) + ) + ) + ); + + unblockAllDataNodes(repoName); + + // snapshot completes when the node vacates even though it hasn't been removed yet + assertEquals(SnapshotState.SUCCESS, snapshotFuture.get(10, TimeUnit.SECONDS).getSnapshotInfo().state()); + + if (randomBoolean()) { + internalCluster().stopNode(nodeForRemoval); + } + + clearShutdownMetadata(clusterService); + } + + public void testStartRemoveNodeButDoNotComplete() throws Exception { + final var primaryNode = internalCluster().startDataOnlyNode(); + final var indexName = randomIdentifier(); + createIndexWithContent(indexName, indexSettings(1, 0).put(REQUIRE_NODE_NAME_SETTING, primaryNode).build()); + + final var repoName = randomIdentifier(); + createRepository(repoName, "mock"); + + final var clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + final var snapshotFuture = startFullSnapshotBlockedOnDataNode(randomIdentifier(), repoName, primaryNode); + final var snapshotPausedListener = createSnapshotPausedListener(clusterService, repoName, indexName); + addUnassignedShardsWatcher(clusterService, indexName); + + putShutdownForRemovalMetadata(primaryNode, clusterService); + unblockAllDataNodes(repoName); // lets the shard snapshot abort, but allocation filtering stops it from moving + 
safeAwait(snapshotPausedListener); + assertFalse(snapshotFuture.isDone()); + + // give up on the node shutdown so the shard snapshot can restart + clearShutdownMetadata(clusterService); + + assertEquals(SnapshotState.SUCCESS, snapshotFuture.get(10, TimeUnit.SECONDS).getSnapshotInfo().state()); + } + + public void testAbortSnapshotWhileRemovingNode() throws Exception { + final var primaryNode = internalCluster().startDataOnlyNode(); + final var indexName = randomIdentifier(); + createIndexWithContent(indexName, indexSettings(1, 0).put(REQUIRE_NODE_NAME_SETTING, primaryNode).build()); + + final var repoName = randomIdentifier(); + createRepository(repoName, "mock"); + + final var snapshotName = randomIdentifier(); + final var snapshotFuture = startFullSnapshotBlockedOnDataNode(snapshotName, repoName, primaryNode); + + final var updateSnapshotStatusBarrier = new CyclicBarrier(2); + final var masterTransportService = MockTransportService.getInstance(internalCluster().getMasterName()); + masterTransportService.addRequestHandlingBehavior( + SnapshotsService.UPDATE_SNAPSHOT_STATUS_ACTION_NAME, + (handler, request, channel, task) -> masterTransportService.getThreadPool().generic().execute(() -> { + safeAwait(updateSnapshotStatusBarrier); + safeAwait(updateSnapshotStatusBarrier); + try { + handler.messageReceived(request, channel, task); + } catch (Exception e) { + fail(e); + } + }) + ); + + final var clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + addUnassignedShardsWatcher(clusterService, indexName); + putShutdownForRemovalMetadata(primaryNode, clusterService); + unblockAllDataNodes(repoName); // lets the shard snapshot abort, but allocation filtering stops it from moving + safeAwait(updateSnapshotStatusBarrier); // wait for data node to notify master that the shard snapshot is paused + + // abort snapshot (and wait for the abort to land in the cluster state) + final var deleteStartedListener = ClusterServiceUtils.addTemporaryStateListener(clusterService, state -> { + if (SnapshotDeletionsInProgress.get(state).getEntries().isEmpty()) { + return false; + } + + assertEquals(SnapshotsInProgress.State.ABORTED, SnapshotsInProgress.get(state).forRepo(repoName).get(0).state()); + return true; + }); + + final var deleteSnapshotFuture = startDeleteSnapshot(repoName, snapshotName); // abort the snapshot + safeAwait(deleteStartedListener); + + safeAwait(updateSnapshotStatusBarrier); // process pause notification now that the snapshot is ABORTED + + assertEquals(SnapshotState.FAILED, snapshotFuture.get(10, TimeUnit.SECONDS).getSnapshotInfo().state()); + assertTrue(deleteSnapshotFuture.get(10, TimeUnit.SECONDS).isAcknowledged()); + + clearShutdownMetadata(clusterService); + } + + public void testShutdownWhileSuccessInFlight() throws Exception { + final var primaryNode = internalCluster().startDataOnlyNode(); + final var indexName = randomIdentifier(); + createIndexWithContent(indexName, indexSettings(1, 0).put(REQUIRE_NODE_NAME_SETTING, primaryNode).build()); + + final var repoName = randomIdentifier(); + createRepository(repoName, "mock"); + + final var clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + final var masterTransportService = MockTransportService.getInstance(internalCluster().getMasterName()); + masterTransportService.addRequestHandlingBehavior( + SnapshotsService.UPDATE_SNAPSHOT_STATUS_ACTION_NAME, + (handler, request, channel, task) -> putShutdownForRemovalMetadata( + clusterService, + primaryNode, + 
ActionTestUtils.assertNoFailureListener(ignored -> handler.messageReceived(request, channel, task)) + ) + ); + + addUnassignedShardsWatcher(clusterService, indexName); + assertEquals( + SnapshotState.SUCCESS, + startFullSnapshot(repoName, randomIdentifier()).get(10, TimeUnit.SECONDS).getSnapshotInfo().state() + ); + clearShutdownMetadata(clusterService); + } + + private static SubscribableListener<Void> createSnapshotPausedListener( + ClusterService clusterService, + String repoName, + String indexName + ) { + return ClusterServiceUtils.addTemporaryStateListener(clusterService, state -> { + final var entriesForRepo = SnapshotsInProgress.get(state).forRepo(repoName); + assertThat(entriesForRepo, hasSize(1)); + final var shardSnapshotStatuses = entriesForRepo.iterator() + .next() + .shards() + .entrySet() + .stream() + .flatMap(e -> e.getKey().getIndexName().equals(indexName) ? Stream.of(e.getValue()) : Stream.of()) + .toList(); + assertThat(shardSnapshotStatuses, hasSize(1)); + final var shardState = shardSnapshotStatuses.iterator().next().state(); + assertThat(shardState, oneOf(SnapshotsInProgress.ShardState.INIT, SnapshotsInProgress.ShardState.PAUSED_FOR_NODE_REMOVAL)); + return shardState == SnapshotsInProgress.ShardState.PAUSED_FOR_NODE_REMOVAL; + }); + } + + private static void addUnassignedShardsWatcher(ClusterService clusterService, String indexName) { + ClusterServiceUtils.addTemporaryStateListener(clusterService, state -> { + final var indexRoutingTable = state.routingTable().index(indexName); + if (indexRoutingTable == null) { + // index was deleted, can remove this listener now + return true; + } + assertThat(indexRoutingTable.shardsWithState(ShardRoutingState.UNASSIGNED), empty()); + return false; + }); + } + + private static void putShutdownForRemovalMetadata(String nodeName, ClusterService clusterService) { + PlainActionFuture.<Void, RuntimeException>get( + fut -> putShutdownForRemovalMetadata(clusterService, nodeName, fut), + 10, + TimeUnit.SECONDS + ); + } + + private static void flushMasterQueue(ClusterService clusterService, ActionListener<Void> listener) { + clusterService.submitUnbatchedStateUpdateTask("flush queue", new ClusterStateUpdateTask(Priority.LANGUID) { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState; + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + + @Override + public void clusterStateProcessed(ClusterState initialState, ClusterState newState) { + listener.onResponse(null); + } + }); + } + + private static void putShutdownForRemovalMetadata(ClusterService clusterService, String nodeName, ActionListener<Void> listener) { + // not testing REPLACE just because it requires us to specify the replacement node + final var shutdownType = randomFrom(SingleNodeShutdownMetadata.Type.REMOVE, SingleNodeShutdownMetadata.Type.SIGTERM); + final var shutdownMetadata = SingleNodeShutdownMetadata.builder() + .setType(shutdownType) + .setStartedAtMillis(clusterService.threadPool().absoluteTimeInMillis()) + .setReason("test"); + switch (shutdownType) { + case SIGTERM -> shutdownMetadata.setGracePeriod(TimeValue.timeValueSeconds(60)); + } + SubscribableListener + + .newForked(l -> putShutdownMetadata(clusterService, shutdownMetadata, nodeName, l)) + .andThen((l, ignored) -> flushMasterQueue(clusterService, l)) + .addListener(listener); + } + + private static void putShutdownMetadata( + ClusterService clusterService, + SingleNodeShutdownMetadata.Builder shutdownMetadataBuilder, + String nodeName, + ActionListener<Void> listener + ) { + 
clusterService.submitUnbatchedStateUpdateTask("mark node for removal", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + final var nodeId = currentState.nodes().resolveNode(nodeName).getId(); + return currentState.copyAndUpdateMetadata( + mdb -> mdb.putCustom( + NodesShutdownMetadata.TYPE, + new NodesShutdownMetadata(Map.of(nodeId, shutdownMetadataBuilder.setNodeId(nodeId).build())) + ) + ); + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + + @Override + public void clusterStateProcessed(ClusterState initialState, ClusterState newState) { + listener.onResponse(null); + } + }); + } + + private static void clearShutdownMetadata(ClusterService clusterService) { + PlainActionFuture.<Void, RuntimeException>get(fut -> clusterService.submitUnbatchedStateUpdateTask("remove restart marker", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState.copyAndUpdateMetadata(mdb -> mdb.putCustom(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY)); + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + + @Override + public void clusterStateProcessed(ClusterState initialState, ClusterState newState) { + fut.onResponse(null); + } + }), 10, TimeUnit.SECONDS); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java index cc7c7709075c0..5bd59c712caf0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -138,7 +138,7 @@ public void testExceptionOnMissingSnapBlob() throws IOException { logger.info("--> delete snap-${uuid}.dat file for this snapshot to simulate concurrent delete"); IOUtils.rm(repoPath.resolve(BlobStoreRepository.SNAPSHOT_PREFIX + snapshotInfo.snapshotId().getUUID() + ".dat")); - expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get()); + expectThrows(SnapshotMissingException.class, clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap")); } public void testExceptionOnMissingShardLevelSnapBlob() throws IOException { @@ -167,10 +167,7 @@ public void testExceptionOnMissingShardLevelSnapBlob() throws IOException { .resolve(BlobStoreRepository.SNAPSHOT_PREFIX + snapshotInfo.snapshotId().getUUID() + ".dat") ); - expectThrows( - SnapshotMissingException.class, - () -> clusterAdmin().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").get() - ); + expectThrows(SnapshotMissingException.class, clusterAdmin().prepareSnapshotStatus("test-repo").setSnapshots("test-snap")); } public void testGetSnapshotsWithoutIndices() throws Exception { @@ -456,7 +453,7 @@ public void testGetSnapshotsWithSnapshotInProgress() throws Exception { expectThrows( SnapshotMissingException.class, - () -> clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots(notExistedSnapshotName).setIgnoreUnavailable(false).get() + clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots(notExistedSnapshotName).setIgnoreUnavailable(false) ); logger.info("--> unblock all data nodes"); @@ -499,7 +496,7 @@ public void testGetSnapshotsRequest() throws Exception { logger.info("--> get snapshots on an empty repository"); expectThrows( SnapshotMissingException.class, - () -> 
client.admin().cluster().prepareGetSnapshots(repositoryName).addSnapshots("non-existent-snapshot").get() + client.admin().cluster().prepareGetSnapshots(repositoryName).addSnapshots("non-existent-snapshot") ); // with ignore unavailable set to true, should not throw an exception GetSnapshotsResponse getSnapshotsResponse = client.admin() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java index 7eaa49b27007d..f70b86fd4fba2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.snapshots; +import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.tests.util.LuceneTestCase; @@ -16,7 +17,6 @@ import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder; @@ -24,16 +24,20 @@ import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.support.ListenableActionFuture; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; +import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -46,6 +50,7 @@ import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.repositories.RepositoryCleanupResult; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.test.InternalTestCluster; @@ -309,6 +314,10 @@ public void run() throws InterruptedException { startCleaner(); } + if (randomBoolean()) { + startNodeShutdownMarker(); + } + if (completedSnapshotLatch.await(30, TimeUnit.SECONDS)) { logger.info("--> completed target snapshot count, finishing test"); } else { @@ -371,16 +380,11 @@ private void acquirePermitsAtEnd( "--> current cluster state:\n{}", 
Strings.toString(clusterAdmin().prepareState().get().getState(), true, true) ); - logger.info( - "--> hot threads:\n{}", - clusterAdmin().prepareNodesHotThreads() - .setThreads(99999) - .setIgnoreIdleThreads(false) - .get() - .getNodes() - .stream() - .map(NodeHotThreads::getHotThreads) - .collect(Collectors.joining("\n")) + HotThreads.logLocalHotThreads( + logger, + Level.INFO, + "hot threads while failing to acquire permit [" + label + "]", + ReferenceDocs.LOGGING ); failedPermitAcquisitions.add(label); } @@ -1166,6 +1170,104 @@ private void startNodeRestarter() { }); } + private void startNodeShutdownMarker() { + enqueueAction(() -> { + boolean rerun = true; + if (usually()) { + return; + } + try (TransferableReleasables localReleasables = new TransferableReleasables()) { + if (localReleasables.add(blockFullClusterRestart()) == null) { + return; + } + + final var node = randomFrom(shuffledNodes); + + if (localReleasables.add(tryAcquirePermit(node.permits)) == null) { + return; + } + + final var clusterService = cluster.getCurrentMasterNodeInstance(ClusterService.class); + + SubscribableListener + + .newForked( + l -> clusterService.submitUnbatchedStateUpdateTask( + "mark [" + node + "] for removal", + new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + assertTrue( + Strings.toString(currentState), + currentState.metadata().nodeShutdowns().getAll().isEmpty() + ); + final var nodeId = currentState.nodes().resolveNode(node.nodeName).getId(); + return currentState.copyAndUpdateMetadata( + mdb -> mdb.putCustom( + NodesShutdownMetadata.TYPE, + new NodesShutdownMetadata( + Map.of( + nodeId, + SingleNodeShutdownMetadata.builder() + .setNodeId(nodeId) + .setType(SingleNodeShutdownMetadata.Type.REMOVE) + .setStartedAtMillis(clusterService.threadPool().absoluteTimeInMillis()) + .setReason("test") + .build() + ) + ) + ) + ); + } + + @Override + public void onFailure(Exception e) { + l.onFailure(e); + } + + @Override + public void clusterStateProcessed(ClusterState initialState, ClusterState newState) { + l.onResponse(null); + } + } + ) + ) + + .andThen( + (l, ignored) -> clusterService.submitUnbatchedStateUpdateTask( + "unmark [" + node + "] for removal", + new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState.copyAndUpdateMetadata( + mdb -> mdb.putCustom(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY) + ); + } + + @Override + public void onFailure(Exception e) { + l.onFailure(e); + } + + @Override + public void clusterStateProcessed(ClusterState initialState, ClusterState newState) { + l.onResponse(null); + } + } + ) + ) + + .addListener(mustSucceed(ignored -> startNodeShutdownMarker())); + + rerun = false; + } finally { + if (rerun) { + startNodeShutdownMarker(); + } + } + }); + } + @Nullable // if we couldn't block node restarts private Releasable blockNodeRestarts() { try (TransferableReleasables localReleasables = new TransferableReleasables()) { @@ -1486,7 +1588,7 @@ private static class TrackedSnapshot { private final TrackedCluster.TrackedRepository trackedRepository; private final String snapshotName; private final Semaphore permits = new Semaphore(Integer.MAX_VALUE); - private final AtomicReference<ListenableActionFuture<SnapshotInfo>> snapshotInfoFutureRef = new AtomicReference<>(); + private final AtomicReference<SubscribableListener<SnapshotInfo>> snapshotInfoFutureRef = new AtomicReference<>(); TrackedSnapshot(TrackedCluster.TrackedRepository trackedRepository, String snapshotName) { this.trackedRepository = trackedRepository;
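The `snapshotInfoFutureRef` swap above (and the `getSnapshotInfo` change in the next hunk) preserves TrackedSnapshot's "first runner" memoization: whichever caller installs the future via `compareAndSet` performs the fetch, and every later caller subscribes to the same result. A stand-alone sketch of the idea, with `CompletableFuture` standing in for the ES listener types (names hypothetical):

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicReference;

// Sketch: only the caller that wins the compareAndSet performs the expensive
// lookup; everyone else shares the future installed by the first runner.
public class MemoizedSnapshotInfo {
    private final AtomicReference<CompletableFuture<String>> ref = new AtomicReference<>();

    CompletableFuture<String> get() {
        final CompletableFuture<String> newFuture = new CompletableFuture<>();
        final boolean firstRunner = ref.compareAndSet(null, newFuture);
        if (firstRunner == false) {
            return ref.get(); // reuse the future installed by the first runner
        }
        newFuture.complete("snapshot-info"); // the real code resolves this asynchronously
        return newFuture;
    }

    public static void main(String[] args) {
        final MemoizedSnapshotInfo memo = new MemoizedSnapshotInfo();
        System.out.println(memo.get().join()); // first caller performs the fetch
        System.out.println(memo.get().join()); // second caller shares the same future
    }
}
```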
@@ -1525,7 +1627,7 @@ Releasable tryAcquireAllPermits() { } void getSnapshotInfo(Client client, ActionListener<SnapshotInfo> listener) { - final ListenableActionFuture<SnapshotInfo> newFuture = new ListenableActionFuture<>(); + final SubscribableListener<SnapshotInfo> newFuture = new SubscribableListener<>(); final boolean firstRunner = snapshotInfoFutureRef.compareAndSet(null, newFuture); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java index c7933d7065ec2..7ee993915ae24 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java @@ -68,7 +68,7 @@ public void testDeletingSnapshotsIsLoggedAfterClusterStateIsProcessed() throws E final SnapshotMissingException e = expectThrows( SnapshotMissingException.class, - () -> startDeleteSnapshot("test-repo", "does-not-exist").actionGet() + startDeleteSnapshot("test-repo", "does-not-exist") ); assertThat(e.getMessage(), containsString("[test-repo:does-not-exist] is missing")); assertThat(startDeleteSnapshot("test-repo", "test-snapshot").actionGet().isAcknowledged(), is(true)); @@ -106,7 +106,7 @@ public void testSnapshotDeletionFailureShouldBeLogged() throws Exception { // Failure when listing root blobs final MockRepository mockRepository = getRepositoryOnMaster("test-repo"); mockRepository.setRandomControlIOExceptionRate(1.0); - final Exception e = expectThrows(Exception.class, () -> startDeleteSnapshot("test-repo", "test-snapshot").actionGet()); + final Exception e = expectThrows(Exception.class, startDeleteSnapshot("test-repo", "test-snapshot")); assertThat(e.getCause().getMessage(), containsString("Random IOException")); } else { // Failure when finalizing on index-N file @@ -115,7 +115,7 @@ public void testSnapshotDeletionFailureShouldBeLogged() throws Exception { deleteFuture = startDeleteSnapshot("test-repo", "test-snapshot"); waitForBlock(internalCluster().getMasterName(), "test-repo"); unblockNode("test-repo", internalCluster().getMasterName()); - final Exception e = expectThrows(Exception.class, deleteFuture::actionGet); + final Exception e = expectThrows(Exception.class, deleteFuture); assertThat(e.getCause().getMessage(), containsString("exception after block")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java index aef0c2324f167..058d5af7d9c85 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java @@ -312,10 +312,9 @@ public void testRestoreFeatureNotInSnapshot() { final String fakeFeatureStateName = "NonExistentTestPlugin"; SnapshotRestoreException exception = expectThrows( SnapshotRestoreException.class, - () -> clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") .setWaitForCompletion(true) .setFeatureStates("SystemIndexTestPlugin", fakeFeatureStateName) - .get() ); assertThat( @@ -332,11 +331,10 @@ public void testSnapshottingSystemIndexByNameIsRejected() throws Exception { IllegalArgumentException error = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + 
clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") .setIndices(SystemIndexTestPlugin.SYSTEM_INDEX_NAME) .setWaitForCompletion(true) .setIncludeGlobalState(randomBoolean()) - .get() ); assertThat( error.getMessage(), @@ -376,10 +374,9 @@ public void testRestoringSystemIndexByNameIsRejected() throws IllegalAccessExcep IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIndices(SystemIndexTestPlugin.SYSTEM_INDEX_NAME) - .get() ); assertThat( ex.getMessage(), @@ -611,11 +608,10 @@ public void testNoneFeatureStateMustBeAlone() { // run a snapshot including global state IllegalArgumentException createEx = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(randomBoolean()) .setFeatureStates("SystemIndexTestPlugin", "none", "AnotherSystemIndexTestPlugin") - .get() ); assertThat( createEx.getMessage(), @@ -634,11 +630,10 @@ public void testNoneFeatureStateMustBeAlone() { SnapshotRestoreException restoreEx = expectThrows( SnapshotRestoreException.class, - () -> clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") .setWaitForCompletion(true) .setRestoreGlobalState(randomBoolean()) .setFeatureStates("SystemIndexTestPlugin", "none") - .get() ); assertThat( restoreEx.getMessage(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesDimensionsLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesDimensionsLimitIT.java index a62560588bdb2..dc512cdb92cc1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesDimensionsLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesDimensionsLimitIT.java @@ -46,7 +46,7 @@ public void testDimensionFieldNameLimit() throws IOException { ); final Exception ex = expectThrows( DocumentParsingException.class, - () -> prepareIndex("test").setSource( + prepareIndex("test").setSource( "routing_field", randomAlphaOfLength(10), dimensionFieldName, @@ -55,7 +55,7 @@ public void testDimensionFieldNameLimit() throws IOException { randomIntBetween(10, 20), "@timestamp", Instant.now().toEpochMilli() - ).get() + ) ); assertThat( ex.getCause().getMessage(), @@ -78,14 +78,14 @@ public void testDimensionFieldValueLimit() throws IOException { .get(); final Exception ex = expectThrows( DocumentParsingException.class, - () -> prepareIndex("test").setSource( + prepareIndex("test").setSource( "field", randomAlphaOfLength(1025), "gauge", randomIntBetween(10, 20), "@timestamp", startTime + 1 - ).get() + ) ); assertThat(ex.getCause().getMessage(), equalTo("Dimension fields must be less than [1024] bytes but was [1025].")); } @@ -169,7 +169,7 @@ public void testTotalDimensionFieldsSizeLuceneLimitPlusOne() throws IOException for (int i = 0; i < dimensionFieldLimit; i++) { source.put(dimensionFieldNames.get(i), randomAlphaOfLength(1024)); } - final Exception ex = expectThrows(DocumentParsingException.class, () -> prepareIndex("test").setSource(source).get()); + final Exception ex = expectThrows(DocumentParsingException.class, prepareIndex("test").setSource(source)); 
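The recurring refactor in these test files swaps `expectThrows(E.class, () -> builder.get())` for `expectThrows(E.class, builder)`, letting the assertion helper drive the request itself. A simplified model of what such an overload has to do (an assumed shape for illustration, not the real ESTestCase source):

```java
// Simplified, assumed model of a builder-accepting expectThrows overload:
// the overload performs the .get() itself instead of taking a lambda.
public final class ExpectThrowsSketch {
    interface RequestBuilder<R> {
        R get(); // stand-in for ActionRequestBuilder#get
    }

    static <E extends Throwable> E expectThrows(Class<E> expected, RequestBuilder<?> builder) {
        try {
            builder.get();
        } catch (Throwable t) {
            if (expected.isInstance(t)) {
                return expected.cast(t);
            }
            throw new AssertionError("unexpected exception type: " + t, t);
        }
        throw new AssertionError("expected " + expected.getName() + " but nothing was thrown");
    }

    public static void main(String[] args) {
        final IllegalStateException e = expectThrows(IllegalStateException.class, () -> {
            throw new IllegalStateException("boom"); // simulated failing request
        });
        System.out.println("caught: " + e.getMessage());
    }
}
```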
assertEquals("_tsid longer than [32766] bytes [33903].", ex.getCause().getMessage()); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java index 813ff8b4227bc..f6ff9c4f21f1b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java @@ -297,7 +297,7 @@ public void testUpdate() throws Exception { Script fieldIncScript = new Script(ScriptType.INLINE, UPDATE_SCRIPTS, FIELD_INC_SCRIPT, Collections.singletonMap("field", "field")); DocumentMissingException ex = expectThrows( DocumentMissingException.class, - () -> client().prepareUpdate(indexOrAlias(), "1").setScript(fieldIncScript).get() + client().prepareUpdate(indexOrAlias(), "1").setScript(fieldIncScript) ); assertEquals("[1]: document missing", ex.getMessage()); @@ -438,29 +438,26 @@ public void testUpdateWithIfSeqNo() throws Exception { DocWriteResponse result = prepareIndex("test").setId("1").setSource("field", 1).get(); expectThrows( VersionConflictEngineException.class, - () -> client().prepareUpdate(indexOrAlias(), "1") + client().prepareUpdate(indexOrAlias(), "1") .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 2).endObject()) .setIfSeqNo(result.getSeqNo() + 1) .setIfPrimaryTerm(result.getPrimaryTerm()) - .get() ); expectThrows( VersionConflictEngineException.class, - () -> client().prepareUpdate(indexOrAlias(), "1") + client().prepareUpdate(indexOrAlias(), "1") .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 2).endObject()) .setIfSeqNo(result.getSeqNo()) .setIfPrimaryTerm(result.getPrimaryTerm() + 1) - .get() ); expectThrows( VersionConflictEngineException.class, - () -> client().prepareUpdate(indexOrAlias(), "1") + client().prepareUpdate(indexOrAlias(), "1") .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 2).endObject()) .setIfSeqNo(result.getSeqNo() + 1) .setIfPrimaryTerm(result.getPrimaryTerm() + 1) - .get() ); UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "1") diff --git a/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java index e7877dd862ded..819e14c176975 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.versioning; import org.apache.lucene.tests.util.TestUtil; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; @@ -34,7 +35,6 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFutureThrows; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -88,10 +88,11 @@ public void testExternalGTE() throws Exception { .get(); assertThat(indexResponse.getVersion(), 
equalTo(14L)); - assertRequestBuilderThrows( - prepareIndex("test").setId("1").setSource("field1", "value1_1").setVersion(13).setVersionType(VersionType.EXTERNAL_GTE), - VersionConflictEngineException.class - ); + ActionRequestBuilder<?, ?> builder1 = prepareIndex("test").setId("1") + .setSource("field1", "value1_1") + .setVersion(13) + .setVersionType(VersionType.EXTERNAL_GTE); + expectThrows(VersionConflictEngineException.class, builder1); client().admin().indices().prepareRefresh().get(); if (randomBoolean()) { @@ -102,10 +103,8 @@ public void testExternalGTE() throws Exception { } // deleting with a lower version fails. - assertRequestBuilderThrows( - client().prepareDelete("test", "1").setVersion(2).setVersionType(VersionType.EXTERNAL_GTE), - VersionConflictEngineException.class - ); + ActionRequestBuilder<?, ?> builder = client().prepareDelete("test", "1").setVersion(2).setVersionType(VersionType.EXTERNAL_GTE); + expectThrows(VersionConflictEngineException.class, builder); // Delete with a higher or equal version deletes all versions up to the given one. long v = randomIntBetween(14, 17); @@ -260,18 +259,12 @@ public void testCompareAndSet() { VersionConflictEngineException.class ); - assertRequestBuilderThrows( - client().prepareDelete("test", "1").setIfSeqNo(10).setIfPrimaryTerm(1), - VersionConflictEngineException.class - ); - assertRequestBuilderThrows( - client().prepareDelete("test", "1").setIfSeqNo(10).setIfPrimaryTerm(2), - VersionConflictEngineException.class - ); - assertRequestBuilderThrows( - client().prepareDelete("test", "1").setIfSeqNo(1).setIfPrimaryTerm(2), - VersionConflictEngineException.class - ); + ActionRequestBuilder<?, ?> builder6 = client().prepareDelete("test", "1").setIfSeqNo(10).setIfPrimaryTerm(1); + expectThrows(VersionConflictEngineException.class, builder6); + ActionRequestBuilder<?, ?> builder5 = client().prepareDelete("test", "1").setIfSeqNo(10).setIfPrimaryTerm(2); + expectThrows(VersionConflictEngineException.class, builder5); + ActionRequestBuilder<?, ?> builder4 = client().prepareDelete("test", "1").setIfSeqNo(1).setIfPrimaryTerm(2); + expectThrows(VersionConflictEngineException.class, builder4); client().admin().indices().prepareRefresh().get(); for (int i = 0; i < 10; i++) { @@ -302,24 +295,16 @@ public void testCompareAndSet() { assertThat(deleteResponse.getSeqNo(), equalTo(2L)); assertThat(deleteResponse.getPrimaryTerm(), equalTo(1L)); - assertRequestBuilderThrows( - client().prepareDelete("test", "1").setIfSeqNo(1).setIfPrimaryTerm(1), - VersionConflictEngineException.class - ); - assertRequestBuilderThrows( - client().prepareDelete("test", "1").setIfSeqNo(3).setIfPrimaryTerm(12), - VersionConflictEngineException.class - ); - assertRequestBuilderThrows( - client().prepareDelete("test", "1").setIfSeqNo(1).setIfPrimaryTerm(2), - VersionConflictEngineException.class - ); + ActionRequestBuilder<?, ?> builder3 = client().prepareDelete("test", "1").setIfSeqNo(1).setIfPrimaryTerm(1); + expectThrows(VersionConflictEngineException.class, builder3); + ActionRequestBuilder<?, ?> builder2 = client().prepareDelete("test", "1").setIfSeqNo(3).setIfPrimaryTerm(12); + expectThrows(VersionConflictEngineException.class, builder2); + ActionRequestBuilder<?, ?> builder1 = client().prepareDelete("test", "1").setIfSeqNo(1).setIfPrimaryTerm(2); + expectThrows(VersionConflictEngineException.class, builder1); // the doc is deleted. Even when we hit the deleted seqNo, a conditional delete should fail. 
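For readers tracking these assertions: a write with `setIfSeqNo`/`setIfPrimaryTerm` succeeds only when both values match the document's last operation exactly, and a deleted document always conflicts, which is the point of the comment just above and the assertion that follows it. An illustrative sketch of that rule (simplified; the real check lives in the engine, not in test code):

```java
// Illustrative only: the optimistic-concurrency rule behind the assertions,
// reduced to its core.
public class CompareAndSetSketch {
    record LastOp(long seqNo, long primaryTerm) {}

    static boolean conditionalWriteAllowed(LastOp lastOp, boolean docDeleted, long ifSeqNo, long ifPrimaryTerm) {
        if (docDeleted) {
            return false; // a deleted doc always conflicts, even if seqNo/term match the tombstone
        }
        // both values must match exactly; any mismatch raises VersionConflictEngineException
        return lastOp.seqNo() == ifSeqNo && lastOp.primaryTerm() == ifPrimaryTerm;
    }

    public static void main(String[] args) {
        final LastOp lastOp = new LastOp(2, 1); // e.g. a successful delete left seqNo=2, term=1
        System.out.println(conditionalWriteAllowed(lastOp, false, 2, 1));  // true: exact match
        System.out.println(conditionalWriteAllowed(lastOp, false, 1, 1));  // false: stale seqNo
        System.out.println(conditionalWriteAllowed(lastOp, false, 2, 12)); // false: wrong term
        System.out.println(conditionalWriteAllowed(lastOp, true, 2, 1));   // false: doc is deleted
    }
}
```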
- assertRequestBuilderThrows( - client().prepareDelete("test", "1").setIfSeqNo(2).setIfPrimaryTerm(1), - VersionConflictEngineException.class - ); + ActionRequestBuilder<?, ?> builder = client().prepareDelete("test", "1").setIfSeqNo(2).setIfPrimaryTerm(1); + expectThrows(VersionConflictEngineException.class, builder); } public void testSimpleVersioningWithFlush() throws Exception { @@ -334,20 +319,17 @@ public void testSimpleVersioningWithFlush() throws Exception { assertThat(indexResponse.getSeqNo(), equalTo(1L)); client().admin().indices().prepareFlush().get(); - assertRequestBuilderThrows( - prepareIndex("test").setId("1").setSource("field1", "value1_1").setIfSeqNo(0).setIfPrimaryTerm(1), - VersionConflictEngineException.class - ); + ActionRequestBuilder<?, ?> builder2 = prepareIndex("test").setId("1") + .setSource("field1", "value1_1") + .setIfSeqNo(0) + .setIfPrimaryTerm(1); + expectThrows(VersionConflictEngineException.class, builder2); - assertRequestBuilderThrows( - prepareIndex("test").setId("1").setCreate(true).setSource("field1", "value1_1"), - VersionConflictEngineException.class - ); + ActionRequestBuilder<?, ?> builder1 = prepareIndex("test").setId("1").setCreate(true).setSource("field1", "value1_1"); + expectThrows(VersionConflictEngineException.class, builder1); - assertRequestBuilderThrows( - client().prepareDelete("test", "1").setIfSeqNo(0).setIfPrimaryTerm(1), - VersionConflictEngineException.class - ); + ActionRequestBuilder<?, ?> builder = client().prepareDelete("test", "1").setIfSeqNo(0).setIfPrimaryTerm(1); + expectThrows(VersionConflictEngineException.class, builder); for (int i = 0; i < 10; i++) { assertThat(client().prepareGet("test", "1").get().getVersion(), equalTo(2L)); diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 904876984ad91..0f58703af7750 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -422,7 +422,8 @@ provides org.apache.lucene.codecs.PostingsFormat with org.elasticsearch.index.codec.bloomfilter.ES85BloomFilterPostingsFormat, - org.elasticsearch.index.codec.bloomfilter.ES87BloomFilterPostingsFormat; + org.elasticsearch.index.codec.bloomfilter.ES87BloomFilterPostingsFormat, + org.elasticsearch.index.codec.postings.ES812PostingsFormat; provides org.apache.lucene.codecs.DocValuesFormat with ES87TSDBDocValuesFormat; exports org.elasticsearch.cluster.routing.allocation.shards diff --git a/server/src/main/java/org/elasticsearch/Build.java b/server/src/main/java/org/elasticsearch/Build.java index 24cd82d29614e..0b8cd149744e3 100644 --- a/server/src/main/java/org/elasticsearch/Build.java +++ b/server/src/main/java/org/elasticsearch/Build.java @@ -47,8 +47,9 @@ private static class CurrentHolder { // finds the pluggable current build, or uses the local build as a fallback private static Build findCurrent() { - var buildExtension = ExtensionLoader.loadSingleton(ServiceLoader.load(BuildExtension.class), () -> Build::findLocalBuild); - return buildExtension.getCurrentBuild(); + return ExtensionLoader.loadSingleton(ServiceLoader.load(BuildExtension.class)) + .map(BuildExtension::getCurrentBuild) + .orElseGet(Build::findLocalBuild); } } @@ -204,7 +205,7 @@ static URL getElasticsearchCodeSourceLocation() { public static Build readBuild(StreamInput in) throws IOException { final String flavor; if (in.getTransportVersion().before(TransportVersions.V_8_3_0) - || 
in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { flavor = in.readString(); } else { flavor = "default"; @@ -234,7 +235,7 @@ public static Build readBuild(StreamInput in) throws IOException { version = versionMatcher.group(1); qualifier = versionMatcher.group(2); } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_041)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { minWireVersion = in.readString(); minIndexVersion = in.readString(); displayString = in.readString(); @@ -251,7 +252,7 @@ public static Build readBuild(StreamInput in) throws IOException { public static void writeBuild(Build build, StreamOutput out) throws IOException { if (out.getTransportVersion().before(TransportVersions.V_8_3_0) - || out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + || out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeString(build.flavor()); } out.writeString(build.type().displayName()); @@ -265,7 +266,7 @@ public static void writeBuild(Build build, StreamOutput out) throws IOException out.writeBoolean(build.isSnapshot()); out.writeString(build.qualifiedVersion()); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_041)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeString(build.minWireCompatVersion()); out.writeString(build.minIndexCompatVersion()); out.writeString(build.displayString()); diff --git a/server/src/main/java/org/elasticsearch/TransportVersion.java b/server/src/main/java/org/elasticsearch/TransportVersion.java index 92bb88f16385d..d3224bb048393 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersion.java +++ b/server/src/main/java/org/elasticsearch/TransportVersion.java @@ -109,13 +109,11 @@ public String toString() { private static class CurrentHolder { private static final TransportVersion CURRENT = findCurrent(); - // finds the pluggable current version, or uses the given fallback + // finds the pluggable current version private static TransportVersion findCurrent() { - var versionExtension = ExtensionLoader.loadSingleton(ServiceLoader.load(VersionExtension.class), () -> null); - if (versionExtension == null) { - return TransportVersions.LATEST_DEFINED; - } - var version = versionExtension.getCurrentTransportVersion(TransportVersions.LATEST_DEFINED); + var version = ExtensionLoader.loadSingleton(ServiceLoader.load(VersionExtension.class)) + .map(e -> e.getCurrentTransportVersion(TransportVersions.LATEST_DEFINED)) + .orElse(TransportVersions.LATEST_DEFINED); assert version.onOrAfter(TransportVersions.LATEST_DEFINED); return version; } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 226d82a9d5b73..a23445bf2d087 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -93,27 +93,6 @@ static TransportVersion def(int id) { * Detached transport versions added below here. 
*/ public static final TransportVersion V_8_500_020 = def(8_500_020); - public static final TransportVersion V_8_500_040 = def(8_500_040); - public static final TransportVersion V_8_500_041 = def(8_500_041); - public static final TransportVersion V_8_500_042 = def(8_500_042); - public static final TransportVersion V_8_500_043 = def(8_500_043); - public static final TransportVersion V_8_500_044 = def(8_500_044); - public static final TransportVersion V_8_500_045 = def(8_500_045); - public static final TransportVersion V_8_500_046 = def(8_500_046); - public static final TransportVersion V_8_500_047 = def(8_500_047); - public static final TransportVersion V_8_500_048 = def(8_500_048); - public static final TransportVersion V_8_500_049 = def(8_500_049); - public static final TransportVersion V_8_500_050 = def(8_500_050); - public static final TransportVersion V_8_500_051 = def(8_500_051); - public static final TransportVersion V_8_500_052 = def(8_500_052); - public static final TransportVersion V_8_500_053 = def(8_500_053); - public static final TransportVersion V_8_500_054 = def(8_500_054); - public static final TransportVersion V_8_500_055 = def(8_500_055); - public static final TransportVersion V_8_500_056 = def(8_500_056); - public static final TransportVersion V_8_500_057 = def(8_500_057); - public static final TransportVersion V_8_500_058 = def(8_500_058); - public static final TransportVersion V_8_500_059 = def(8_500_059); - public static final TransportVersion V_8_500_060 = def(8_500_060); public static final TransportVersion V_8_500_061 = def(8_500_061); public static final TransportVersion V_8_500_062 = def(8_500_062); public static final TransportVersion V_8_500_063 = def(8_500_063); @@ -193,7 +172,16 @@ static TransportVersion def(int id) { public static final TransportVersion ENRICH_ELASTICSEARCH_VERSION_REMOVED = def(8_560_00_0); public static final TransportVersion NODE_STATS_REQUEST_SIMPLIFIED = def(8_561_00_0); public static final TransportVersion TEXT_EXPANSION_TOKEN_PRUNING_CONFIG_ADDED = def(8_562_00_0); - public static final TransportVersion SEMANTIC_TEXT_FIELD_ADDED = def(8_563_00_0); + public static final TransportVersion ESQL_ASYNC_QUERY = def(8_563_00_0); + public static final TransportVersion ESQL_STATUS_INCLUDE_LUCENE_QUERIES = def(8_564_00_0); + public static final TransportVersion ESQL_CLUSTER_ALIAS = def(8_565_00_0); + public static final TransportVersion SNAPSHOTS_IN_PROGRESS_TRACKING_REMOVING_NODES_ADDED = def(8_566_00_0); + public static final TransportVersion SMALLER_RELOAD_SECURE_SETTINGS_REQUEST = def(8_567_00_0); + public static final TransportVersion UPDATE_API_KEY_EXPIRATION_TIME_ADDED = def(8_568_00_0); + public static final TransportVersion LAZY_ROLLOVER_ADDED = def(8_569_00_0); + public static final TransportVersion ESQL_PLAN_POINT_LITERAL_WKB = def(8_570_00_0); + public static final TransportVersion HOT_THREADS_AS_BYTES = def(8_571_00_0); + public static final TransportVersion SEMANTIC_TEXT_FIELD_ADDED = def(8_572_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/action/ActionFuture.java b/server/src/main/java/org/elasticsearch/action/ActionFuture.java index e51e31f4c03ce..061875e42fec8 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionFuture.java +++ b/server/src/main/java/org/elasticsearch/action/ActionFuture.java @@ -27,22 +27,6 @@ public interface ActionFuture<T> extends Future<T> { */ T actionGet(); - /** - * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing - * an {@link IllegalStateException} instead. Also catches - * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead. - */ - T actionGet(String timeout); - - /** - * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing - * an {@link IllegalStateException} instead. Also catches - * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead. - * - * @param timeoutMillis Timeout in millis - */ - T actionGet(long timeoutMillis); - /** * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing * an {@link IllegalStateException} instead. Also catches diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 2039acda89b8a..dd70dc65b853b 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -30,7 +30,6 @@ import org.elasticsearch.action.admin.cluster.migration.TransportPostFeatureUpgradeAction; import org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotThreadsAction; import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction; -import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsAction; import org.elasticsearch.action.admin.cluster.node.reload.TransportNodesReloadSecureSettingsAction; import org.elasticsearch.action.admin.cluster.node.shutdown.PrevalidateNodeRemovalAction; import org.elasticsearch.action.admin.cluster.node.shutdown.TransportPrevalidateNodeRemovalAction; @@ -105,10 +104,8 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.action.admin.indices.dangling.delete.TransportDeleteDanglingIndexAction; -import org.elasticsearch.action.admin.indices.dangling.find.FindDanglingIndexAction; import org.elasticsearch.action.admin.indices.dangling.find.TransportFindDanglingIndexAction; import org.elasticsearch.action.admin.indices.dangling.import_index.TransportImportDanglingIndexAction; -import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesAction; import org.elasticsearch.action.admin.indices.dangling.list.TransportListDanglingIndicesAction; import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; import org.elasticsearch.action.admin.indices.diskusage.AnalyzeIndexDiskUsageAction; @@ -125,8 +122,6 @@ import org.elasticsearch.action.admin.indices.mapping.get.TransportGetFieldMappingsAction; import org.elasticsearch.action.admin.indices.mapping.get.TransportGetFieldMappingsIndexAction; import org.elasticsearch.action.admin.indices.mapping.get.TransportGetMappingsAction; -import 
org.elasticsearch.action.admin.indices.mapping.put.AutoPutMappingAction; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.mapping.put.TransportAutoPutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; @@ -148,8 +143,6 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; import org.elasticsearch.action.admin.indices.settings.get.TransportGetSettingsAction; import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shrink.ResizeAction; import org.elasticsearch.action.admin.indices.shrink.TransportResizeAction; @@ -157,9 +150,6 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.TransportFieldUsageAction; import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction; -import org.elasticsearch.action.admin.indices.template.delete.DeleteComponentTemplateAction; -import org.elasticsearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction; -import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComponentTemplateAction; import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteIndexTemplateAction; @@ -174,8 +164,6 @@ import org.elasticsearch.action.admin.indices.template.post.TransportSimulateIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.post.TransportSimulateTemplateAction; import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.put.TransportPutComponentTemplateAction; import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.put.TransportPutIndexTemplateAction; @@ -193,11 +181,9 @@ import org.elasticsearch.action.get.TransportMultiGetAction; import org.elasticsearch.action.get.TransportShardMultiGetAction; import org.elasticsearch.action.index.TransportIndexAction; -import org.elasticsearch.action.ingest.DeletePipelineAction; import org.elasticsearch.action.ingest.DeletePipelineTransportAction; import org.elasticsearch.action.ingest.GetPipelineAction; import org.elasticsearch.action.ingest.GetPipelineTransportAction; -import org.elasticsearch.action.ingest.PutPipelineAction; import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.action.ingest.SimulatePipelineAction; import org.elasticsearch.action.ingest.SimulatePipelineTransportAction; @@ -682,7 +668,7 @@ public void reg actions.register(IndicesStatsAction.INSTANCE, TransportIndicesStatsAction.class); 
actions.register(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class); - actions.register(IndicesShardStoresAction.INSTANCE, TransportIndicesShardStoresAction.class); + actions.register(TransportIndicesShardStoresAction.TYPE, TransportIndicesShardStoresAction.class); actions.register(CreateIndexAction.INSTANCE, TransportCreateIndexAction.class); actions.register(ResizeAction.INSTANCE, TransportResizeAction.class); actions.register(RolloverAction.INSTANCE, TransportRolloverAction.class); @@ -694,21 +680,21 @@ public void reg actions.register(GetMappingsAction.INSTANCE, TransportGetMappingsAction.class); actions.register(GetFieldMappingsAction.INSTANCE, TransportGetFieldMappingsAction.class); actions.register(TransportGetFieldMappingsIndexAction.TYPE, TransportGetFieldMappingsIndexAction.class); - actions.register(PutMappingAction.INSTANCE, TransportPutMappingAction.class); - actions.register(AutoPutMappingAction.INSTANCE, TransportAutoPutMappingAction.class); + actions.register(TransportPutMappingAction.TYPE, TransportPutMappingAction.class); + actions.register(TransportAutoPutMappingAction.TYPE, TransportAutoPutMappingAction.class); actions.register(TransportIndicesAliasesAction.TYPE, TransportIndicesAliasesAction.class); - actions.register(UpdateSettingsAction.INSTANCE, TransportUpdateSettingsAction.class); + actions.register(TransportUpdateSettingsAction.TYPE, TransportUpdateSettingsAction.class); actions.register(AnalyzeAction.INSTANCE, TransportAnalyzeAction.class); actions.register(TransportReloadAnalyzersAction.TYPE, TransportReloadAnalyzersAction.class); - actions.register(PutIndexTemplateAction.INSTANCE, TransportPutIndexTemplateAction.class); + actions.register(TransportPutIndexTemplateAction.TYPE, TransportPutIndexTemplateAction.class); actions.register(GetIndexTemplatesAction.INSTANCE, TransportGetIndexTemplatesAction.class); - actions.register(DeleteIndexTemplateAction.INSTANCE, TransportDeleteIndexTemplateAction.class); + actions.register(TransportDeleteIndexTemplateAction.TYPE, TransportDeleteIndexTemplateAction.class); actions.register(PutComponentTemplateAction.INSTANCE, TransportPutComponentTemplateAction.class); actions.register(GetComponentTemplateAction.INSTANCE, TransportGetComponentTemplateAction.class); - actions.register(DeleteComponentTemplateAction.INSTANCE, TransportDeleteComponentTemplateAction.class); - actions.register(PutComposableIndexTemplateAction.INSTANCE, TransportPutComposableIndexTemplateAction.class); + actions.register(TransportDeleteComponentTemplateAction.TYPE, TransportDeleteComponentTemplateAction.class); + actions.register(TransportPutComposableIndexTemplateAction.TYPE, TransportPutComposableIndexTemplateAction.class); actions.register(GetComposableIndexTemplateAction.INSTANCE, TransportGetComposableIndexTemplateAction.class); - actions.register(DeleteComposableIndexTemplateAction.INSTANCE, TransportDeleteComposableIndexTemplateAction.class); + actions.register(TransportDeleteComposableIndexTemplateAction.TYPE, TransportDeleteComposableIndexTemplateAction.class); actions.register(SimulateIndexTemplateAction.INSTANCE, TransportSimulateIndexTemplateAction.class); actions.register(SimulateTemplateAction.INSTANCE, TransportSimulateTemplateAction.class); actions.register(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class); @@ -740,7 +726,7 @@ public void reg actions.register(TransportExplainAction.TYPE, TransportExplainAction.class); actions.register(TransportClearScrollAction.TYPE, TransportClearScrollAction.class); 
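All of these registration changes follow one pattern: the standalone `FooAction` holder class with its `INSTANCE` constant is removed, and the `ActionType` constant moves onto the corresponding transport action as `TYPE`. A self-contained model of the before/after shape (`ActionType` is modeled locally and simplified; the real one also carries a response reader):

```java
// Self-contained model of the refactor; not the real org.elasticsearch.action.ActionType.
public final class RegistrationSketch {
    record ActionType<Response>(String name) {}

    // after the refactor: the constant lives on the transport action itself,
    // replacing a dedicated ExampleAction.INSTANCE holder class
    static final class TransportExampleAction {
        static final ActionType<String> TYPE = new ActionType<>("cluster:admin/example");
    }

    public static void main(String[] args) {
        // registration sites now reference TransportExampleAction.TYPE
        System.out.println(TransportExampleAction.TYPE.name());
    }
}
```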
actions.register(RecoveryAction.INSTANCE, TransportRecoveryAction.class); - actions.register(NodesReloadSecureSettingsAction.INSTANCE, TransportNodesReloadSecureSettingsAction.class); + actions.register(TransportNodesReloadSecureSettingsAction.TYPE, TransportNodesReloadSecureSettingsAction.class); actions.register(AutoCreateAction.INSTANCE, AutoCreateAction.TransportAction.class); actions.register(ResolveIndexAction.INSTANCE, ResolveIndexAction.TransportAction.class); actions.register(AnalyzeIndexDiskUsageAction.INSTANCE, TransportAnalyzeIndexDiskUsageAction.class); @@ -757,9 +743,9 @@ public void reg actions.register(TransportFieldCapabilitiesAction.TYPE, TransportFieldCapabilitiesAction.class); - actions.register(PutPipelineAction.INSTANCE, PutPipelineTransportAction.class); + actions.register(PutPipelineTransportAction.TYPE, PutPipelineTransportAction.class); actions.register(GetPipelineAction.INSTANCE, GetPipelineTransportAction.class); - actions.register(DeletePipelineAction.INSTANCE, DeletePipelineTransportAction.class); + actions.register(DeletePipelineTransportAction.TYPE, DeletePipelineTransportAction.class); actions.register(SimulatePipelineAction.INSTANCE, SimulatePipelineTransportAction.class); actionPlugins.stream().flatMap(p -> p.getActions().stream()).forEach(actions::register); @@ -776,10 +762,10 @@ public void reg actions.register(RetentionLeaseActions.REMOVE, RetentionLeaseActions.TransportRemoveAction.class); // Dangling indices - actions.register(ListDanglingIndicesAction.INSTANCE, TransportListDanglingIndicesAction.class); + actions.register(TransportListDanglingIndicesAction.TYPE, TransportListDanglingIndicesAction.class); actions.register(TransportImportDanglingIndexAction.TYPE, TransportImportDanglingIndexAction.class); actions.register(TransportDeleteDanglingIndexAction.TYPE, TransportDeleteDanglingIndexAction.class); - actions.register(FindDanglingIndexAction.INSTANCE, TransportFindDanglingIndexAction.class); + actions.register(TransportFindDanglingIndexAction.TYPE, TransportFindDanglingIndexAction.class); // internal actions actions.register(GlobalCheckpointSyncAction.TYPE, GlobalCheckpointSyncAction.class); diff --git a/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java index 6209e9fce390e..32d65d743e6a6 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java @@ -48,13 +48,6 @@ public Response get(TimeValue timeout) { return execute().actionGet(timeout); } - /** - * Short version of execute().actionGet(). 
- */ - public Response get(String timeout) { - return execute().actionGet(timeout); - } - public void execute(ActionListener<Response> listener) { client.execute(action, request, listener); } diff --git a/server/src/main/java/org/elasticsearch/action/ActionType.java b/server/src/main/java/org/elasticsearch/action/ActionType.java index b8e4c8b88aa5e..a052cf77fbd4d 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionType.java +++ b/server/src/main/java/org/elasticsearch/action/ActionType.java @@ -8,7 +8,6 @@ package org.elasticsearch.action; -import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.Writeable; /** @@ -27,10 +26,6 @@ public static ActionType<ActionResponse.Empty> emptyResponse(String name) { return new ActionType<>(name, in -> ActionResponse.Empty.INSTANCE); } - public static ActionType<AcknowledgedResponse> acknowledgedResponse(String name) { - return new ActionType<>(name, AcknowledgedResponse::readFrom); - } - /** * @param name The name of the action, must be unique across actions. * @param responseReader A reader for the response type diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java index 918bfbca8d304..1118a6318ddf7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodeHotThreads.java @@ -8,34 +8,78 @@ package org.elasticsearch.action.admin.cluster.node.hotthreads; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; +import java.io.InputStreamReader; +import java.io.StringReader; +import java.nio.charset.StandardCharsets; public class NodeHotThreads extends BaseNodeResponse { - private String hotThreads; + private final ReleasableBytesReference bytes; NodeHotThreads(StreamInput in) throws IOException { super(in); - hotThreads = in.readString(); + if (in.getTransportVersion().onOrAfter(TransportVersions.HOT_THREADS_AS_BYTES)) { + bytes = in.readReleasableBytesReference(); + } else { + bytes = ReleasableBytesReference.wrap(new BytesArray(in.readString().getBytes(StandardCharsets.UTF_8))); + } } - public NodeHotThreads(DiscoveryNode node, String hotThreads) { + public NodeHotThreads(DiscoveryNode node, ReleasableBytesReference hotThreadsUtf8Bytes) { super(node); - this.hotThreads = hotThreads; + assert hotThreadsUtf8Bytes.hasReferences(); + bytes = hotThreadsUtf8Bytes; // takes ownership of the original ref, no need to .retain() } public String getHotThreads() { - return this.hotThreads; + return bytes.utf8ToString(); + } + + public java.io.Reader getHotThreadsReader() { + try { + return new InputStreamReader(bytes.streamInput(), StandardCharsets.UTF_8); + } catch (IOException e) { + assert false : e; // all in-memory, no IO takes place + return new StringReader("ERROR:" + e.toString()); + } } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(hotThreads); + if (out.getTransportVersion().onOrAfter(TransportVersions.HOT_THREADS_AS_BYTES)) { 
out.writeBytesReference(bytes); + } else { + out.writeString(bytes.utf8ToString()); + } + } + + @Override + public void incRef() { + bytes.incRef(); + } + + @Override + public boolean tryIncRef() { + return bytes.tryIncRef(); + } + + @Override + public boolean decRef() { + return bytes.decRef(); + } + + @Override + public boolean hasReferences() { + return bytes.hasReferences(); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java deleted file mode 100644 index 6593b90fb7f65..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.cluster.node.hotthreads; - -import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.monitor.jvm.HotThreads; - -public class NodesHotThreadsRequestBuilder extends NodesOperationRequestBuilder< - NodesHotThreadsRequest, - NodesHotThreadsResponse, - NodesHotThreadsRequestBuilder> { - - public NodesHotThreadsRequestBuilder(ElasticsearchClient client) { - super(client, TransportNodesHotThreadsAction.TYPE, new NodesHotThreadsRequest()); - } - - public NodesHotThreadsRequestBuilder setThreads(int threads) { - request.threads(threads); - return this; - } - - public NodesHotThreadsRequestBuilder setIgnoreIdleThreads(boolean ignoreIdleThreads) { - request.ignoreIdleThreads(ignoreIdleThreads); - return this; - } - - public NodesHotThreadsRequestBuilder setType(HotThreads.ReportType type) { - request.type(type); - return this; - } - - public NodesHotThreadsRequestBuilder setSortOrder(HotThreads.SortOrder sortOrder) { - request.sortOrder(sortOrder); - return this; - } - - public NodesHotThreadsRequestBuilder setInterval(TimeValue interval) { - request.interval(interval); - return this; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java index 59307009f785b..892629dbe46f7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java @@ -15,20 +15,31 @@ import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.transport.LeakTracker; import java.io.BufferedReader; import java.io.IOException; -import java.io.StringReader; import java.util.Iterator; import java.util.List; import 
java.util.NoSuchElementException; -import java.util.Objects; public class NodesHotThreadsResponse extends BaseNodesResponse<NodeHotThreads> { + @SuppressWarnings("this-escape") + private final RefCounted refs = LeakTracker.wrap( + AbstractRefCounted.of(() -> Releasables.wrap(Iterators.map(getNodes().iterator(), n -> n::decRef)).close()) + ); + + @SuppressWarnings("this-escape") public NodesHotThreadsResponse(ClusterName clusterName, List<NodeHotThreads> nodes, List<FailedNodeException> failures) { super(clusterName, nodes, failures); + for (NodeHotThreads nodeHotThreads : getNodes()) { + nodeHotThreads.mustIncRef(); + } } public Iterator<CheckedConsumer<java.io.Writer, IOException>> getTextChunks() { @@ -36,15 +47,21 @@ public Iterator<CheckedConsumer<java.io.Writer, IOException>> getTextChunks() { getNodes().iterator(), node -> Iterators.concat( Iterators.single(writer -> writer.append("::: ").append(node.getNode().toString()).append('\n')), - Iterators.map(new LinesIterator(node.getHotThreads()), line -> writer -> writer.append(" ").append(line).append('\n')), - Iterators.single(writer -> writer.append('\n')) + Iterators.map( + new LinesIterator(node.getHotThreadsReader()), + line -> writer -> writer.append(" ").append(line).append('\n') + ), + Iterators.single(writer -> { + assert hasReferences(); + writer.append('\n'); + }) ) ); } @Override protected List<NodeHotThreads> readNodesFrom(StreamInput in) throws IOException { - return in.readCollectionAsList(NodeHotThreads::new); + return TransportAction.localOnly(); } @Override @@ -56,8 +73,8 @@ private static class LinesIterator implements Iterator<String> { final BufferedReader reader; String nextLine; - private LinesIterator(String input) { - reader = new BufferedReader(new StringReader(Objects.requireNonNull(input))); + private LinesIterator(java.io.Reader reader) { + this.reader = new BufferedReader(reader); advance(); } @@ -86,4 +103,24 @@ public String next() { } } } + + @Override + public void incRef() { + refs.incRef(); + } + + @Override + public boolean tryIncRef() { + return refs.tryIncRef(); + } + + @Override + public boolean decRef() { + return refs.decRef(); + } + + @Override + public boolean hasReferences() { + return refs.hasReferences(); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java index ea56c85e36a3a..e731d951493f0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java @@ -15,17 +15,22 @@ import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.IOUtils; import org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.LeakTracker; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.io.StringWriter; +import java.io.OutputStreamWriter; +import java.nio.charset.StandardCharsets; import java.util.List;
public class TransportNodesHotThreadsAction extends TransportNodesAction< @@ -74,17 +79,28 @@ protected NodeHotThreads newNodeResponse(StreamInput in, DiscoveryNode node) thr @Override protected NodeHotThreads nodeOperation(NodeRequest request, Task task) { - HotThreads hotThreads = new HotThreads().busiestThreads(request.request.threads) + final var hotThreads = new HotThreads().busiestThreads(request.request.threads) .type(request.request.type) .sortOrder(request.request.sortOrder) .interval(request.request.interval) .threadElementsSnapshotCount(request.request.snapshots) .ignoreIdleThreads(request.request.ignoreIdleThreads); - try (var writer = new StringWriter()) { - hotThreads.detect(writer); - return new NodeHotThreads(clusterService.localNode(), writer.toString()); + final var out = transportService.newNetworkBytesStream(); + final var trackedResource = LeakTracker.wrap(out); + var success = false; + try { + try (var writer = new OutputStreamWriter(Streams.flushOnCloseStream(out), StandardCharsets.UTF_8)) { + hotThreads.detect(writer); + } + final var result = new NodeHotThreads(clusterService.localNode(), new ReleasableBytesReference(out.bytes(), trackedResource)); + success = true; + return result; } catch (Exception e) { throw new ElasticsearchException("failed to detect hot threads", e); + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(trackedResource); + } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java deleted file mode 100644 index 3b09694958dcd..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.cluster.node.reload; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; - -public class NodesReloadSecureSettingsAction extends ActionType<NodesReloadSecureSettingsResponse> { - - public static final NodesReloadSecureSettingsAction INSTANCE = new NodesReloadSecureSettingsAction(); - public static final String NAME = "cluster:admin/nodes/reload_secure_settings"; - - private NodesReloadSecureSettingsAction() { - super(NAME, Writeable.Reader.localOnly()); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java index 0af12b5d47c58..c24833dca49ee 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java @@ -8,26 +8,32 @@ package org.elasticsearch.action.admin.cluster.node.reload; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.support.nodes.BaseNodesRequest; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.CharArrays; import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.transport.LeakTracker; import org.elasticsearch.transport.TransportRequest; import java.io.IOException; import java.util.Arrays; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; /** * Request for a reload secure settings action */ -public class NodesReloadSecureSettingsRequest extends BaseNodesRequest<NodesReloadSecureSettingsRequest> implements Releasable { +public class NodesReloadSecureSettingsRequest extends BaseNodesRequest<NodesReloadSecureSettingsRequest> { /** * The password is used to re-read and decrypt the contents @@ -37,39 +43,12 @@ public class NodesReloadSecureSettingsRequest extends BaseNodesRequest<NodesRelo @Nullable private SecureString secureSettingsPassword; + private final RefCounted refs = LeakTracker.wrap(AbstractRefCounted.of(() -> Releasables.close(secureSettingsPassword))); + public NodesReloadSecureSettingsRequest() { super((String[]) null); } - public NodesReloadSecureSettingsRequest(StreamInput in) throws IOException { - super(in); - final BytesReference bytesRef = in.readOptionalBytesReference(); - if (bytesRef != null) { - byte[] bytes = BytesReference.toBytes(bytesRef); - try { - this.secureSettingsPassword = new SecureString(CharArrays.utf8BytesToChars(bytes)); - } finally { - Arrays.fill(bytes, (byte) 0); - } - } else { - this.secureSettingsPassword = null; - } - } - - /** - * Reload secure settings only on certain nodes, based on the nodes ids - * specified. If none are passed, secure settings will be reloaded on all the - * nodes. - */ - public NodesReloadSecureSettingsRequest(String...
nodesIds) { - super(nodesIds); - } - - @Nullable - public SecureString getSecureSettingsPassword() { - return secureSettingsPassword; - } - public void setSecureStorePassword(SecureString secureStorePassword) { this.secureSettingsPassword = secureStorePassword; } @@ -80,64 +59,126 @@ boolean hasPassword() { @Override public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - if (this.secureSettingsPassword == null) { - out.writeOptionalBytesReference(null); - } else { - final byte[] passwordBytes = CharArrays.toUtf8Bytes(this.secureSettingsPassword.getChars()); - try { - out.writeOptionalBytesReference(new BytesArray(passwordBytes)); - } finally { - Arrays.fill(passwordBytes, (byte) 0); - } - } + TransportAction.localOnly(); } - // This field is intentionally not part of serialization - private final Set nodeRequests = ConcurrentHashMap.newKeySet(); + @Override + public void incRef() { + refs.incRef(); + } - NodeRequest newNodeRequest() { - final NodesReloadSecureSettingsRequest clone = new NodesReloadSecureSettingsRequest(nodesIds()); - if (hasPassword()) { - clone.setSecureStorePassword(getSecureSettingsPassword().clone()); - } - final NodeRequest nodeRequest = new NodeRequest(clone); - nodeRequests.add(nodeRequest); - return nodeRequest; + @Override + public boolean tryIncRef() { + return refs.tryIncRef(); } @Override - public void close() { - if (this.secureSettingsPassword != null) { - this.secureSettingsPassword.close(); - } - nodeRequests.forEach(NodeRequest::close); + public boolean decRef() { + return refs.decRef(); + } + + @Override + public boolean hasReferences() { + return refs.hasReferences(); + } + + NodeRequest newNodeRequest() { + refs.mustIncRef(); + return new NodeRequest(secureSettingsPassword, refs); } - public static class NodeRequest extends TransportRequest implements Releasable { + public static class NodeRequest extends TransportRequest { + + @Nullable + private final SecureString secureSettingsPassword; - // TODO don't wrap the whole top-level request, it contains heavy and irrelevant DiscoveryNode things; see #100878 - NodesReloadSecureSettingsRequest request; + private final RefCounted refs; NodeRequest(StreamInput in) throws IOException { super(in); - request = new NodesReloadSecureSettingsRequest(in); + + if (in.getTransportVersion().before(TransportVersions.SMALLER_RELOAD_SECURE_SETTINGS_REQUEST)) { + TaskId.readFromStream(in); + in.readStringArray(); + in.readOptionalArray(DiscoveryNode::new, DiscoveryNode[]::new); + in.readOptionalTimeValue(); + } + + final BytesReference bytesRef = in.readOptionalBytesReference(); + if (bytesRef != null) { + byte[] bytes = BytesReference.toBytes(bytesRef); + try { + this.secureSettingsPassword = new SecureString(CharArrays.utf8BytesToChars(bytes)); + this.refs = LeakTracker.wrap(AbstractRefCounted.of(() -> Releasables.close(this.secureSettingsPassword))); + } finally { + Arrays.fill(bytes, (byte) 0); + } + } else { + this.secureSettingsPassword = null; + this.refs = LeakTracker.wrap(AbstractRefCounted.of(() -> {})); + } } - NodeRequest(NodesReloadSecureSettingsRequest request) { - this.request = request; + NodeRequest(@Nullable SecureString secureSettingsPassword, RefCounted refs) { + assert secureSettingsPassword == null || secureSettingsPassword.getChars() != null; // ensures it's not closed + assert refs.hasReferences(); + this.secureSettingsPassword = secureSettingsPassword; + this.refs = refs; } @Override public void writeTo(StreamOutput out) throws IOException { + assert hasReferences(); 
super.writeTo(out); - request.writeTo(out); + + if (out.getTransportVersion().before(TransportVersions.SMALLER_RELOAD_SECURE_SETTINGS_REQUEST)) { + TaskId.EMPTY_TASK_ID.writeTo(out); + out.writeStringArray(Strings.EMPTY_ARRAY); + out.writeOptionalArray(StreamOutput::writeWriteable, null); + out.writeOptionalTimeValue(null); + } + + if (this.secureSettingsPassword == null) { + out.writeOptionalBytesReference(null); + } else { + final byte[] passwordBytes = CharArrays.toUtf8Bytes(this.secureSettingsPassword.getChars()); + try { + out.writeOptionalBytesReference(new BytesArray(passwordBytes)); + } finally { + Arrays.fill(passwordBytes, (byte) 0); + } + } + } + + boolean hasPassword() { + assert hasReferences(); + return this.secureSettingsPassword != null && this.secureSettingsPassword.length() > 0; + } + + @Nullable + public SecureString getSecureSettingsPassword() { + assert hasReferences(); + return secureSettingsPassword; + } + + @Override + public void incRef() { + refs.incRef(); + } + + @Override + public boolean tryIncRef() { + return refs.tryIncRef(); + } + + @Override + public boolean decRef() { + return refs.decRef(); } @Override - public void close() { - assert request.nodeRequests.isEmpty() : "potential circular reference"; - request.close(); + public boolean hasReferences() { + return refs.hasReferences(); } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java deleted file mode 100644 index 95c5d53ad7fbc..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.cluster.node.reload; - -import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; -import org.elasticsearch.common.settings.SecureString; - -/** - * Builder for the reload secure settings nodes request - */ -public class NodesReloadSecureSettingsRequestBuilder extends NodesOperationRequestBuilder< - NodesReloadSecureSettingsRequest, - NodesReloadSecureSettingsResponse, - NodesReloadSecureSettingsRequestBuilder> { - - public NodesReloadSecureSettingsRequestBuilder(ElasticsearchClient client) { - super(client, NodesReloadSecureSettingsAction.INSTANCE, new NodesReloadSecureSettingsRequest()); - } - - public NodesReloadSecureSettingsRequestBuilder setSecureStorePassword(SecureString secureStorePassword) { - request.setSecureStorePassword(secureStorePassword); - return this; - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index ed63e6d1b4474..9598f378a188c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -13,6 +13,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.TransportNodesAction; @@ -39,6 +40,10 @@ public class TransportNodesReloadSecureSettingsAction extends TransportNodesActi NodesReloadSecureSettingsRequest.NodeRequest, NodesReloadSecureSettingsResponse.NodeResponse> { + public static final ActionType TYPE = ActionType.localOnly( + "cluster:admin/nodes/reload_secure_settings" + ); + private static final Logger logger = LogManager.getLogger(TransportNodesReloadSecureSettingsAction.class); private final Environment environment; @@ -54,7 +59,7 @@ public TransportNodesReloadSecureSettingsAction( PluginsService pluginService ) { super( - NodesReloadSecureSettingsAction.NAME, + TYPE.name(), clusterService, transportService, actionFilters, @@ -91,7 +96,6 @@ protected void doExecute( ActionListener listener ) { if (request.hasPassword() && isNodeLocal(request) == false && isNodeTransportTLSEnabled() == false) { - request.close(); listener.onFailure( new ElasticsearchException( "Secure settings cannot be updated cluster wide when TLS for the transport layer" @@ -99,16 +103,15 @@ protected void doExecute( ) ); } else { - super.doExecute(task, request, ActionListener.runBefore(listener, request::close)); + super.doExecute(task, request, listener); } } @Override protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation( - NodesReloadSecureSettingsRequest.NodeRequest nodeReloadRequest, + NodesReloadSecureSettingsRequest.NodeRequest request, Task task ) { - final NodesReloadSecureSettingsRequest request = nodeReloadRequest.request; // We default to using an empty string as the keystore password so that we mimic pre 7.3 API behavior try (KeyStoreWrapper keystore = KeyStoreWrapper.load(environment.configFile())) { // reread keystore from config file @@ -138,8 
+141,6 @@ protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation( return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(), null); } catch (final Exception e) { return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(), e); - } finally { - request.close(); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java index 8bcedd274d613..a7ba179c22ef7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java @@ -11,30 +11,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.repositories.RepositoryCleanupResult; -import org.elasticsearch.xcontent.ObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; public final class CleanupRepositoryResponse extends ActionResponse implements ToXContentObject { - private static final ObjectParser<CleanupRepositoryResponse, Void> PARSER = new ObjectParser<>( - CleanupRepositoryResponse.class.getName(), - true, - CleanupRepositoryResponse::new - ); - - static { - PARSER.declareObject( - (response, cleanupResult) -> response.result = cleanupResult, - RepositoryCleanupResult.PARSER, - new ParseField("results") - ); - } - private RepositoryCleanupResult result; public CleanupRepositoryResponse() {} @@ -56,10 +39,6 @@ public void writeTo(StreamOutput out) throws IOException { result.writeTo(out); } - public static CleanupRepositoryResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject().field("results"); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java index 69568462731e8..09e311e5d78ad 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java @@ -34,7 +34,7 @@ */ public class TransportDeleteRepositoryAction extends AcknowledgedTransportMasterNodeAction<DeleteRepositoryRequest> { - public static final ActionType<AcknowledgedResponse> TYPE = ActionType.acknowledgedResponse("cluster:admin/repository/delete"); + public static final ActionType<AcknowledgedResponse> TYPE = ActionType.localOnly("cluster:admin/repository/delete"); private final RepositoriesService repositoriesService; @Inject diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java index 4011d5365dd57..4c153c7331dba 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java @@ -16,14 +16,11 @@ import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.List; import java.util.Map; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; - /** * Get repositories response */ @@ -62,8 +59,4 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static GetRepositoriesResponse fromXContent(XContentParser parser) throws IOException { - ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); - return new GetRepositoriesResponse(RepositoriesMetadata.fromXContent(parser)); - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java index c6b471ff25bdf..98b2d1561eb01 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java @@ -34,7 +34,7 @@ */ public class TransportPutRepositoryAction extends AcknowledgedTransportMasterNodeAction { - public static final ActionType TYPE = ActionType.acknowledgedResponse("cluster:admin/repository/put"); + public static final ActionType TYPE = ActionType.localOnly("cluster:admin/repository/put"); private final RepositoriesService repositoriesService; @Inject diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index 4c7c4ec8f15a2..2fc2f1cfde3b7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -13,9 +13,9 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterState; @@ -110,7 +110,7 @@ private void verifyThenSubmitUpdate( ) { transportService.sendRequest( transportService.getLocalNode(), - IndicesShardStoresAction.NAME, + TransportIndicesShardStoresAction.TYPE.name(), new IndicesShardStoresRequest().indices(stalePrimaryAllocations.keySet().toArray(Strings.EMPTY_ARRAY)), new ActionListenerResponseHandler<>(listener.delegateFailureAndWrap((delegate, response) -> { Map>> status = response.getStoreStatuses(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java index 7ab8b704a3ee8..22286ee41497a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java @@ -30,7 +30,7 @@ */ public final class TransportCloneSnapshotAction extends AcknowledgedTransportMasterNodeAction { - public static final ActionType TYPE = ActionType.acknowledgedResponse("cluster:admin/snapshot/clone"); + public static final ActionType TYPE = ActionType.localOnly("cluster:admin/snapshot/clone"); private final SnapshotsService snapshotsService; @Inject diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index 7c5d11a884d60..2e8a28d412e26 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -104,7 +104,7 @@ public CreateSnapshotRequest(StreamInput in) throws IOException { includeGlobalState = in.readBoolean(); waitForCompletion = in.readBoolean(); partial = in.readBoolean(); - userMetadata = in.readMap(); + userMetadata = in.readGenericMap(); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java index 39b03b479ffdf..1451f39dadf7c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java @@ -29,7 +29,7 @@ * Transport action for delete snapshot operation */ public class TransportDeleteSnapshotAction extends AcknowledgedTransportMasterNodeAction { - public static final ActionType TYPE = ActionType.acknowledgedResponse("cluster:admin/snapshot/delete"); + public static final ActionType TYPE = ActionType.localOnly("cluster:admin/snapshot/delete"); private final SnapshotsService snapshotsService; @Inject diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index e1f1636781a08..4be6c6af3d7db 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -227,7 +227,7 @@ private void buildResponse( // state and the repository contents in the below logic final SnapshotIndexShardStage stage = switch (shardEntry.getValue().state()) { case FAILED, ABORTED, MISSING -> SnapshotIndexShardStage.FAILURE; - case INIT, WAITING, QUEUED -> SnapshotIndexShardStage.STARTED; + case INIT, WAITING, PAUSED_FOR_NODE_REMOVAL, QUEUED -> SnapshotIndexShardStage.STARTED; case SUCCESS -> SnapshotIndexShardStage.DONE; }; final SnapshotIndexShardStatus shardStatus; diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java index f8d894e4de48b..81a26999d2907 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.cluster.stats; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.metadata.Metadata; @@ -36,14 +37,12 @@ import java.util.Set; import java.util.TreeMap; -import static org.elasticsearch.TransportVersions.V_8_500_045; - /** * Statistics about analysis usage. */ public final class AnalysisStats implements ToXContentFragment, Writeable { - private static final TransportVersion SYNONYM_SETS_VERSION = V_8_500_045; + private static final TransportVersion SYNONYM_SETS_VERSION = TransportVersions.V_8_500_061; private static final Set<String> SYNONYM_FILTER_TYPES = Set.of("synonym", "synonym_graph"); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java index 829b00b7cc1c9..37821f597a8e5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java @@ -27,7 +27,7 @@ public class TransportDeleteStoredScriptAction extends AcknowledgedTransportMasterNodeAction<DeleteStoredScriptRequest> { - public static final ActionType<AcknowledgedResponse> TYPE = ActionType.acknowledgedResponse("cluster:admin/script/delete"); + public static final ActionType<AcknowledgedResponse> TYPE = ActionType.localOnly("cluster:admin/script/delete"); @Inject public TransportDeleteStoredScriptAction( diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java index 4fb0f68bce625..f526cf37e357b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java @@ -27,7 +27,7 @@ public class TransportPutStoredScriptAction extends AcknowledgedTransportMasterNodeAction<PutStoredScriptRequest> { - public static final ActionType<AcknowledgedResponse> TYPE = ActionType.acknowledgedResponse("cluster:admin/script/put"); + public static final ActionType<AcknowledgedResponse> TYPE = ActionType.localOnly("cluster:admin/script/put"); private final ScriptService scriptService; @Inject diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java deleted file mode 100644 index aa3f226d23c9d..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements.
Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.cluster.tasks; - -import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; - -public class PendingClusterTasksRequestBuilder extends MasterNodeReadOperationRequestBuilder< - PendingClusterTasksRequest, - PendingClusterTasksResponse, - PendingClusterTasksRequestBuilder> { - - public PendingClusterTasksRequestBuilder(ElasticsearchClient client) { - super(client, TransportPendingClusterTasksAction.TYPE, new PendingClusterTasksRequest()); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index 0001fec4e71e5..367837fa91296 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -59,7 +59,7 @@ public class TransportIndicesAliasesAction extends AcknowledgedTransportMasterNodeAction { public static final String NAME = "indices:admin/aliases"; - public static final ActionType TYPE = ActionType.acknowledgedResponse(NAME); + public static final ActionType TYPE = ActionType.localOnly(NAME); private static final Logger logger = LogManager.getLogger(TransportIndicesAliasesAction.class); private final MetadataIndexAliasesService indexAliasesService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java index 8a674292b3cc5..6c5a271c3338b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java @@ -441,7 +441,7 @@ public AnalyzeToken( positionLength = 1; } type = in.readOptionalString(); - attributes = in.readMap(); + attributes = in.readGenericMap(); } public String getTerm() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java index 31c5f57ab5eef..e2894f072011c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java @@ -25,7 +25,7 @@ public class ReloadAnalyzersRequest extends BroadcastRequest { - public static final ActionType TYPE = ActionType.acknowledgedResponse("cluster:admin/indices/dangling/delete"); + public static final ActionType TYPE = ActionType.localOnly("cluster:admin/indices/dangling/delete"); private static final Logger logger = LogManager.getLogger(TransportDeleteDanglingIndexAction.class); private final Settings settings; @@ -166,7 +166,7 @@ protected ClusterBlockException checkBlock(DeleteDanglingIndexRequest request, C private void findDanglingIndex(String indexUUID, ActionListener listener) { this.nodeClient.execute( - ListDanglingIndicesAction.INSTANCE, + TransportListDanglingIndicesAction.TYPE, new 
ListDanglingIndicesRequest(indexUUID), listener.delegateFailure((l, response) -> { if (response.hasFailures()) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/FindDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/FindDanglingIndexAction.java deleted file mode 100644 index 107d2d1734183..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/FindDanglingIndexAction.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.dangling.find; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; - -/** - * Represents a request to find a particular dangling index by UUID. - */ -public class FindDanglingIndexAction extends ActionType { - - public static final FindDanglingIndexAction INSTANCE = new FindDanglingIndexAction(); - public static final String NAME = "cluster:admin/indices/dangling/find"; - - private FindDanglingIndexAction() { - super(NAME, Writeable.Reader.localOnly()); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java index 553e3915b3e3f..e3178c4b7fc30 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.dangling.find; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.TransportNodesAction; @@ -34,6 +35,8 @@ public class TransportFindDanglingIndexAction extends TransportNodesAction< NodeFindDanglingIndexRequest, NodeFindDanglingIndexResponse> { + public static final ActionType TYPE = ActionType.localOnly("cluster:admin/indices/dangling/find"); + private final TransportService transportService; private final DanglingIndicesState danglingIndicesState; @@ -46,7 +49,7 @@ public TransportFindDanglingIndexAction( DanglingIndicesState danglingIndicesState ) { super( - FindDanglingIndexAction.NAME, + TYPE.name(), clusterService, transportService, actionFilters, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java index 0348b46bedcae..59bf71a4387e1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java @@ -14,9 +14,9 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionType; import 
org.elasticsearch.action.FailedNodeException; -import org.elasticsearch.action.admin.indices.dangling.find.FindDanglingIndexAction; import org.elasticsearch.action.admin.indices.dangling.find.FindDanglingIndexRequest; import org.elasticsearch.action.admin.indices.dangling.find.NodeFindDanglingIndexResponse; +import org.elasticsearch.action.admin.indices.dangling.find.TransportFindDanglingIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -39,7 +39,7 @@ * to perform the actual allocation. */ public class TransportImportDanglingIndexAction extends HandledTransportAction { - public static final ActionType TYPE = ActionType.acknowledgedResponse("cluster:admin/indices/dangling/import"); + public static final ActionType TYPE = ActionType.localOnly("cluster:admin/indices/dangling/import"); private static final Logger logger = LogManager.getLogger(TransportImportDanglingIndexAction.class); private final LocalAllocateDangledIndices danglingIndexAllocator; @@ -97,7 +97,7 @@ public void onFailure(Exception e) { private void findDanglingIndex(ImportDanglingIndexRequest request, ActionListener listener) { final String indexUUID = request.getIndexUUID(); this.nodeClient.execute( - FindDanglingIndexAction.INSTANCE, + TransportFindDanglingIndexAction.TYPE, new FindDanglingIndexRequest(indexUUID), listener.delegateFailure((l, response) -> { if (response.hasFailures()) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/ListDanglingIndicesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/ListDanglingIndicesAction.java deleted file mode 100644 index 3db80832f4959..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/ListDanglingIndicesAction.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.dangling.list; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; - -/** - * Represents a request to list all dangling indices known to the cluster. 
- */ -public class ListDanglingIndicesAction extends ActionType { - - public static final ListDanglingIndicesAction INSTANCE = new ListDanglingIndicesAction(); - public static final String NAME = "cluster:admin/indices/dangling/list"; - - private ListDanglingIndicesAction() { - super(NAME, Writeable.Reader.localOnly()); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/TransportListDanglingIndicesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/TransportListDanglingIndicesAction.java index 6e0a27f7fe822..7baa190e3899d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/TransportListDanglingIndicesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/TransportListDanglingIndicesAction.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.dangling.list; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.admin.indices.dangling.DanglingIndexInfo; import org.elasticsearch.action.support.ActionFilters; @@ -35,6 +36,9 @@ public class TransportListDanglingIndicesAction extends TransportNodesAction< ListDanglingIndicesResponse, NodeListDanglingIndicesRequest, NodeListDanglingIndicesResponse> { + + public static final ActionType TYPE = ActionType.localOnly("cluster:admin/indices/dangling/list"); + private final TransportService transportService; private final DanglingIndicesState danglingIndicesState; @@ -47,7 +51,7 @@ public TransportListDanglingIndicesAction( DanglingIndicesState danglingIndicesState ) { super( - ListDanglingIndicesAction.NAME, + TYPE.name(), clusterService, transportService, actionFilters, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java index eff4fe24c10ac..c980e35c00e44 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -37,7 +37,7 @@ */ public class TransportDeleteIndexAction extends AcknowledgedTransportMasterNodeAction { - public static final ActionType TYPE = ActionType.acknowledgedResponse("indices:admin/delete"); + public static final ActionType TYPE = ActionType.localOnly("indices:admin/delete"); private static final Logger logger = LogManager.getLogger(TransportDeleteIndexAction.class); private final MetadataDeleteIndexService deleteIndexService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java index 17b28ebbe3b4b..2f57b59c165e2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java @@ -55,6 +55,7 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.codec.postings.ES812PostingsFormat; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.LuceneFilesExtensions; @@ -302,6 +303,9 @@ private static void readProximity(Terms terms, PostingsEnum 
postings) throws IOE private static BlockTermState getBlockTermState(TermsEnum termsEnum, BytesRef term) throws IOException { if (term != null && termsEnum.seekExact(term)) { final TermState termState = termsEnum.termState(); + if (termState instanceof final ES812PostingsFormat.IntBlockTermState blockTermState) { + return new BlockTermState(blockTermState.docStartFP, blockTermState.posStartFP, blockTermState.payStartFP); + } if (termState instanceof final Lucene99PostingsFormat.IntBlockTermState blockTermState) { return new BlockTermState(blockTermState.docStartFP, blockTermState.posStartFP, blockTermState.payStartFP); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/AutoPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/AutoPutMappingAction.java deleted file mode 100644 index 5a78f92c42f2b..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/AutoPutMappingAction.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.mapping.put; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; - -public class AutoPutMappingAction extends ActionType { - - public static final AutoPutMappingAction INSTANCE = new AutoPutMappingAction(); - public static final String NAME = "indices:admin/mapping/auto_put"; - - private AutoPutMappingAction() { - super(NAME, AcknowledgedResponse::readFrom); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingAction.java deleted file mode 100644 index 963d0ac7afd34..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingAction.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.indices.mapping.put; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; - -public class PutMappingAction extends ActionType { - - public static final PutMappingAction INSTANCE = new PutMappingAction(); - public static final String NAME = "indices:admin/mapping/put"; - - private PutMappingAction() { - super(NAME, AcknowledgedResponse::readFrom); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java index 5db50f787c477..85b758f1693d1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java @@ -27,7 +27,7 @@ public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder< PutMappingRequestBuilder> { public PutMappingRequestBuilder(ElasticsearchClient client) { - super(client, PutMappingAction.INSTANCE, new PutMappingRequest()); + super(client, TransportPutMappingAction.TYPE, new PutMappingRequest()); } public PutMappingRequestBuilder setIndices(String... indices) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java index 35a76ada36678..0921bdcfe11a2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; @@ -31,6 +32,7 @@ public class TransportAutoPutMappingAction extends AcknowledgedTransportMasterNodeAction { + public static final ActionType TYPE = ActionType.localOnly("indices:admin/mapping/auto_put"); private static final Logger logger = LogManager.getLogger(TransportAutoPutMappingAction.class); private final MetadataMappingService metadataMappingService; @@ -47,7 +49,7 @@ public TransportAutoPutMappingAction( final SystemIndices systemIndices ) { super( - AutoPutMappingAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index 5cdef40a393b6..489ac9a378254 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.RequestValidators; import 
org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -44,6 +45,7 @@ */ public class TransportPutMappingAction extends AcknowledgedTransportMasterNodeAction { + public static final ActionType TYPE = ActionType.localOnly("indices:admin/mapping/put"); private static final Logger logger = LogManager.getLogger(TransportPutMappingAction.class); private final MetadataMappingService metadataMappingService; @@ -62,7 +64,7 @@ public TransportPutMappingAction( final SystemIndices systemIndices ) { super( - PutMappingAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java index 1ba249aff8538..e107b0d063778 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; +import org.elasticsearch.cluster.metadata.MetadataDataStreamsService; import org.elasticsearch.cluster.metadata.MetadataIndexAliasesService; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster; @@ -290,6 +291,7 @@ private RolloverResult rolloverDataStream( createIndexClusterStateRequest.setMatchingTemplate(templateV2); assert createIndexClusterStateRequest.performReroute() == false : "rerouteCompletionIsNotRequired() assumes reroute is not called by underlying service"; + ClusterState newState = createIndexService.applyCreateIndexRequest( currentState, createIndexClusterStateRequest, @@ -312,6 +314,7 @@ private RolloverResult rolloverDataStream( metadataBuilder = withShardSizeForecastForWriteIndex(dataStreamName, metadataBuilder); newState = ClusterState.builder(newState).metadata(metadataBuilder).build(); + newState = MetadataDataStreamsService.setRolloverOnWrite(newState, dataStreamName, false); return new RolloverResult(newWriteIndexName, originalWriteIndex.getName(), newState); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditions.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditions.java index 8a886e5875c19..24f93ccb45348 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditions.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditions.java @@ -113,6 +113,13 @@ public boolean hasMinConditions() { return conditions.values().stream().anyMatch(c -> Condition.Type.MIN == c.type()); } + /** + * Returns true if there is at least one condition of any type + */ + public boolean hasConditions() { + return conditions.isEmpty() == false; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeNamedWriteableCollection( diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index e860375a6d545..06046a066d211 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.action.admin.indices.rollover; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; @@ -96,6 +97,7 @@ public class RolloverRequest extends AcknowledgedRequest<RolloverRequest> implem private String rolloverTarget; private String newIndexName; private boolean dryRun; + private boolean lazy; private RolloverConditions conditions = new RolloverConditions(); // the index name "_na_" is never read back, what matters are settings, mappings and aliases private CreateIndexRequest createIndexRequest = new CreateIndexRequest("_na_"); @@ -107,6 +109,11 @@ public RolloverRequest(StreamInput in) throws IOException { dryRun = in.readBoolean(); conditions = new RolloverConditions(in); createIndexRequest = new CreateIndexRequest(in); + if (in.getTransportVersion().onOrAfter(TransportVersions.LAZY_ROLLOVER_ADDED)) { + lazy = in.readBoolean(); + } else { + lazy = false; + } } RolloverRequest() {} @@ -142,6 +149,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(dryRun); conditions.writeTo(out); createIndexRequest.writeTo(out); + if (out.getTransportVersion().onOrAfter(TransportVersions.LAZY_ROLLOVER_ADDED)) { + out.writeBoolean(lazy); + } } @Override @@ -194,6 +204,13 @@ public void setConditions(RolloverConditions conditions) { this.conditions = conditions; } + /** + * Sets whether an unconditional rollover should wait for a document to be indexed before it is executed + */ + public void lazy(boolean lazy) { + this.lazy = lazy; + } + public boolean isDryRun() { return dryRun; } @@ -214,6 +231,10 @@ public String getNewIndexName() { return newIndexName; } + public boolean isLazy() { + return lazy; + } + /** * Given the results of evaluating each individual condition, determine whether the rollover request should proceed -- that is, * whether the conditions are met.
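
The `RolloverRequest` hunk above uses the standard wire-compatibility pattern for adding a field to an existing transport message: the new `lazy` boolean is written and read only when the negotiated transport version is at or after `LAZY_ROLLOVER_ADDED`, and it falls back to `false` when talking to an older node. Below is a minimal self-contained sketch of that pattern, using plain `java.io` streams and a hypothetical integer version constant in place of Elasticsearch's `StreamInput`/`StreamOutput` and `TransportVersions`:

```java
import java.io.*;

// Minimal sketch of version-gated serialization; VERSION_WITH_LAZY is a
// hypothetical stand-in for a transport version such as LAZY_ROLLOVER_ADDED.
class VersionedRolloverMessage {
    static final int VERSION_WITH_LAZY = 2;

    final String target;
    final boolean dryRun;
    final boolean lazy; // added in VERSION_WITH_LAZY; defaults to false when absent

    VersionedRolloverMessage(String target, boolean dryRun, boolean lazy) {
        this.target = target;
        this.dryRun = dryRun;
        this.lazy = lazy;
    }

    // The writer only emits the new field when the wire version supports it,
    // so an older reader never sees unexpected trailing bytes.
    void writeTo(DataOutput out, int wireVersion) throws IOException {
        out.writeUTF(target);
        out.writeBoolean(dryRun);
        if (wireVersion >= VERSION_WITH_LAZY) {
            out.writeBoolean(lazy);
        }
    }

    // The reader mirrors the writer exactly, substituting the pre-upgrade
    // default (false) when talking to an older peer.
    static VersionedRolloverMessage readFrom(DataInput in, int wireVersion) throws IOException {
        String target = in.readUTF();
        boolean dryRun = in.readBoolean();
        boolean lazy = wireVersion >= VERSION_WITH_LAZY && in.readBoolean();
        return new VersionedRolloverMessage(target, dryRun, lazy);
    }

    public static void main(String[] args) throws IOException {
        var msg = new VersionedRolloverMessage("my-data-stream", false, true);
        // Round-trip at the old version: the lazy flag is dropped and read back as false.
        var bytes = new ByteArrayOutputStream();
        msg.writeTo(new DataOutputStream(bytes), 1);
        var decoded = readFrom(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())), 1);
        System.out.println(decoded.lazy); // prints false
    }
}
```

The key invariant is that writer and reader apply the same version check, so the byte layout stays in lockstep on both sides of the connection; `RolloverResponse` below applies the identical gate to its own `lazy` field.
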
@@ -257,6 +278,7 @@ public boolean equals(Object o) { } RolloverRequest that = (RolloverRequest) o; return dryRun == that.dryRun + && lazy == that.lazy && Objects.equals(rolloverTarget, that.rolloverTarget) && Objects.equals(newIndexName, that.newIndexName) && Objects.equals(conditions, that.conditions) @@ -265,6 +287,6 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(rolloverTarget, newIndexName, dryRun, conditions, createIndexRequest); + return Objects.hash(rolloverTarget, newIndexName, dryRun, conditions, createIndexRequest, lazy); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java index 7f96662719c96..5cbb3f4161298 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java @@ -43,6 +43,11 @@ public RolloverRequestBuilder dryRun(boolean dryRun) { return this; } + public RolloverRequestBuilder lazy(boolean lazy) { + this.request.lazy(lazy); + return this; + } + public RolloverRequestBuilder settings(Settings settings) { this.request.getCreateIndexRequest().settings(settings); return this; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java index 008a379ecbadb..360ea59e6a299 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java @@ -20,6 +20,8 @@ import java.util.Map; import java.util.Objects; +import static org.elasticsearch.TransportVersions.LAZY_ROLLOVER_ADDED; + /** * Response object for {@link RolloverRequest} API * @@ -32,6 +34,7 @@ public final class RolloverResponse extends ShardsAcknowledgedResponse implement private static final ParseField OLD_INDEX = new ParseField("old_index"); private static final ParseField DRY_RUN = new ParseField("dry_run"); private static final ParseField ROLLED_OVER = new ParseField("rolled_over"); + private static final ParseField LAZY = new ParseField("lazy"); private static final ParseField CONDITIONS = new ParseField("conditions"); private final String oldIndex; @@ -39,9 +42,10 @@ public final class RolloverResponse extends ShardsAcknowledgedResponse implement private final Map conditionStatus; private final boolean dryRun; private final boolean rolledOver; - // Needs to be duplicated, because shardsAcknowledged gets (de)serailized as last field whereas - // in other subclasses of ShardsAcknowledgedResponse this field (de)serailized as first field. + // Needs to be duplicated, because shardsAcknowledged gets (de)serialized as last field whereas + // in other subclasses of ShardsAcknowledgedResponse this field (de)serialized as first field. 
private final boolean shardsAcknowledged; + private final boolean lazy; RolloverResponse(StreamInput in) throws IOException { super(in, false); @@ -55,6 +59,11 @@ public final class RolloverResponse extends ShardsAcknowledgedResponse implement dryRun = in.readBoolean(); rolledOver = in.readBoolean(); shardsAcknowledged = in.readBoolean(); + if (in.getTransportVersion().onOrAfter(LAZY_ROLLOVER_ADDED)) { + lazy = in.readBoolean(); + } else { + lazy = false; + } } public RolloverResponse( @@ -64,7 +73,8 @@ public RolloverResponse( boolean dryRun, boolean rolledOver, boolean acknowledged, - boolean shardsAcknowledged + boolean shardsAcknowledged, + boolean lazy ) { super(acknowledged, shardsAcknowledged); this.oldIndex = oldIndex; @@ -73,6 +83,7 @@ public RolloverResponse( this.rolledOver = rolledOver; this.conditionStatus = conditionResults; this.shardsAcknowledged = shardsAcknowledged; + this.lazy = lazy; } /** @@ -115,6 +126,13 @@ public boolean isShardsAcknowledged() { return shardsAcknowledged; } + /** + * Returns true if the rollover has been lazily applied, meaning the target will roll over when the next document is indexed. + */ + public boolean isLazy() { + return lazy; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -124,6 +142,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(dryRun); out.writeBoolean(rolledOver); out.writeBoolean(shardsAcknowledged); + if (out.getTransportVersion().onOrAfter(LAZY_ROLLOVER_ADDED)) { + out.writeBoolean(lazy); + } } @Override @@ -133,6 +154,7 @@ protected void addCustomFields(XContentBuilder builder, Params params) throws IO builder.field(NEW_INDEX.getPreferredName(), newIndex); builder.field(ROLLED_OVER.getPreferredName(), rolledOver); builder.field(DRY_RUN.getPreferredName(), dryRun); + builder.field(LAZY.getPreferredName(), lazy); builder.startObject(CONDITIONS.getPreferredName()); for (Map.Entry<String, Boolean> entry : conditionStatus.entrySet()) { builder.field(entry.getKey(), entry.getValue()); @@ -146,6 +168,7 @@ public boolean equals(Object o) { RolloverResponse that = (RolloverResponse) o; return dryRun == that.dryRun && rolledOver == that.rolledOver + && lazy == that.lazy && Objects.equals(oldIndex, that.oldIndex) && Objects.equals(newIndex, that.newIndex) && Objects.equals(conditionStatus, that.conditionStatus); @@ -155,6 +178,6 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(super.hashCode(), oldIndex, newIndex, conditionStatus, dryRun, rolledOver); + return Objects.hash(super.hashCode(), oldIndex, newIndex, conditionStatus, dryRun, rolledOver, lazy); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index fce8402114ff6..4bf4ee975b107 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -31,6 +31,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadataStats; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataDataStreamsService; import org.elasticsearch.cluster.routing.allocation.AllocationService; import
org.elasticsearch.cluster.routing.allocation.allocator.AllocationActionMultiListener; import org.elasticsearch.cluster.service.ClusterService; @@ -67,6 +68,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction rolloverTaskQueue; + private final MetadataDataStreamsService metadataDataStreamsService; @Inject public TransportRolloverAction( @@ -77,7 +79,8 @@ public TransportRolloverAction( IndexNameExpressionResolver indexNameExpressionResolver, MetadataRolloverService rolloverService, Client client, - AllocationService allocationService + AllocationService allocationService, + MetadataDataStreamsService metadataDataStreamsService ) { super( RolloverAction.NAME, @@ -96,6 +99,7 @@ public TransportRolloverAction( Priority.NORMAL, new RolloverExecutor(clusterService, allocationService, rolloverService, threadPool) ); + this.metadataDataStreamsService = metadataDataStreamsService; } @Override @@ -118,12 +122,61 @@ protected ClusterBlockException checkBlock(RolloverRequest request, ClusterState protected void masterOperation( Task task, final RolloverRequest rolloverRequest, - final ClusterState oldState, + final ClusterState clusterState, final ActionListener listener ) throws Exception { assert task instanceof CancellableTask; - Metadata metadata = oldState.metadata(); + Metadata metadata = clusterState.metadata(); + // We evaluate the names of the index for which we should evaluate conditions, as well as what our newly created index *would* be. + final MetadataRolloverService.NameResolution trialRolloverNames = MetadataRolloverService.resolveRolloverNames( + clusterState, + rolloverRequest.getRolloverTarget(), + rolloverRequest.getNewIndexName(), + rolloverRequest.getCreateIndexRequest() + ); + final String trialSourceIndexName = trialRolloverNames.sourceName(); + final String trialRolloverIndexName = trialRolloverNames.rolloverName(); + MetadataRolloverService.validateIndexName(clusterState, trialRolloverIndexName); + + boolean isDataStream = metadata.dataStreams().containsKey(rolloverRequest.getRolloverTarget()); + if (rolloverRequest.isLazy()) { + if (isDataStream == false || rolloverRequest.getConditions().hasConditions()) { + String message; + if (isDataStream) { + message = "Lazy rollover can be used only without any conditions." + + " Please remove the conditions from the request body or the query parameter 'lazy'."; + } else if (rolloverRequest.getConditions().hasConditions() == false) { + message = "Lazy rollover can be applied only on a data stream." + " Please remove the query parameter 'lazy'."; + } else { + message = "Lazy rollover can be applied only on a data stream with no conditions." 
+ + " Please remove the query parameter 'lazy'."; + } + listener.onFailure(new IllegalArgumentException(message)); + return; + } + if (rolloverRequest.isDryRun() == false) { + metadataDataStreamsService.setRolloverOnWrite( + rolloverRequest.getRolloverTarget(), + true, + rolloverRequest.ackTimeout(), + rolloverRequest.masterNodeTimeout(), + listener.map( + response -> new RolloverResponse( + trialSourceIndexName, + trialRolloverIndexName, + Map.of(), + false, + false, + response.isAcknowledged(), + false, + response.isAcknowledged() + ) + ) + ); + return; + } + } IndicesStatsRequest statsRequest = new IndicesStatsRequest().indices(rolloverRequest.getRolloverTarget()) .clear() @@ -140,18 +193,6 @@ protected void masterOperation( statsRequest, listener.delegateFailureAndWrap((delegate, statsResponse) -> { - // Now that we have the stats for the cluster, we need to know the names of the index for which we should evaluate - // conditions, as well as what our newly created index *would* be. - final MetadataRolloverService.NameResolution trialRolloverNames = MetadataRolloverService.resolveRolloverNames( - oldState, - rolloverRequest.getRolloverTarget(), - rolloverRequest.getNewIndexName(), - rolloverRequest.getCreateIndexRequest() - ); - final String trialSourceIndexName = trialRolloverNames.sourceName(); - final String trialRolloverIndexName = trialRolloverNames.rolloverName(); - - MetadataRolloverService.validateIndexName(oldState, trialRolloverIndexName); // Evaluate the conditions, so that we can tell without a cluster state update whether a rollover would occur. final Map trialConditionResults = evaluateConditions( @@ -166,7 +207,8 @@ protected void masterOperation( rolloverRequest.isDryRun(), false, false, - false + false, + rolloverRequest.isLazy() ); // If this is a dry run, return with the results without invoking a cluster state update @@ -366,7 +408,8 @@ public ClusterState executeTask( false, true, true, - isShardsAcknowledged + isShardsAcknowledged, + false ) ) ); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index 19fa9c3d359fb..3d1a8dad9d1bc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; @@ -44,6 +45,7 @@ public class TransportUpdateSettingsAction extends AcknowledgedTransportMasterNodeAction { + public static final ActionType TYPE = ActionType.localOnly("indices:admin/settings/update"); private static final Logger logger = LogManager.getLogger(TransportUpdateSettingsAction.class); private final MetadataUpdateSettingsService updateSettingsService; @@ -60,7 +62,7 @@ public TransportUpdateSettingsAction( SystemIndices systemIndices ) { super( - UpdateSettingsAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java deleted file mode 100644 index 14cfebb63ab3d..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.settings.put; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; - -public class UpdateSettingsAction extends ActionType { - - public static final UpdateSettingsAction INSTANCE = new UpdateSettingsAction(); - public static final String NAME = "indices:admin/settings/update"; - - private UpdateSettingsAction() { - super(NAME, AcknowledgedResponse::readFrom); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java index a48efa31302bf..634223b22d37a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java @@ -26,7 +26,7 @@ public class UpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder< UpdateSettingsRequestBuilder> { public UpdateSettingsRequestBuilder(ElasticsearchClient client, String... indices) { - super(client, UpdateSettingsAction.INSTANCE, new UpdateSettingsRequest(indices)); + super(client, TransportUpdateSettingsAction.TYPE, new UpdateSettingsRequest(indices)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java deleted file mode 100644 index 7165953fa85ed..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.indices.shards; - -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; -import org.elasticsearch.cluster.health.ClusterHealthStatus; - -/** - * Request builder for {@link IndicesShardStoresRequest} - */ -public class IndicesShardStoreRequestBuilder extends MasterNodeReadOperationRequestBuilder< - IndicesShardStoresRequest, - IndicesShardStoresResponse, - IndicesShardStoreRequestBuilder> { - - public IndicesShardStoreRequestBuilder(ElasticsearchClient client, String... indices) { - super(client, IndicesShardStoresAction.INSTANCE, new IndicesShardStoresRequest(indices)); - } - - /** - * Sets the indices for the shard stores request - */ - public IndicesShardStoreRequestBuilder setIndices(String... indices) { - request.indices(indices); - return this; - } - - /** - * Specifies what type of requested indices to ignore and wildcard indices expressions - * By default, expands wildcards to both open and closed indices - */ - public IndicesShardStoreRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) { - request.indicesOptions(indicesOptions); - return this; - } - - /** - * Set statuses to filter shards to get stores info on. - * @param shardStatuses acceptable values are "green", "yellow", "red" and "all" - * see {@link ClusterHealthStatus} for details - */ - public IndicesShardStoreRequestBuilder setShardStatuses(String... shardStatuses) { - request.shardStatuses(shardStatuses); - return this; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresAction.java deleted file mode 100644 index f170e14778504..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresAction.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.shards; - -import org.elasticsearch.action.ActionType; - -/** - * ActionType for {@link TransportIndicesShardStoresAction} - * - * Exposes shard store information for requested indices. 
- * Shard store information reports which nodes hold shard copies, how recent they are - * and any exceptions on opening the shard index or from previous engine failures - */ -public class IndicesShardStoresAction extends ActionType { - - public static final IndicesShardStoresAction INSTANCE = new IndicesShardStoresAction(); - public static final String NAME = "indices:monitor/shard_stores"; - - private IndicesShardStoresAction() { - super(NAME, IndicesShardStoresResponse::new); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java index a4a8a475ae8b7..0ff478365cb53 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java @@ -25,14 +25,14 @@ import java.util.Map; /** - * Request for {@link IndicesShardStoresAction} + * Request for {@link TransportIndicesShardStoresAction} */ public class IndicesShardStoresRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { static final int DEFAULT_MAX_CONCURRENT_SHARD_REQUESTS = 100; private String[] indices = Strings.EMPTY_ARRAY; - private IndicesOptions indicesOptions = IndicesOptions.strictExpand(); + private IndicesOptions indicesOptions = IndicesOptions.strictExpandHidden(); private EnumSet statuses = EnumSet.of(ClusterHealthStatus.YELLOW, ClusterHealthStatus.RED); private int maxConcurrentShardRequests = DEFAULT_MAX_CONCURRENT_SHARD_REQUESTS; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java index e9b27629beebf..fc1ef9e011e62 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -29,7 +29,7 @@ import java.util.Map; /** - * Response for {@link IndicesShardStoresAction} + * Response for {@link TransportIndicesShardStoresAction} * * Consists of {@link StoreStatus}s for requested indices grouped by * indices and shard ids and a list of encountered node {@link Failure}s diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index 4f04414cff1ac..7d091d8278ab7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse.Failure; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse.StoreStatus; @@ -62,6 +63,8 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc IndicesShardStoresRequest, IndicesShardStoresResponse> { + public static final ActionType TYPE = 
ActionType.localOnly("indices:monitor/shard_stores"); + private static final Logger logger = LogManager.getLogger(TransportIndicesShardStoresAction.class); private final NodeClient client; @@ -76,7 +79,7 @@ public TransportIndicesShardStoresAction( NodeClient client ) { super( - IndicesShardStoresAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeNumberOfShardsCalculator.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeNumberOfShardsCalculator.java index 31807919fd9d9..076841e3efadc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeNumberOfShardsCalculator.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeNumberOfShardsCalculator.java @@ -69,7 +69,7 @@ public int calculate(Integer numberOfShards, ByteSizeValue maxPrimaryShardSize, } } else if (maxPrimaryShardSize != null) { int sourceIndexShardsNum = sourceMetadata.getNumberOfShards(); - long sourceIndexStorageBytes = indexStoreStats.getSizeInBytes(); + long sourceIndexStorageBytes = indexStoreStats.sizeInBytes(); long maxPrimaryShardSizeBytes = maxPrimaryShardSize.getBytes(); long minShardsNum = sourceIndexStorageBytes / maxPrimaryShardSizeBytes; if (minShardsNum * maxPrimaryShardSizeBytes < sourceIndexStorageBytes) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java index 76500964be750..d0da715b17168 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java @@ -48,7 +48,7 @@ public class CommonStats implements Writeable, ToXContentFragment { private static final TransportVersion VERSION_SUPPORTING_NODE_MAPPINGS = TransportVersions.V_8_5_0; - private static final TransportVersion VERSION_SUPPORTING_DENSE_VECTOR_STATS = TransportVersions.V_8_500_058; + private static final TransportVersion VERSION_SUPPORTING_DENSE_VECTOR_STATS = TransportVersions.V_8_500_061; @Nullable public DocsStats docs; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteComponentTemplateAction.java deleted file mode 100644 index 167739c3e5319..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteComponentTemplateAction.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.indices.template.delete; - -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.action.support.master.MasterNodeRequest; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Objects; - -import static org.elasticsearch.action.ValidateActions.addValidationError; - -public class DeleteComponentTemplateAction extends ActionType { - - public static final DeleteComponentTemplateAction INSTANCE = new DeleteComponentTemplateAction(); - public static final String NAME = "cluster:admin/component_template/delete"; - - private DeleteComponentTemplateAction() { - super(NAME, AcknowledgedResponse::readFrom); - } - - public static class Request extends MasterNodeRequest { - - private final String[] names; - - public Request(StreamInput in) throws IOException { - super(in); - names = in.readStringArray(); - } - - /** - * Constructs a new delete index request for the specified name. - */ - public Request(String... names) { - this.names = Objects.requireNonNull(names, "component templates to delete must not be null"); - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (Arrays.stream(names).anyMatch(Strings::hasLength) == false) { - validationException = addValidationError("no component template names specified", validationException); - } - return validationException; - } - - /** - * The index template names to delete. - */ - public String[] names() { - return names; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArray(names); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateAction.java deleted file mode 100644 index 5613db61bb4d4..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateAction.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.indices.template.delete; - -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.action.support.master.MasterNodeRequest; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Objects; - -import static org.elasticsearch.action.ValidateActions.addValidationError; - -public class DeleteComposableIndexTemplateAction extends ActionType { - - public static final DeleteComposableIndexTemplateAction INSTANCE = new DeleteComposableIndexTemplateAction(); - public static final String NAME = "indices:admin/index_template/delete"; - - private DeleteComposableIndexTemplateAction() { - super(NAME, AcknowledgedResponse::readFrom); - } - - public static class Request extends MasterNodeRequest { - - private final String[] names; - - public Request(StreamInput in) throws IOException { - super(in); - names = in.readStringArray(); - } - - /** - * Constructs a new delete template request for the specified name. - */ - public Request(String... names) { - this.names = Objects.requireNonNull(names, "templates to delete must not be null"); - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (Arrays.stream(names).anyMatch(Strings::hasLength) == false) { - validationException = addValidationError("no template names specified", validationException); - } - return validationException; - } - - /** - * The index template names to delete. - */ - public String[] names() { - return names; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArray(names); - } - - @Override - public int hashCode() { - return Arrays.hashCode(names); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - Request other = (Request) obj; - return Arrays.equals(other.names, this.names); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java deleted file mode 100644 index d55223795e8ce..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.indices.template.delete; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; - -public class DeleteIndexTemplateAction extends ActionType { - - public static final DeleteIndexTemplateAction INSTANCE = new DeleteIndexTemplateAction(); - public static final String NAME = "indices:admin/template/delete"; - - private DeleteIndexTemplateAction() { - super(NAME, AcknowledgedResponse::readFrom); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java index f020474e680d9..a1f5c6cdb4ebb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java @@ -17,7 +17,7 @@ public class DeleteIndexTemplateRequestBuilder extends MasterNodeOperationReques DeleteIndexTemplateRequestBuilder> { public DeleteIndexTemplateRequestBuilder(ElasticsearchClient client, String name) { - super(client, DeleteIndexTemplateAction.INSTANCE, new DeleteIndexTemplateRequest(name)); + super(client, TransportDeleteIndexTemplateAction.TYPE, new DeleteIndexTemplateRequest(name)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java index e1987e822e4f4..9b6f0ad9a66c1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java @@ -9,28 +9,41 @@ package org.elasticsearch.action.admin.indices.template.delete; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.indices.template.reservedstate.ReservedComposableIndexTemplateAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; +import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.Arrays; +import java.util.Objects; import java.util.Optional; import java.util.Set; import 
java.util.stream.Collectors; -public class TransportDeleteComponentTemplateAction extends AcknowledgedTransportMasterNodeAction { +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class TransportDeleteComponentTemplateAction extends AcknowledgedTransportMasterNodeAction< + TransportDeleteComponentTemplateAction.Request> { + + public static final ActionType TYPE = ActionType.localOnly("cluster:admin/component_template/delete"); private final MetadataIndexTemplateService indexTemplateService; @@ -44,12 +57,12 @@ public TransportDeleteComponentTemplateAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super( - DeleteComponentTemplateAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, actionFilters, - DeleteComponentTemplateAction.Request::new, + Request::new, indexNameExpressionResolver, EsExecutors.DIRECT_EXECUTOR_SERVICE ); @@ -57,14 +70,14 @@ public TransportDeleteComponentTemplateAction( } @Override - protected ClusterBlockException checkBlock(DeleteComponentTemplateAction.Request request, ClusterState state) { + protected ClusterBlockException checkBlock(Request request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } @Override protected void masterOperation( Task task, - final DeleteComponentTemplateAction.Request request, + final Request request, final ClusterState state, final ActionListener listener ) { @@ -77,9 +90,48 @@ public Optional reservedStateHandlerName() { } @Override - public Set modifiedKeys(DeleteComponentTemplateAction.Request request) { + public Set modifiedKeys(Request request) { return Arrays.stream(request.names()) .map(n -> ReservedComposableIndexTemplateAction.reservedComponentName(n)) .collect(Collectors.toSet()); } + + public static class Request extends MasterNodeRequest { + + private final String[] names; + + public Request(StreamInput in) throws IOException { + super(in); + names = in.readStringArray(); + } + + /** + * Constructs a new delete index request for the specified name. + */ + public Request(String... names) { + this.names = Objects.requireNonNull(names, "component templates to delete must not be null"); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (Arrays.stream(names).anyMatch(Strings::hasLength) == false) { + validationException = addValidationError("no component template names specified", validationException); + } + return validationException; + } + + /** + * The index template names to delete. 
+ */ + public String[] names() { + return names; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(names); + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java index 2f3b95ec0c714..5eada4dd6ace9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java @@ -9,30 +9,41 @@ package org.elasticsearch.action.admin.indices.template.delete; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.indices.template.reservedstate.ReservedComposableIndexTemplateAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; +import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.Arrays; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; +import static org.elasticsearch.action.ValidateActions.addValidationError; + public class TransportDeleteComposableIndexTemplateAction extends AcknowledgedTransportMasterNodeAction< - DeleteComposableIndexTemplateAction.Request> { + TransportDeleteComposableIndexTemplateAction.Request> { + public static final ActionType TYPE = ActionType.localOnly("indices:admin/index_template/delete"); private final MetadataIndexTemplateService indexTemplateService; @Inject @@ -45,12 +56,12 @@ public TransportDeleteComposableIndexTemplateAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super( - DeleteComposableIndexTemplateAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, actionFilters, - DeleteComposableIndexTemplateAction.Request::new, + Request::new, indexNameExpressionResolver, EsExecutors.DIRECT_EXECUTOR_SERVICE ); @@ -58,14 +69,14 @@ public TransportDeleteComposableIndexTemplateAction( } @Override - protected ClusterBlockException checkBlock(DeleteComposableIndexTemplateAction.Request request, ClusterState state) { + protected ClusterBlockException checkBlock(Request request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); 
} @Override protected void masterOperation( Task task, - final DeleteComposableIndexTemplateAction.Request request, + final Request request, final ClusterState state, final ActionListener listener ) { @@ -78,9 +89,65 @@ public Optional reservedStateHandlerName() { } @Override - public Set modifiedKeys(DeleteComposableIndexTemplateAction.Request request) { + public Set modifiedKeys(Request request) { return Arrays.stream(request.names()) .map(n -> ReservedComposableIndexTemplateAction.reservedComposableIndexName(n)) .collect(Collectors.toSet()); } + + public static class Request extends MasterNodeRequest { + + private final String[] names; + + public Request(StreamInput in) throws IOException { + super(in); + names = in.readStringArray(); + } + + /** + * Constructs a new delete template request for the specified name. + */ + public Request(String... names) { + this.names = Objects.requireNonNull(names, "templates to delete must not be null"); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (Arrays.stream(names).anyMatch(Strings::hasLength) == false) { + validationException = addValidationError("no template names specified", validationException); + } + return validationException; + } + + /** + * The index template names to delete. + */ + public String[] names() { + return names; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(names); + } + + @Override + public int hashCode() { + return Arrays.hashCode(names); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Arrays.equals(other.names, this.names); + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java index 56c341fe649cc..066d0999dd81a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; @@ -30,6 +31,7 @@ */ public class TransportDeleteIndexTemplateAction extends AcknowledgedTransportMasterNodeAction { + public static final ActionType TYPE = ActionType.localOnly("indices:admin/template/delete"); private static final Logger logger = LogManager.getLogger(TransportDeleteIndexTemplateAction.class); private final MetadataIndexTemplateService indexTemplateService; @@ -44,7 +46,7 @@ public TransportDeleteIndexTemplateAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super( - DeleteIndexTemplateAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java index c34e8d83e8b80..6b71be3925478 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java @@ -10,7 +10,7 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -25,7 +25,7 @@ public class SimulateIndexTemplateRequest extends MasterNodeReadRequest { private String templateName; @Nullable - private PutComposableIndexTemplateAction.Request indexTemplateRequest; + private TransportPutComposableIndexTemplateAction.Request indexTemplateRequest; private boolean includeDefaults = false; public Request() {} @@ -52,7 +52,7 @@ public Request(String templateName) { this.templateName = templateName; } - public Request(PutComposableIndexTemplateAction.Request indexTemplateRequest) { + public Request(TransportPutComposableIndexTemplateAction.Request indexTemplateRequest) { if (indexTemplateRequest == null) { throw new IllegalArgumentException("index template body must be present"); } @@ -62,7 +62,7 @@ public Request(PutComposableIndexTemplateAction.Request indexTemplateRequest) { public Request(StreamInput in) throws IOException { super(in); templateName = in.readOptionalString(); - indexTemplateRequest = in.readOptionalWriteable(PutComposableIndexTemplateAction.Request::new); + indexTemplateRequest = in.readOptionalWriteable(TransportPutComposableIndexTemplateAction.Request::new); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { includeDefaults = in.readBoolean(); } @@ -103,7 +103,7 @@ public boolean includeDefaults() { } @Nullable - public PutComposableIndexTemplateAction.Request getIndexTemplateRequest() { + public TransportPutComposableIndexTemplateAction.Request getIndexTemplateRequest() { return indexTemplateRequest; } @@ -112,7 +112,7 @@ public Request templateName(String templateName) { return this; } - public Request indexTemplateRequest(PutComposableIndexTemplateAction.Request indexTemplateRequest) { + public Request indexTemplateRequest(TransportPutComposableIndexTemplateAction.Request indexTemplateRequest) { this.indexTemplateRequest = indexTemplateRequest; return this; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java deleted file mode 100644 index 1ffdf3dcb9812..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.indices.template.put; - -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.action.support.master.MasterNodeRequest; -import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.core.Nullable; - -import java.io.IOException; -import java.util.Objects; - -import static org.elasticsearch.action.ValidateActions.addValidationError; - -public class PutComposableIndexTemplateAction extends ActionType { - - public static final PutComposableIndexTemplateAction INSTANCE = new PutComposableIndexTemplateAction(); - public static final String NAME = "indices:admin/index_template/put"; - - private PutComposableIndexTemplateAction() { - super(NAME, AcknowledgedResponse::readFrom); - } - - /** - * A request for putting a single index template into the cluster state - */ - public static class Request extends MasterNodeRequest implements IndicesRequest { - private final String name; - @Nullable - private String cause; - private boolean create; - private ComposableIndexTemplate indexTemplate; - - public Request(StreamInput in) throws IOException { - super(in); - this.name = in.readString(); - this.cause = in.readOptionalString(); - this.create = in.readBoolean(); - this.indexTemplate = new ComposableIndexTemplate(in); - } - - /** - * Constructs a new put index template request with the provided name. - */ - public Request(String name) { - this.name = name; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(name); - out.writeOptionalString(cause); - out.writeBoolean(create); - this.indexTemplate.writeTo(out); - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (name == null || Strings.hasText(name) == false) { - validationException = addValidationError("name is missing", validationException); - } - validationException = validateIndexTemplate(validationException); - return validationException; - } - - public ActionRequestValidationException validateIndexTemplate(@Nullable ActionRequestValidationException validationException) { - if (indexTemplate == null) { - validationException = addValidationError("an index template is required", validationException); - } else { - if (indexTemplate.template() != null && indexTemplate.indexPatterns().stream().anyMatch(Regex::isMatchAllPattern)) { - if (indexTemplate.template().settings() != null - && IndexMetadata.INDEX_HIDDEN_SETTING.exists(indexTemplate.template().settings())) { - validationException = addValidationError( - "global composable templates may not specify the setting " + IndexMetadata.INDEX_HIDDEN_SETTING.getKey(), - validationException - ); - } - } - if (indexTemplate.priority() != null && indexTemplate.priority() < 0) { - validationException = addValidationError("index template priority must be >= 0", validationException); - } - } - return validationException; - } - - /** - * The name of the index template. 
- */ - public String name() { - return this.name; - } - - /** - * Set to {@code true} to force only creation, not an update of an index template. If it already - * exists, it will fail with an {@link IllegalArgumentException}. - */ - public Request create(boolean create) { - this.create = create; - return this; - } - - public boolean create() { - return create; - } - - /** - * The cause for this index template creation. - */ - public Request cause(@Nullable String cause) { - this.cause = cause; - return this; - } - - @Nullable - public String cause() { - return this.cause; - } - - /** - * The index template that will be inserted into the cluster state - */ - public Request indexTemplate(ComposableIndexTemplate template) { - this.indexTemplate = template; - return this; - } - - public ComposableIndexTemplate indexTemplate() { - return this.indexTemplate; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("PutTemplateV2Request["); - sb.append("name=").append(name); - sb.append(", cause=").append(cause); - sb.append(", create=").append(create); - sb.append(", index_template=").append(indexTemplate); - sb.append("]"); - return sb.toString(); - } - - @Override - public String[] indices() { - return indexTemplate.indexPatterns().toArray(Strings.EMPTY_ARRAY); - } - - @Override - public IndicesOptions indicesOptions() { - return IndicesOptions.strictExpand(); - } - - @Override - public int hashCode() { - return Objects.hash(name, cause, create, indexTemplate); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - Request other = (Request) obj; - return Objects.equals(this.name, other.name) - && Objects.equals(this.cause, other.cause) - && Objects.equals(this.indexTemplate, other.indexTemplate) - && this.create == other.create; - } - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java deleted file mode 100644 index 3b8d52089d616..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.indices.template.put; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; - -public class PutIndexTemplateAction extends ActionType { - - public static final PutIndexTemplateAction INSTANCE = new PutIndexTemplateAction(); - public static final String NAME = "indices:admin/template/put"; - - private PutIndexTemplateAction() { - super(NAME, AcknowledgedResponse::readFrom); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java index 45af625cf2f65..d0b93d28ad157 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java @@ -25,7 +25,7 @@ public class PutIndexTemplateRequestBuilder extends MasterNodeOperationRequestBu PutIndexTemplateRequestBuilder> { public PutIndexTemplateRequestBuilder(ElasticsearchClient client, String name) { - super(client, PutIndexTemplateAction.INSTANCE, new PutIndexTemplateRequest(name)); + super(client, TransportPutIndexTemplateAction.TYPE, new PutIndexTemplateRequest(name)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java index 541aa43c72490..9155eac703632 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java @@ -9,35 +9,50 @@ package org.elasticsearch.action.admin.indices.template.put; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.indices.template.reservedstate.ReservedComposableIndexTemplateAction; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; +import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.concurrent.EsExecutors; +import 
org.elasticsearch.core.Nullable; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; +import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.core.Strings.format; public class TransportPutComposableIndexTemplateAction extends AcknowledgedTransportMasterNodeAction< - PutComposableIndexTemplateAction.Request> { + TransportPutComposableIndexTemplateAction.Request> { + public static final ActionType TYPE = ActionType.localOnly("indices:admin/index_template/put"); private final MetadataIndexTemplateService indexTemplateService; @Inject @@ -50,12 +65,12 @@ public TransportPutComposableIndexTemplateAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super( - PutComposableIndexTemplateAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, actionFilters, - PutComposableIndexTemplateAction.Request::new, + Request::new, indexNameExpressionResolver, EsExecutors.DIRECT_EXECUTOR_SERVICE ); @@ -63,14 +78,14 @@ public TransportPutComposableIndexTemplateAction( } @Override - protected ClusterBlockException checkBlock(PutComposableIndexTemplateAction.Request request, ClusterState state) { + protected ClusterBlockException checkBlock(Request request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } @Override protected void masterOperation( Task task, - final PutComposableIndexTemplateAction.Request request, + final Request request, final ClusterState state, final ActionListener listener ) { @@ -86,10 +101,7 @@ protected void masterOperation( ); } - public static void verifyIfUsingReservedComponentTemplates( - final PutComposableIndexTemplateAction.Request request, - final ClusterState state - ) { + public static void verifyIfUsingReservedComponentTemplates(final Request request, final ClusterState state) { ComposableIndexTemplate indexTemplate = request.indexTemplate(); Set composedOfKeys = indexTemplate.composedOf() .stream() @@ -118,7 +130,158 @@ public Optional reservedStateHandlerName() { } @Override - public Set modifiedKeys(PutComposableIndexTemplateAction.Request request) { + public Set modifiedKeys(Request request) { return Set.of(ReservedComposableIndexTemplateAction.reservedComposableIndexName(request.name())); } + + /** + * A request for putting a single index template into the cluster state + */ + public static class Request extends MasterNodeRequest implements IndicesRequest { + private final String name; + @Nullable + private String cause; + private boolean create; + private ComposableIndexTemplate indexTemplate; + + public Request(StreamInput in) throws IOException { + super(in); + this.name = in.readString(); + this.cause = in.readOptionalString(); + this.create = in.readBoolean(); + this.indexTemplate = new ComposableIndexTemplate(in); + } + + /** + * Constructs a new put index template request with the provided name. 
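+ * The name is also the key under which the template is registered in the cluster state.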
+ */ + public Request(String name) { + this.name = name; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(name); + out.writeOptionalString(cause); + out.writeBoolean(create); + this.indexTemplate.writeTo(out); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (name == null || Strings.hasText(name) == false) { + validationException = addValidationError("name is missing", validationException); + } + validationException = validateIndexTemplate(validationException); + return validationException; + } + + public ActionRequestValidationException validateIndexTemplate(@Nullable ActionRequestValidationException validationException) { + if (indexTemplate == null) { + validationException = addValidationError("an index template is required", validationException); + } else { + if (indexTemplate.template() != null && indexTemplate.indexPatterns().stream().anyMatch(Regex::isMatchAllPattern)) { + if (indexTemplate.template().settings() != null + && IndexMetadata.INDEX_HIDDEN_SETTING.exists(indexTemplate.template().settings())) { + validationException = addValidationError( + "global composable templates may not specify the setting " + IndexMetadata.INDEX_HIDDEN_SETTING.getKey(), + validationException + ); + } + } + if (indexTemplate.priority() != null && indexTemplate.priority() < 0) { + validationException = addValidationError("index template priority must be >= 0", validationException); + } + } + return validationException; + } + + /** + * The name of the index template. + */ + public String name() { + return this.name; + } + + /** + * Set to {@code true} to force only creation, not an update of an index template. If it already + * exists, it will fail with an {@link IllegalArgumentException}. + */ + public Request create(boolean create) { + this.create = create; + return this; + } + + public boolean create() { + return create; + } + + /** + * The cause for this index template creation. 
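+ * May be {@code null} if no cause was given.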
+ */ + public Request cause(@Nullable String cause) { + this.cause = cause; + return this; + } + + @Nullable + public String cause() { + return this.cause; + } + + /** + * The index template that will be inserted into the cluster state + */ + public Request indexTemplate(ComposableIndexTemplate template) { + this.indexTemplate = template; + return this; + } + + public ComposableIndexTemplate indexTemplate() { + return this.indexTemplate; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("PutTemplateV2Request["); + sb.append("name=").append(name); + sb.append(", cause=").append(cause); + sb.append(", create=").append(create); + sb.append(", index_template=").append(indexTemplate); + sb.append("]"); + return sb.toString(); + } + + @Override + public String[] indices() { + return indexTemplate.indexPatterns().toArray(Strings.EMPTY_ARRAY); + } + + @Override + public IndicesOptions indicesOptions() { + return IndicesOptions.strictExpand(); + } + + @Override + public int hashCode() { + return Objects.hash(name, cause, create, indexTemplate); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(this.name, other.name) + && Objects.equals(this.cause, other.cause) + && Objects.equals(this.indexTemplate, other.indexTemplate) + && this.create == other.create; + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index 73f3f10680784..d1d701e63675f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; @@ -36,6 +37,7 @@ */ public class TransportPutIndexTemplateAction extends AcknowledgedTransportMasterNodeAction { + public static final ActionType TYPE = ActionType.localOnly("indices:admin/template/put"); private static final Logger logger = LogManager.getLogger(TransportPutIndexTemplateAction.class); private final MetadataIndexTemplateService indexTemplateService; @@ -52,7 +54,7 @@ public TransportPutIndexTemplateAction( IndexScopedSettings indexScopedSettings ) { super( - PutIndexTemplateAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateAction.java index dbb6b5994867f..abd2c9f6872e8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateAction.java @@ -9,8 +9,8 @@ package 
org.elasticsearch.action.admin.indices.template.reservedstate; import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.put.TransportPutComponentTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; @@ -197,7 +197,7 @@ public TransformState transform(Object source, TransformState prevState) throws @Override public ComponentsAndComposables fromXContent(XContentParser parser) throws IOException { List componentTemplates = new ArrayList<>(); - List composableTemplates = new ArrayList<>(); + List composableTemplates = new ArrayList<>(); Map source = parser.map(); @SuppressWarnings("unchecked") @@ -223,7 +223,7 @@ public ComponentsAndComposables fromXContent(XContentParser parser) throws IOExc @SuppressWarnings("unchecked") Map content = (Map) entry.getValue(); try (XContentParser componentParser = mapToXContentParser(XContentParserConfiguration.EMPTY, content)) { - var composableTemplate = new PutComposableIndexTemplateAction.Request(entry.getKey()); + var composableTemplate = new TransportPutComposableIndexTemplateAction.Request(entry.getKey()); composableTemplate.indexTemplate(ComposableIndexTemplate.parse(componentParser)); composableTemplates.add(composableTemplate); } @@ -235,6 +235,6 @@ public ComponentsAndComposables fromXContent(XContentParser parser) throws IOExc record ComponentsAndComposables( List componentTemplates, - List composableTemplates + List composableTemplates ) {} } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java index 4291ba5895beb..78df7fdc25542 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java @@ -61,6 +61,7 @@ enum ItemProcessingState { private DocWriteRequest requestToExecute; private BulkItemResponse executionResult; private int updateRetryCounter; + private long noopMappingUpdateRetryForMappingVersion; BulkPrimaryExecutionContext(BulkShardRequest request, IndexShard primary) { this.request = request; @@ -89,6 +90,7 @@ private void advance() { updateRetryCounter = 0; requestToExecute = null; executionResult = null; + noopMappingUpdateRetryForMappingVersion = -1; assert assertInvariants(ItemProcessingState.INITIAL); } @@ -191,12 +193,39 @@ public void resetForMappingUpdateRetry() { resetForExecutionRetry(); } + /** + * Don't bother the master node if the mapping update is a noop. + * This may happen if there was a concurrent mapping update that added the same field. + * + * @param mappingVersion the current mapping version. This is used to guard against infinite loops. + * @throws IllegalStateException if retried multiple times with the same mapping version, to guard against infinite loops. 
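+ * @see #resetForMappingUpdateRetry()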
+ */ + public void resetForNoopMappingUpdateRetry(long mappingVersion) { + assert assertInvariants(ItemProcessingState.TRANSLATED); + if (noopMappingUpdateRetryForMappingVersion == mappingVersion) { + // this should never happen; if we end up here, there's probably a bug + // it seems like we're in a live lock/infinite loop: + // we've already retried and are about to retry again + // since no state has changed in the meantime (the mapping version is still the same), + // we can't expect that another retry would yield a different result + // a possible cause: + // maybe we added more dynamic mappers in DocumentParserContext.addDynamicMapper than the field limit allows + // the additional fields are then ignored by the mapping merge and the process repeats + String message = "On retry, this indexing request resulted in another noop mapping update. " + "Failing the indexing operation to prevent an infinite retry loop."; + assert false : message; + throw new IllegalStateException(message); + } + resetForExecutionRetry(); + noopMappingUpdateRetryForMappingVersion = mappingVersion; + } + /** resets the current item state, prepare for a new execution */ private void resetForExecutionRetry() { - assert assertInvariants(ItemProcessingState.WAIT_FOR_MAPPING_UPDATE, ItemProcessingState.EXECUTED); currentItemState = ItemProcessingState.INITIAL; requestToExecute = null; executionResult = null; + noopMappingUpdateRetryForMappingVersion = -1; assert assertInvariants(ItemProcessingState.INITIAL); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 39b45f92dc6da..75b45cecfadf2 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -25,6 +25,9 @@ import org.elasticsearch.action.admin.indices.create.AutoCreateAction; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.admin.indices.rollover.RolloverAction; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.inference.InferenceAction; import org.elasticsearch.action.ingest.IngestActionForwarder; @@ -355,7 +358,7 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, String exec // Step 1: collect all the indices in the request final Map<String, Boolean> indices = bulkRequest.requests.stream() // delete requests should not attempt to create the index (if the index does not - // exists), unless an external versioning is used + // exist), unless an external versioning is used .filter( request -> request.opType() != DocWriteRequest.OpType.DELETE || request.versionType() == VersionType.EXTERNAL @@ -376,20 +379,28 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, String exec } } - // Step 3: create all the indices that are missing, if there are any missing. start the bulk after all the creates come back.
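The rewritten steps below fan out the missing index creations and data stream rollovers, then run the bulk exactly once after every prerequisite completes. A minimal standalone sketch of the ref-counting pattern the new code relies on, with the async create/rollover calls stood in by plain runnables:

```java
import org.elasticsearch.action.support.RefCountingRunnable;
import org.elasticsearch.core.Releasable;

import java.util.List;

final class RefCountingSketch {
    // Runs `done` exactly once: after this method exits the try block
    // and every acquired reference has been released.
    static void runAfterPrerequisites(List<Runnable> prerequisites, Runnable done) {
        try (RefCountingRunnable refs = new RefCountingRunnable(done)) {
            for (Runnable prerequisite : prerequisites) {
                Releasable ref = refs.acquire(); // one reference per in-flight action
                prerequisite.run();              // the real code runs an async create/rollover here
                ref.close();                     // in the real code, released from the action's listener
            }
        } // closing the try releases the initial reference held by `refs`
    }
}
```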
+ // Step 3: Collect all the data streams that need to be rolled over before writing + Set dataStreamsToBeRolledOver = indices.keySet().stream().filter(target -> { + DataStream dataStream = state.metadata().dataStreams().get(target); + return dataStream != null && dataStream.rolloverOnWrite(); + }).collect(Collectors.toSet()); + + // Step 4: create all the indices that are missing, if there are any missing. start the bulk after all the creates come back. createMissingIndicesAndIndexData( task, bulkRequest, executorName, listener, autoCreateIndices, + dataStreamsToBeRolledOver, indicesThatCannotBeCreated, startTime ); } /* - * This method is responsible for creating any missing indices and indexing the data in the BulkRequest + * This method is responsible for creating any missing indices, rolling over a data stream when needed and then + * indexing the data in the BulkRequest */ protected void createMissingIndicesAndIndexData( Task task, @@ -397,22 +408,27 @@ protected void createMissingIndicesAndIndexData( String executorName, ActionListener listener, Set autoCreateIndices, + Set dataStreamsToBeRolledOver, Map indicesThatCannotBeCreated, long startTime ) { final AtomicArray responses = new AtomicArray<>(bulkRequest.requests.size()); - if (autoCreateIndices.isEmpty()) { + // Optimizing when there are no prerequisite actions + if (autoCreateIndices.isEmpty() && dataStreamsToBeRolledOver.isEmpty()) { executeBulk(task, bulkRequest, startTime, listener, executorName, responses, indicesThatCannotBeCreated); - } else { - final AtomicInteger counter = new AtomicInteger(autoCreateIndices.size()); + return; + } + Runnable executeBulkRunnable = () -> threadPool.executor(executorName).execute(new ActionRunnable<>(listener) { + @Override + protected void doRun() { + executeBulk(task, bulkRequest, startTime, listener, executorName, responses, indicesThatCannotBeCreated); + } + }); + try (RefCountingRunnable refs = new RefCountingRunnable(executeBulkRunnable)) { for (String index : autoCreateIndices) { - createIndex(index, bulkRequest.timeout(), new ActionListener<>() { + createIndex(index, bulkRequest.timeout(), ActionListener.releaseAfter(new ActionListener<>() { @Override - public void onResponse(CreateIndexResponse result) { - if (counter.decrementAndGet() == 0) { - forkExecuteBulk(listener); - } - } + public void onResponse(CreateIndexResponse createIndexResponse) {} @Override public void onFailure(Exception e) { @@ -423,30 +439,47 @@ public void onFailure(Exception e) { } } else if ((cause instanceof ResourceAlreadyExistsException) == false) { // fail all requests involving this index, if create didn't work - for (int i = 0; i < bulkRequest.requests.size(); i++) { - DocWriteRequest request = bulkRequest.requests.get(i); - if (request != null && setResponseFailureIfIndexMatches(responses, i, request, index, e)) { - bulkRequest.requests.set(i, null); - } - } - } - if (counter.decrementAndGet() == 0) { - forkExecuteBulk(ActionListener.wrap(listener::onResponse, inner -> { - inner.addSuppressed(e); - listener.onFailure(inner); - })); + failRequestsWhenPrerequisiteActionFailed(index, bulkRequest, responses, e); } } + }, refs.acquire())); + } + for (String dataStream : dataStreamsToBeRolledOver) { + rolloverDataStream(dataStream, bulkRequest.timeout(), ActionListener.releaseAfter(new ActionListener<>() { - private void forkExecuteBulk(ActionListener finalListener) { - threadPool.executor(executorName).execute(new ActionRunnable<>(finalListener) { - @Override - protected void doRun() { - 
executeBulk(task, bulkRequest, startTime, listener, executorName, responses, indicesThatCannotBeCreated); - } - }); + @Override + public void onResponse(RolloverResponse result) { + // A successful response has rolled_over false in the following cases: + // - A request had the lazy or dry_run parameter enabled + // - A request had conditions that were not met + // Since none of the above apply, getting a response with rolled_over false is considered a bug + // that should be caught here to inform the developer. + assert result.isRolledOver() + : "A successful unconditional rollover should always result in a rolled over data stream"; } - }); + + @Override + public void onFailure(Exception e) { + failRequestsWhenPrerequisiteActionFailed(dataStream, bulkRequest, responses, e); + } + }, refs.acquire())); + } + } + } + + /** + * Fails all requests involving this index or data stream because the prerequisite action failed too. + */ + private static void failRequestsWhenPrerequisiteActionFailed( + String target, + BulkRequest bulkRequest, + AtomicArray<BulkItemResponse> responses, + Exception error + ) { + for (int i = 0; i < bulkRequest.requests.size(); i++) { + DocWriteRequest<?> request = bulkRequest.requests.get(i); + if (request != null && setResponseFailureIfIndexMatches(responses, i, request, target, error)) { + bulkRequest.requests.set(i, null); } } } @@ -549,6 +582,12 @@ void createIndex(String index, TimeValue timeout, ActionListener<CreateIndexResponse> listener) { + void rolloverDataStream(String dataStream, TimeValue timeout, ActionListener<RolloverResponse> listener) { + RolloverRequest rolloverRequest = new RolloverRequest(dataStream, null); + rolloverRequest.masterNodeTimeout(timeout); + client.execute(RolloverAction.INSTANCE, rolloverRequest, listener); + } + private static boolean setResponseFailureIfIndexMatches( AtomicArray<BulkItemResponse> responses, int idx, diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 9266ee3ee0b68..e6d5bdcc46696 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -47,6 +47,7 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperException; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.SourceToParse; @@ -64,6 +65,7 @@ import java.io.IOException; import java.util.Map; +import java.util.Optional; import java.util.concurrent.Executor; import java.util.function.Consumer; import java.util.function.LongSupplier; @@ -370,12 +372,21 @@ static boolean executeBulkItemRequest( if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { try { - primary.mapperService() - .merge( - MapperService.SINGLE_MAPPING_NAME, - new CompressedXContent(result.getRequiredMappingUpdate()), - MapperService.MergeReason.MAPPING_UPDATE_PREFLIGHT - ); + Optional<CompressedXContent> mergedSource = Optional.ofNullable( + primary.mapperService() + .merge( + MapperService.SINGLE_MAPPING_NAME, + new CompressedXContent(result.getRequiredMappingUpdate()), + MapperService.MergeReason.MAPPING_UPDATE_PREFLIGHT + ) + ).map(DocumentMapper::mappingSource); + Optional<CompressedXContent> previousSource = Optional.ofNullable(primary.mapperService().documentMapper()) + .map(DocumentMapper::mappingSource); + + if (mergedSource.equals(previousSource)) { + 
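// the merged mapping is identical to the current one, i.e. the required update is a noop;
+ // retry locally (guarded by the mapping version) instead of sending a noop update to the master
+ 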
context.resetForNoopMappingUpdateRetry(primary.mapperService().mappingVersion()); + return true; + } } catch (Exception e) { logger.info(() -> format("%s mapping update rejected by primary", primary.shardId()), e); assert result.getId() != null; diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java index 7e2fef88c7680..a44c8091aaa2e 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java @@ -70,6 +70,7 @@ protected void createMissingIndicesAndIndexData( String executorName, ActionListener listener, Set autoCreateIndices, + Set dataStreamsToRollover, Map indicesThatCannotBeCreated, long startTime ) { diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 7530fc18acb59..073ac021f787a 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -174,6 +174,7 @@ public static class DataStreamInfo implements SimpleDiffable, To public static final ParseField SYSTEM_FIELD = new ParseField("system"); public static final ParseField ALLOW_CUSTOM_ROUTING = new ParseField("allow_custom_routing"); public static final ParseField REPLICATED = new ParseField("replicated"); + public static final ParseField ROLLOVER_ON_WRITE = new ParseField("rollover_on_write"); public static final ParseField TIME_SERIES = new ParseField("time_series"); public static final ParseField TEMPORAL_RANGES = new ParseField("temporal_ranges"); public static final ParseField TEMPORAL_RANGE_START = new ParseField("start"); @@ -345,6 +346,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla builder.field(SYSTEM_FIELD.getPreferredName(), dataStream.isSystem()); builder.field(ALLOW_CUSTOM_ROUTING.getPreferredName(), dataStream.isAllowCustomRouting()); builder.field(REPLICATED.getPreferredName(), dataStream.isReplicated()); + builder.field(ROLLOVER_ON_WRITE.getPreferredName(), dataStream.rolloverOnWrite()); if (DataStream.isFailureStoreEnabled()) { builder.field(DataStream.FAILURE_STORE_FIELD.getPreferredName(), dataStream.isFailureStore()); } diff --git a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java index a2f4d6408a3a4..61c979f9494b5 100644 --- a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java +++ b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java @@ -62,7 +62,7 @@ public Request(StreamInput in) throws IOException { super(in); sourceIndex = in.readString(); targetIndex = in.readString(); - waitTimeout = in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_054) + waitTimeout = in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061) ? 
TimeValue.parseTimeValue(in.readString(), "timeout") : DEFAULT_WAIT_TIMEOUT; downsampleConfig = new DownsampleConfig(in); @@ -89,7 +89,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(sourceIndex); out.writeString(targetIndex); out.writeString( - out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_054) + out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061) ? waitTimeout.getStringRep() : DEFAULT_WAIT_TIMEOUT.getStringRep() ); diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeRequest.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeRequest.java index ba238638efba4..87d09acfe3a42 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeRequest.java @@ -54,7 +54,7 @@ class FieldCapabilitiesNodeRequest extends ActionRequest implements IndicesReque originalIndices = OriginalIndices.readOriginalIndices(in); indexFilter = in.readOptionalNamedWriteable(QueryBuilder.class); nowInMillis = in.readLong(); - runtimeFields = in.readMap(); + runtimeFields = in.readGenericMap(); } FieldCapabilitiesNodeRequest( diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java index 95555cfd59ab2..0bb7833911993 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -57,7 +57,7 @@ public FieldCapabilitiesRequest(StreamInput in) throws IOException { includeUnmapped = in.readBoolean(); indexFilter = in.readOptionalNamedWriteable(QueryBuilder.class); nowInMillis = in.readOptionalLong(); - runtimeFields = in.readMap(); + runtimeFields = in.readGenericMap(); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) { filters = in.readStringArray(); types = in.readStringArray(); diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 8b5e077fd85b8..12f7c21cba8e1 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -29,12 +29,14 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.StringLiteralDeduplicator; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.Index; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.plugins.internal.DocumentParsingObserver; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -71,7 +73,7 @@ public class IndexRequest extends ReplicatedWriteRequest implements DocWriteRequest, CompositeIndicesRequest { private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(IndexRequest.class); - private static final TransportVersion PIPELINES_HAVE_RUN_FIELD_ADDED = TransportVersions.V_8_500_049; + private static 
final TransportVersion PIPELINES_HAVE_RUN_FIELD_ADDED = TransportVersions.V_8_500_061; /** * Max length of the source document to include into string() @@ -153,11 +155,9 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio opType = OpType.fromId(in.readByte()); version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); - pipeline = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) { - finalPipeline = in.readOptionalString(); - } + pipeline = readPipelineName(in); if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) { + finalPipeline = readPipelineName(in); isPipelineResolved = in.readBoolean(); } isRetry = in.readBoolean(); @@ -204,6 +204,22 @@ public IndexRequest(String index) { this.index = index; } + private static final StringLiteralDeduplicator pipelineNameDeduplicator = new StringLiteralDeduplicator(); + + // reads pipeline name from the stream and deduplicates it to save heap on large bulk requests + @Nullable + private static String readPipelineName(StreamInput in) throws IOException { + final String read = in.readOptionalString(); + if (read == null) { + return null; + } + if (IngestService.NOOP_PIPELINE_NAME.equals(read)) { + // common path of no pipeline set + return IngestService.NOOP_PIPELINE_NAME; + } + return pipelineNameDeduplicator.deduplicate(read); + } + @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = super.validate(); diff --git a/server/src/main/java/org/elasticsearch/action/inference/InferenceAction.java b/server/src/main/java/org/elasticsearch/action/inference/InferenceAction.java index 7d472b0d47813..f8a0291cef204 100644 --- a/server/src/main/java/org/elasticsearch/action/inference/InferenceAction.java +++ b/server/src/main/java/org/elasticsearch/action/inference/InferenceAction.java @@ -84,7 +84,7 @@ public Request(StreamInput in) throws IOException { } else { this.input = List.of(in.readString()); } - this.taskSettings = in.readMap(); + this.taskSettings = in.readGenericMap(); } public TaskType getTaskType() { diff --git a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineAction.java b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineAction.java deleted file mode 100644 index d6adfeb4da1d6..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineAction.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
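The new `readPipelineName` above funnels every deserialized pipeline name through a shared `StringLiteralDeduplicator`, so that large bulk requests share a single `String` instance per distinct name. A standalone sketch of the same idea; the class and method names here are illustrative:

```java
import org.elasticsearch.common.util.StringLiteralDeduplicator;

final class PipelineNames {
    private static final StringLiteralDeduplicator DEDUP = new StringLiteralDeduplicator();

    // Returns a canonical instance, so thousands of bulk items can share one String.
    static String canonical(String pipelineName) {
        return pipelineName == null ? null : DEDUP.deduplicate(pipelineName);
    }
}
```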
- */ - -package org.elasticsearch.action.ingest; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; - -public class DeletePipelineAction extends ActionType { - - public static final DeletePipelineAction INSTANCE = new DeletePipelineAction(); - public static final String NAME = "cluster:admin/ingest/pipeline/delete"; - - public DeletePipelineAction() { - super(NAME, AcknowledgedResponse::readFrom); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java index e953bf7d4ec6b..ef08f64765f98 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java @@ -15,7 +15,7 @@ public class DeletePipelineRequestBuilder extends ActionRequestBuilder { public DeletePipelineRequestBuilder(ElasticsearchClient client, String id) { - super(client, DeletePipelineAction.INSTANCE, new DeletePipelineRequest(id)); + super(client, DeletePipelineTransportAction.TYPE, new DeletePipelineRequest(id)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java index c8a8e175c69a9..6878096e38614 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; @@ -28,6 +29,7 @@ public class DeletePipelineTransportAction extends AcknowledgedTransportMasterNodeAction { + public static final ActionType TYPE = ActionType.localOnly("cluster:admin/ingest/pipeline/delete"); private final IngestService ingestService; @Inject @@ -39,7 +41,7 @@ public DeletePipelineTransportAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super( - DeletePipelineAction.NAME, + TYPE.name(), transportService, ingestService.getClusterService(), threadPool, diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineAction.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineAction.java deleted file mode 100644 index 70db9a2e7b2ce..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineAction.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
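The same refactoring recurs throughout this patch: the standalone `ActionType` class is deleted and an equivalent `TYPE` constant moves onto the transport action, so callers swap the `INSTANCE` reference for `TYPE`. A minimal caller-side sketch of the migration, with the wrapper class and method assumed:

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.action.ingest.PutPipelineTransportAction;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.internal.ElasticsearchClient;

final class PipelineCaller {
    static void putPipeline(ElasticsearchClient client, PutPipelineRequest request, ActionListener<AcknowledgedResponse> listener) {
        // before this patch: client.execute(PutPipelineAction.INSTANCE, request, listener);
        client.execute(PutPipelineTransportAction.TYPE, request, listener);
    }
}
```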
- */ - -package org.elasticsearch.action.ingest; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; - -public class PutPipelineAction extends ActionType { - - public static final PutPipelineAction INSTANCE = new PutPipelineAction(); - public static final String NAME = "cluster:admin/ingest/pipeline/put"; - - public PutPipelineAction() { - super(NAME, AcknowledgedResponse::readFrom); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java index 13fac13ef5437..2fce285d83f06 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java @@ -17,6 +17,6 @@ public class PutPipelineRequestBuilder extends ActionRequestBuilder { public PutPipelineRequestBuilder(ElasticsearchClient client, String id, BytesReference source, XContentType xContentType) { - super(client, PutPipelineAction.INSTANCE, new PutPipelineRequest(id, source, xContentType)); + super(client, PutPipelineTransportAction.TYPE, new PutPipelineRequest(id, source, xContentType)); } } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java index b1e2533d1038d..5233961cdda7b 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoMetrics; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.support.ActionFilters; @@ -33,6 +34,7 @@ import static org.elasticsearch.ingest.IngestService.INGEST_ORIGIN; public class PutPipelineTransportAction extends AcknowledgedTransportMasterNodeAction { + public static final ActionType TYPE = ActionType.localOnly("cluster:admin/ingest/pipeline/put"); private final IngestService ingestService; private final OriginSettingClient client; @@ -46,7 +48,7 @@ public PutPipelineTransportAction( NodeClient client ) { super( - PutPipelineAction.NAME, + TYPE.name(), transportService, ingestService.getClusterService(), threadPool, diff --git a/server/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java b/server/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java index a77d0bf7e2b01..a8fb51b30713c 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java @@ -103,7 +103,7 @@ private WriteableIngestDocument(Map sourceAndMetadata, Map(0) ); return; @@ -655,7 +654,7 @@ public boolean isPartOfPointInTime(ShardSearchContextId contextId) { } private SearchResponse buildSearchResponse( - InternalSearchResponse internalSearchResponse, + SearchResponseSections internalSearchResponse, ShardSearchFailure[] failures, String scrollId, String searchContextId @@ -682,7 +681,7 @@ boolean buildPointInTimeFromSearchResults() { } @Override - public void sendSearchResponse(InternalSearchResponse internalSearchResponse, AtomicArray 
queryResults) { + public void sendSearchResponse(SearchResponseSections internalSearchResponse, AtomicArray queryResults) { ShardSearchFailure[] failures = buildShardFailures(); Boolean allowPartialResults = request.allowPartialSearchResults(); assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults"; diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index 2df8b60cd9728..00e2b41fde3da 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -18,7 +18,6 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.collapse.CollapseBuilder; -import org.elasticsearch.search.internal.InternalSearchResponse; import java.util.Iterator; import java.util.List; @@ -31,13 +30,13 @@ */ final class ExpandSearchPhase extends SearchPhase { private final SearchPhaseContext context; - private final InternalSearchResponse searchResponse; + private final SearchHits searchHits; private final Supplier nextPhase; - ExpandSearchPhase(SearchPhaseContext context, InternalSearchResponse searchResponse, Supplier nextPhase) { + ExpandSearchPhase(SearchPhaseContext context, SearchHits searchHits, Supplier nextPhase) { super("expand"); this.context = context; - this.searchResponse = searchResponse; + this.searchHits = searchHits; this.nextPhase = nextPhase; } @@ -53,7 +52,7 @@ private boolean isCollapseRequest() { @Override public void run() { - if (isCollapseRequest() && searchResponse.hits().getHits().length > 0) { + if (isCollapseRequest() && searchHits.getHits().length > 0) { SearchRequest searchRequest = context.getRequest(); CollapseBuilder collapseBuilder = searchRequest.source().collapse(); final List innerHitBuilders = collapseBuilder.getInnerHits(); @@ -61,7 +60,7 @@ public void run() { if (collapseBuilder.getMaxConcurrentGroupRequests() > 0) { multiRequest.maxConcurrentSearchRequests(collapseBuilder.getMaxConcurrentGroupRequests()); } - for (SearchHit hit : searchResponse.hits().getHits()) { + for (SearchHit hit : searchHits.getHits()) { BoolQueryBuilder groupQuery = new BoolQueryBuilder(); Object collapseValue = hit.field(collapseBuilder.getField()).getValue(); if (collapseValue != null) { @@ -85,7 +84,7 @@ public void run() { } context.getSearchTransport().sendExecuteMultiSearch(multiRequest, context.getTask(), ActionListener.wrap(response -> { Iterator it = response.iterator(); - for (SearchHit hit : searchResponse.hits.getHits()) { + for (SearchHit hit : searchHits.getHits()) { for (InnerHitBuilder innerHitBuilder : innerHitBuilders) { MultiSearchResponse.Item item = it.next(); if (item.isFailure()) { diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java index 9f1da9a7e2b03..9c50d534ac4ce 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java @@ -15,9 +15,9 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchPhaseResult; import 
org.elasticsearch.search.fetch.subphase.LookupField; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.transport.RemoteClusterService; import java.util.ArrayList; @@ -33,10 +33,10 @@ */ final class FetchLookupFieldsPhase extends SearchPhase { private final SearchPhaseContext context; - private final InternalSearchResponse searchResponse; + private final SearchResponseSections searchResponse; private final AtomicArray queryResults; - FetchLookupFieldsPhase(SearchPhaseContext context, InternalSearchResponse searchResponse, AtomicArray queryResults) { + FetchLookupFieldsPhase(SearchPhaseContext context, SearchResponseSections searchResponse, AtomicArray queryResults) { super("fetch_lookup_fields"); this.context = context; this.searchResponse = searchResponse; @@ -47,9 +47,9 @@ private record Cluster(String clusterAlias, List hitsWithLookupFields } - private static List groupLookupFieldsByClusterAlias(InternalSearchResponse response) { + private static List groupLookupFieldsByClusterAlias(SearchHits searchHits) { final Map> perClusters = new HashMap<>(); - for (SearchHit hit : response.hits.getHits()) { + for (SearchHit hit : searchHits.getHits()) { String clusterAlias = hit.getClusterAlias() != null ? hit.getClusterAlias() : RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY; if (hit.hasLookupFields()) { perClusters.computeIfAbsent(clusterAlias, k -> new ArrayList<>()).add(hit); @@ -70,7 +70,7 @@ private static List groupLookupFieldsByClusterAlias(InternalSearchRespo @Override public void run() { - final List clusters = groupLookupFieldsByClusterAlias(searchResponse); + final List clusters = groupLookupFieldsByClusterAlias(searchResponse.hits); if (clusters.isEmpty()) { context.sendSearchResponse(searchResponse, queryResults); return; diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index e8d3ded154f55..11528f8e1521f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -16,7 +16,6 @@ import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.transport.Transport; @@ -31,7 +30,7 @@ final class FetchSearchPhase extends SearchPhase { private final ArraySearchPhaseResults fetchResults; private final AtomicArray queryResults; - private final BiFunction, SearchPhase> nextPhaseFactory; + private final BiFunction, SearchPhase> nextPhaseFactory; private final SearchPhaseContext context; private final Logger logger; private final SearchPhaseResults resultConsumer; @@ -39,23 +38,18 @@ final class FetchSearchPhase extends SearchPhase { private final AggregatedDfs aggregatedDfs; FetchSearchPhase(SearchPhaseResults resultConsumer, AggregatedDfs aggregatedDfs, SearchPhaseContext context) { - this( - resultConsumer, - aggregatedDfs, - context, - (response, queryPhaseResults) -> new ExpandSearchPhase( - context, - response, - () -> new FetchLookupFieldsPhase(context, response, queryPhaseResults) - ) - ); + this(resultConsumer, aggregatedDfs, context, (response, queryPhaseResults) -> { + response.mustIncRef(); + 
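// pin the merged response sections for the remainder of the search;
+ // the decRef registered on the next line releases them when the phase context closes
+ 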
context.addReleasable(response::decRef); + return new ExpandSearchPhase(context, response.hits, () -> new FetchLookupFieldsPhase(context, response, queryPhaseResults)); + }); } FetchSearchPhase( SearchPhaseResults resultConsumer, AggregatedDfs aggregatedDfs, SearchPhaseContext context, - BiFunction, SearchPhase> nextPhaseFactory + BiFunction, SearchPhase> nextPhaseFactory ) { super("fetch"); if (context.getNumShards() != resultConsumer.getNumShards()) { @@ -230,11 +224,11 @@ private void moveToNextPhase( SearchPhaseController.ReducedQueryPhase reducedQueryPhase, AtomicArray fetchResultsArr ) { - final InternalSearchResponse internalResponse = SearchPhaseController.merge( - context.getRequest().scroll() != null, - reducedQueryPhase, - fetchResultsArr - ); - context.executeNextPhase(this, nextPhaseFactory.apply(internalResponse, queryResults)); + var resp = SearchPhaseController.merge(context.getRequest().scroll() != null, reducedQueryPhase, fetchResultsArr); + try { + context.executeNextPhase(this, nextPhaseFactory.apply(resp, queryResults)); + } finally { + resp.decRef(); + } } } diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index cadcd6ca57334..f33f13dd7741f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.rest.action.search.RestMultiSearchAction; import org.elasticsearch.rest.action.search.RestSearchAction; @@ -28,12 +29,12 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.io.ByteArrayOutputStream; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.List; import java.util.Locale; @@ -239,10 +240,16 @@ public static void readMultiLineFormat( // now parse the action if (nextMarker - from > 0) { try ( - InputStream stream = data.slice(from, nextMarker - from).streamInput(); - XContentParser parser = xContent.createParser(parserConfig, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + parserConfig, + data.slice(from, nextMarker - from), + xContent.type() + ) ) { Map source = parser.map(); + if (parser.nextToken() != null) { + throw new XContentParseException(parser.getTokenLocation(), "Unexpected token after end of object"); + } Object expandWildcards = null; Object ignoreUnavailable = null; Object ignoreThrottled = null; @@ -301,9 +308,17 @@ public static void readMultiLineFormat( if (nextMarker == -1) { break; } - BytesReference bytes = data.slice(from, nextMarker - from); - try (InputStream stream = bytes.streamInput(); XContentParser parser = xContent.createParser(parserConfig, stream)) { + try ( + XContentParser parser = XContentHelper.createParserNotCompressed( + parserConfig, + data.slice(from, nextMarker - from), + xContent.type() + ) + ) { consumer.accept(searchRequest, parser); + 
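// strict parsing: any trailing token after the parsed request body is malformed input
+ 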
if (parser.nextToken() != null) { + throw new XContentParseException(parser.getTokenLocation(), "Unexpected token after end of object"); + } } // move pointers from = nextMarker + 1; diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java index 92a2a1503aefc..82cb158a0c59a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -19,8 +18,6 @@ import java.util.Objects; public final class OpenPointInTimeResponse extends ActionResponse implements ToXContentObject { - private static final ParseField ID = new ParseField("id"); - private final String pointInTimeId; public OpenPointInTimeResponse(String pointInTimeId) { @@ -40,7 +37,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(ID.getPreferredName(), pointInTimeId); + builder.field("id", pointInTimeId); builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java index f10650a6401d6..83a6870d72491 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java @@ -32,10 +32,10 @@ import java.nio.charset.StandardCharsets; import java.util.Base64; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.TreeSet; import java.util.stream.Collectors; public final class SearchContextId { @@ -110,12 +110,30 @@ public static SearchContextId decode(NamedWriteableRegistry namedWriteableRegist } } + public static String[] decodeIndices(String id) { + try ( + var decodedInputStream = Base64.getUrlDecoder().wrap(new ByteArrayInputStream(id.getBytes(StandardCharsets.ISO_8859_1))); + var in = new InputStreamStreamInput(decodedInputStream) + ) { + final TransportVersion version = TransportVersion.readVersion(in); + in.setTransportVersion(version); + final Map shards = Collections.unmodifiableMap( + in.readCollection(Maps::newHashMapWithExpectedSize, SearchContextId::readShardsMapEntry) + ); + return new SearchContextId(shards, Collections.emptyMap()).getActualIndices(); + } catch (IOException e) { + assert false : e; + throw new IllegalArgumentException(e); + } + } + private static void readShardsMapEntry(StreamInput in, Map shards) throws IOException { shards.put(new ShardId(in), new SearchContextIdForNode(in)); } public String[] getActualIndices() { - final Set indices = new HashSet<>(); + // ensure that the order is consistent + final Set indices = new TreeSet<>(); for (Map.Entry entry : shards().entrySet()) { final String indexName = entry.getKey().getIndexName(); final String clusterAlias = entry.getValue().getClusterAlias(); diff --git 
a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java index d70b99fe46c00..af9bcac8e3a33 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java @@ -14,7 +14,6 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.transport.Transport; @@ -64,7 +63,7 @@ interface SearchPhaseContext extends Executor { * @param internalSearchResponse the internal search response * @param queryResults the results of the query phase */ - void sendSearchResponse(InternalSearchResponse internalSearchResponse, AtomicArray queryResults); + void sendSearchResponse(SearchResponseSections internalSearchResponse, AtomicArray queryResults); /** * Notifies the top-level listener of the provided exception diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index d4808def29d1f..5ffb9024d3ee1 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -40,7 +40,6 @@ import org.elasticsearch.search.dfs.DfsKnnResults; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.FetchSearchResult; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.profile.SearchProfileQueryPhaseResult; import org.elasticsearch.search.profile.SearchProfileResults; @@ -355,13 +354,13 @@ public static List[] fillDocIdsToLoad(int numShards, ScoreDoc[] shardDo * Expects sortedDocs to have top search docs across all shards, optionally followed by top suggest docs for each named * completion suggestion ordered by suggestion name */ - public static InternalSearchResponse merge( + public static SearchResponseSections merge( boolean ignoreFrom, ReducedQueryPhase reducedQueryPhase, AtomicArray fetchResultsArray ) { if (reducedQueryPhase.isEmptyResult) { - return InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; + return SearchResponseSections.EMPTY_WITH_TOTAL_HITS; } ScoreDoc[] sortedDocs = reducedQueryPhase.sortedTopDocs.scoreDocs; var fetchResults = fetchResultsArray.asList(); @@ -466,7 +465,7 @@ private static SearchHits getHits( } } return new SearchHits( - hits.toArray(new SearchHit[0]), + hits.toArray(SearchHits.EMPTY), reducedQueryPhase.totalHits, reducedQueryPhase.maxScore, sortedTopDocs.sortFields, @@ -753,14 +752,14 @@ public record ReducedQueryPhase( * Creates a new search response from the given merged hits. 
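* @param hits the merged search hits
* @param fetchResults the fetch results, used here only to build the profile section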
* @see #merge(boolean, ReducedQueryPhase, AtomicArray) */ - public InternalSearchResponse buildResponse(SearchHits hits, Collection fetchResults) { - return new InternalSearchResponse( + public SearchResponseSections buildResponse(SearchHits hits, Collection fetchResults) { + return new SearchResponseSections( hits, aggregations, suggest, - buildSearchProfileResults(fetchResults), timedOut, terminatedEarly, + buildSearchProfileResults(fetchResults), numReducePhases ); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 7ac8c4d5299d4..456a574c6f6b2 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -407,6 +407,21 @@ public ActionRequestValidationException validate() { if (scroll) { validationException = addValidationError("using [point in time] is not allowed in a scroll context", validationException); } + if (indices().length > 0) { + validationException = addValidationError( + "[indices] cannot be used with point in time. Do not specify any index with point in time.", + validationException + ); + } + if (indicesOptions().equals(DEFAULT_INDICES_OPTIONS) == false) { + validationException = addValidationError("[indicesOptions] cannot be used with point in time", validationException); + } + if (routing() != null) { + validationException = addValidationError("[routing] cannot be used with point in time", validationException); + } + if (preference() != null) { + validationException = addValidationError("[preference] cannot be used with point in time", validationException); + } } else if (source != null && source.sorts() != null) { for (SortBuilder sortBuilder : source.sorts()) { if (sortBuilder instanceof FieldSortBuilder diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index 9ff0f6273171b..660fdb38b130b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -24,11 +24,9 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestActions; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.search.suggest.Suggest; @@ -67,7 +65,13 @@ public class SearchResponse extends ActionResponse implements ChunkedToXContentO private static final ParseField TERMINATED_EARLY = new ParseField("terminated_early"); private static final ParseField NUM_REDUCE_PHASES = new ParseField("num_reduce_phases"); - private final SearchResponseSections internalResponse; + private final SearchHits hits; + private final Aggregations aggregations; + private final Suggest suggest; + private final SearchProfileResults profileResults; + private final boolean timedOut; + private final Boolean terminatedEarly; + private final int numReducePhases; private final String scrollId; private final String pointInTimeId; private final int totalShards; @@ 
-79,7 +83,13 @@ public class SearchResponse extends ActionResponse implements ChunkedToXContentO public SearchResponse(StreamInput in) throws IOException { super(in); - internalResponse = new InternalSearchResponse(in); + this.hits = new SearchHits(in); + this.aggregations = in.readBoolean() ? InternalAggregations.readFrom(in) : null; + this.suggest = in.readBoolean() ? new Suggest(in) : null; + this.timedOut = in.readBoolean(); + this.terminatedEarly = in.readOptionalBoolean(); + this.profileResults = in.readOptionalWriteable(SearchProfileResults::new); + this.numReducePhases = in.readVInt(); totalShards = in.readVInt(); successfulShards = in.readVInt(); int size = in.readVInt(); @@ -99,7 +109,13 @@ public SearchResponse(StreamInput in) throws IOException { } public SearchResponse( - SearchResponseSections internalResponse, + SearchHits hits, + Aggregations aggregations, + Suggest suggest, + boolean timedOut, + Boolean terminatedEarly, + SearchProfileResults profileResults, + int numReducePhases, String scrollId, int totalShards, int successfulShards, @@ -108,11 +124,63 @@ public SearchResponse( ShardSearchFailure[] shardFailures, Clusters clusters ) { - this(internalResponse, scrollId, totalShards, successfulShards, skippedShards, tookInMillis, shardFailures, clusters, null); + this( + hits, + aggregations, + suggest, + timedOut, + terminatedEarly, + profileResults, + numReducePhases, + scrollId, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + shardFailures, + clusters, + null + ); + } + + public SearchResponse( + SearchResponseSections searchResponseSections, + String scrollId, + int totalShards, + int successfulShards, + int skippedShards, + long tookInMillis, + ShardSearchFailure[] shardFailures, + Clusters clusters, + String pointInTimeId + ) { + this( + searchResponseSections.hits, + searchResponseSections.aggregations, + searchResponseSections.suggest, + searchResponseSections.timedOut, + searchResponseSections.terminatedEarly, + searchResponseSections.profileResults, + searchResponseSections.numReducePhases, + scrollId, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + shardFailures, + clusters, + pointInTimeId + ); } public SearchResponse( - SearchResponseSections internalResponse, + SearchHits hits, + Aggregations aggregations, + Suggest suggest, + boolean timedOut, + Boolean terminatedEarly, + SearchProfileResults profileResults, + int numReducePhases, String scrollId, int totalShards, int successfulShards, @@ -122,7 +190,13 @@ public SearchResponse( Clusters clusters, String pointInTimeId ) { - this.internalResponse = internalResponse; + this.hits = hits; + this.aggregations = aggregations; + this.suggest = suggest; + this.profileResults = profileResults; + this.timedOut = timedOut; + this.terminatedEarly = terminatedEarly; + this.numReducePhases = numReducePhases; this.scrollId = scrollId; this.pointInTimeId = pointInTimeId; this.clusters = clusters; @@ -144,7 +218,7 @@ public RestStatus status() { * The search hits. */ public SearchHits getHits() { - return internalResponse.hits(); + return hits; } /** @@ -152,7 +226,7 @@ public SearchHits getHits() { * either {@code null} or {@link InternalAggregations#EMPTY}. */ public @Nullable Aggregations getAggregations() { - return internalResponse.aggregations(); + return aggregations; } /** @@ -163,14 +237,14 @@ public boolean hasAggregations() { } public Suggest getSuggest() { - return internalResponse.suggest(); + return suggest; } /** * Has the search operation timed out. 
*/ public boolean isTimedOut() { - return internalResponse.timedOut(); + return timedOut; } /** @@ -178,14 +252,14 @@ public boolean isTimedOut() { * terminateAfter */ public Boolean isTerminatedEarly() { - return internalResponse.terminatedEarly(); + return terminatedEarly; } /** * Returns the number of reduce phases applied to obtain this search response */ public int getNumReducePhases() { - return internalResponse.getNumReducePhases(); + return numReducePhases; } /** @@ -253,7 +327,10 @@ public String pointInTimeId() { */ @Nullable public Map getProfileResults() { - return internalResponse.profile(); + if (profileResults == null) { + return Collections.emptyMap(); + } + return profileResults.getShardResults(); } /** @@ -278,7 +355,27 @@ public Iterator innerToXContentChunked(ToXContent.Params p return Iterators.concat( ChunkedToXContentHelper.singleChunk(SearchResponse.this::headerToXContent), Iterators.single(clusters), - internalResponse.toXContentChunked(params) + Iterators.concat( + Iterators.flatMap(Iterators.single(hits), r -> r.toXContentChunked(params)), + Iterators.single((ToXContent) (b, p) -> { + if (aggregations != null) { + aggregations.toXContent(b, p); + } + return b; + }), + Iterators.single((b, p) -> { + if (suggest != null) { + suggest.toXContent(b, p); + } + return b; + }), + Iterators.single((b, p) -> { + if (profileResults != null) { + profileResults.toXContent(b, p); + } + return b; + }) + ) ); } @@ -396,17 +493,14 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE } } } - SearchResponseSections searchResponseSections = new SearchResponseSections( + return new SearchResponse( hits, aggs, suggest, timedOut, terminatedEarly, profile, - numReducePhases - ); - return new SearchResponse( - searchResponseSections, + numReducePhases, scrollId, totalShards, successfulShards, @@ -420,7 +514,13 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE @Override public void writeTo(StreamOutput out) throws IOException { - internalResponse.writeTo(out); + hits.writeTo(out); + out.writeOptionalWriteable((InternalAggregations) aggregations); + out.writeOptionalWriteable(suggest); + out.writeBoolean(timedOut); + out.writeOptionalBoolean(terminatedEarly); + out.writeOptionalWriteable(profileResults); + out.writeVInt(numReducePhases); out.writeVInt(totalShards); out.writeVInt(successfulShards); @@ -532,7 +632,7 @@ public Clusters(StreamInput in) throws IOException { this.total = in.readVInt(); int successfulTemp = in.readVInt(); int skippedTemp = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_053)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { List clusterList = in.readCollectionAsList(Cluster::new); if (clusterList.isEmpty()) { this.clusterInfo = Collections.emptyMap(); @@ -585,7 +685,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(total); out.writeVInt(successful); out.writeVInt(skipped); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_053)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { if (clusterInfo != null) { List clusterList = clusterInfo.values().stream().toList(); out.writeCollection(clusterList); @@ -1267,18 +1367,14 @@ public String toString() { // public for tests public static SearchResponse empty(Supplier tookInMillisSupplier, Clusters clusters) { - SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), Float.NaN); 
- InternalSearchResponse internalSearchResponse = new InternalSearchResponse( - searchHits, + return new SearchResponse( + SearchHits.empty(new TotalHits(0L, TotalHits.Relation.EQUAL_TO), Float.NaN), InternalAggregations.EMPTY, null, - null, false, null, - 0 - ); - return new SearchResponse( - internalSearchResponse, + null, + 0, null, 0, 0, diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java index b6143cfc51c3a..1b616b9f3bc87 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java @@ -27,7 +27,6 @@ import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.search.suggest.Suggest; @@ -211,18 +210,15 @@ SearchResponse getMergedResponse(Clusters clusters) { SearchProfileResults profileShardResults = profileResults.isEmpty() ? null : new SearchProfileResults(profileResults); // make failures ordering consistent between ordinary search and CCS by looking at the shard they come from Arrays.sort(shardFailures, FAILURES_COMPARATOR); - InternalSearchResponse response = new InternalSearchResponse( + long tookInMillis = searchTimeProvider.buildTookInMillis(); + return new SearchResponse( mergedSearchHits, reducedAggs, suggest, - profileShardResults, topDocsStats.timedOut, topDocsStats.terminatedEarly, - numReducePhases - ); - long tookInMillis = searchTimeProvider.buildTookInMillis(); - return new SearchResponse( - response, + profileShardResults, + numReducePhases, null, totalShards, successfulShards, diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java index b4de15f4cc413..805ef033db27a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java @@ -8,31 +8,42 @@ package org.elasticsearch.action.search; -import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.search.suggest.Suggest; -import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.transport.LeakTracker; -import java.io.IOException; import java.util.Collections; -import java.util.Iterator; import java.util.Map; /** - * Base class that holds the various sections which a search response is - * composed of (hits, aggs, suggestions etc.) and allows to retrieve them. - * - * The reason why this class exists is that the high level REST client uses its own classes - * to parse aggregations into, which are not serializable. 
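As rewritten below, the class stops rendering XContent altogether and becomes a ref-counted holder instead. A hedged sketch of the resulting lifecycle contract, where sections would come from SearchPhaseController.merge(...) and process is a hypothetical consumer:

```java
// Hedged sketch of the RefCounted contract on the rewritten SearchResponseSections.
if (sections.tryIncRef()) {   // take a reference before reading from the sections
    try {
        process(sections.hits());
    } finally {
        sections.decRef();    // the last decRef invokes closeInternal (currently a no-op, per the TODO below)
    }
}
```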
This is the common part that can be - * shared between core and client. + * Holds the sections that a search response is composed of (hits, aggs, suggestions, etc.) during intermediate + * steps of building the search response. */ -public class SearchResponseSections implements ChunkedToXContent { +public class SearchResponseSections implements RefCounted { + public static final SearchResponseSections EMPTY_WITH_TOTAL_HITS = new SearchResponseSections( + SearchHits.EMPTY_WITH_TOTAL_HITS, + null, + null, + false, + null, + null, + 1 + ); + public static final SearchResponseSections EMPTY_WITHOUT_TOTAL_HITS = new SearchResponseSections( + SearchHits.EMPTY_WITHOUT_TOTAL_HITS, + null, + null, + false, + null, + null, + 1 + ); protected final SearchHits hits; protected final Aggregations aggregations; protected final Suggest suggest; @@ -41,6 +52,8 @@ public class SearchResponseSections implements ChunkedToXContent { protected final Boolean terminatedEarly; protected final int numReducePhases; + private final RefCounted refCounted; + public SearchResponseSections( SearchHits hits, Aggregations aggregations, @@ -57,6 +70,12 @@ public SearchResponseSections( this.timedOut = timedOut; this.terminatedEarly = terminatedEarly; this.numReducePhases = numReducePhases; + refCounted = hits.getHits().length > 0 ? LeakTracker.wrap(new AbstractRefCounted() { + @Override + protected void closeInternal() { + // TODO: noop until hits are ref counted + } + }) : ALWAYS_REFERENCED; } public final boolean timedOut() { @@ -100,31 +119,22 @@ public final Map profile() { } @Override - public Iterator toXContentChunked(ToXContent.Params params) { - return Iterators.concat( - Iterators.flatMap(Iterators.single(hits), r -> r.toXContentChunked(params)), - Iterators.single((ToXContent) (b, p) -> { - if (aggregations != null) { - aggregations.toXContent(b, p); - } - return b; - }), - Iterators.single((b, p) -> { - if (suggest != null) { - suggest.toXContent(b, p); - } - return b; - }), - Iterators.single((b, p) -> { - if (profileResults != null) { - profileResults.toXContent(b, p); - } - return b; - }) - ); + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); } - protected void writeTo(StreamOutput out) throws IOException { - throw new UnsupportedOperationException(); + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java index 5681bda8b2741..0616a99fc5dd0 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java @@ -18,7 +18,6 @@ import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.InternalScrollSearchRequest; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.Transport; @@ -240,27 +239,31 @@ protected final void sendResponse( final AtomicArray fetchResults ) { try { - final InternalSearchResponse internalResponse = SearchPhaseController.merge(true, queryPhase, fetchResults); // the scroll ID never changes; we always return the same ID. This ID contains all the shards and their context ids // such that we can talk to them again in the next roundtrip. String scrollId = null; if (request.scroll() != null) { scrollId = request.scrollId(); } - ActionListener.respondAndRelease( - listener, - new SearchResponse( - internalResponse, - scrollId, - this.scrollId.getContext().length, - successfulOps.get(), - 0, - buildTookInMillis(), - buildShardFailures(), - SearchResponse.Clusters.EMPTY, - null - ) - ); + var sections = SearchPhaseController.merge(true, queryPhase, fetchResults); + try { + ActionListener.respondAndRelease( + listener, + new SearchResponse( + sections, + scrollId, + this.scrollId.getContext().length, + successfulOps.get(), + 0, + buildTookInMillis(), + buildShardFailures(), + SearchResponse.Clusters.EMPTY, + null + ) + ); + } finally { + sections.decRef(); + } } catch (Exception e) { listener.onFailure(new ReduceSearchPhaseException("fetch", "inner finish failed", e, buildShardFailures())); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportAPMMetrics.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportAPMMetrics.java new file mode 100644 index 0000000000000..93b8e22d0d7cd --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportAPMMetrics.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.search; + +import org.elasticsearch.telemetry.metric.LongHistogram; +import org.elasticsearch.telemetry.metric.MeterRegistry; + +public class SearchTransportAPMMetrics { + public static final String SEARCH_ACTION_LATENCY_BASE_METRIC = "es.search.nodes.transport_actions.latency.histogram"; + public static final String ACTION_ATTRIBUTE_NAME = "action"; + + public static final String QUERY_CAN_MATCH_NODE_METRIC = "shards_can_match"; + public static final String DFS_ACTION_METRIC = "dfs_query_then_fetch/shard_dfs_phase"; + public static final String QUERY_ID_ACTION_METRIC = "dfs_query_then_fetch/shard_query_phase"; + public static final String QUERY_ACTION_METRIC = "query_then_fetch/shard_query_phase"; + public static final String FREE_CONTEXT_ACTION_METRIC = "shard_release_context"; + public static final String FETCH_ID_ACTION_METRIC = "shard_fetch_phase"; + public static final String QUERY_SCROLL_ACTION_METRIC = "scroll/shard_query_phase"; + public static final String FETCH_ID_SCROLL_ACTION_METRIC = "scroll/shard_fetch_phase"; + public static final String QUERY_FETCH_SCROLL_ACTION_METRIC = "scroll/shard_query_and_fetch_phase"; + public static final String FREE_CONTEXT_SCROLL_ACTION_METRIC = "scroll/shard_release_context"; + public static final String CLEAR_SCROLL_CONTEXTS_ACTION_METRIC = "scroll/shard_release_contexts"; + + private final LongHistogram actionLatencies; + + public SearchTransportAPMMetrics(MeterRegistry meterRegistry) { + this( + meterRegistry.registerLongHistogram( + SEARCH_ACTION_LATENCY_BASE_METRIC, + "Transport action execution times at the node level, expressed as a histogram", + "millis" + ) + ); + } + + private SearchTransportAPMMetrics(LongHistogram actionLatencies) { + this.actionLatencies = actionLatencies; + } + +
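// Usage sketch (hedged, illustrative only): SearchTransportService, further down, wraps each transport
// request handler in instrumentedHandler(...), which records one latency sample per request, roughly as
// getActionLatencies().record(elapsedMillis, Map.of(ACTION_ATTRIBUTE_NAME, QUERY_ACTION_METRIC));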
public LongHistogram getActionLatencies() { + return actionLatencies; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index e46d26c3532ad..b7cc61ad70e2f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -59,6 +59,19 @@ import java.util.Objects; import java.util.function.BiFunction; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.ACTION_ATTRIBUTE_NAME; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.CLEAR_SCROLL_CONTEXTS_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.DFS_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FETCH_ID_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FETCH_ID_SCROLL_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FREE_CONTEXT_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FREE_CONTEXT_SCROLL_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_CAN_MATCH_NODE_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_FETCH_SCROLL_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_ID_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_SCROLL_ACTION_METRIC; + /** * An encapsulation of {@link org.elasticsearch.search.SearchService} operations exposed through * transport. @@ -68,13 +81,27 @@ public class SearchTransportService { public static final String FREE_CONTEXT_SCROLL_ACTION_NAME = "indices:data/read/search[free_context/scroll]"; public static final String FREE_CONTEXT_ACTION_NAME = "indices:data/read/search[free_context]"; public static final String CLEAR_SCROLL_CONTEXTS_ACTION_NAME = "indices:data/read/search[clear_scroll_contexts]"; + + /** + * Part of DFS_QUERY_THEN_FETCH, which fetches distributed term frequencies and executes KNN. + */ public static final String DFS_ACTION_NAME = "indices:data/read/search[phase/dfs]"; public static final String QUERY_ACTION_NAME = "indices:data/read/search[phase/query]"; + + /** + * Part of DFS_QUERY_THEN_FETCH, which executes the query phase using the distributed term frequencies collected + * in the preceding DFS phase. + */ public static final String QUERY_ID_ACTION_NAME = "indices:data/read/search[phase/query/id]"; public static final String QUERY_SCROLL_ACTION_NAME = "indices:data/read/search[phase/query/scroll]"; public static final String QUERY_FETCH_SCROLL_ACTION_NAME = "indices:data/read/search[phase/query+fetch/scroll]"; public static final String FETCH_ID_SCROLL_ACTION_NAME = "indices:data/read/search[phase/fetch/id/scroll]"; public static final String FETCH_ID_ACTION_NAME = "indices:data/read/search[phase/fetch/id]"; + + /** + * The Can-Match phase. It is executed to pre-filter shards that a search request hits. It rewrites the query on + * the shard and checks whether the result of the rewrite matches no documents, in which case the shard can be + * filtered out.
+ */ public static final String QUERY_CAN_MATCH_NODE_NAME = "indices:data/read/search[can_match][n]"; private final TransportService transportService; @@ -382,35 +409,41 @@ public void writeTo(StreamOutput out) throws IOException { } } - public static void registerRequestHandler(TransportService transportService, SearchService searchService) { + public static void registerRequestHandler( + TransportService transportService, + SearchService searchService, + SearchTransportAPMMetrics searchTransportMetrics + ) { transportService.registerRequestHandler( FREE_CONTEXT_SCROLL_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, ScrollFreeContextRequest::new, - (request, channel, task) -> { + instrumentedHandler(FREE_CONTEXT_SCROLL_ACTION_METRIC, transportService, searchTransportMetrics, (request, channel, task) -> { boolean freed = searchService.freeReaderContext(request.id()); channel.sendResponse(new SearchFreeContextResponse(freed)); - } + }) ); TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_SCROLL_ACTION_NAME, false, SearchFreeContextResponse::new); + transportService.registerRequestHandler( FREE_CONTEXT_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, SearchFreeContextRequest::new, - (request, channel, task) -> { + instrumentedHandler(FREE_CONTEXT_ACTION_METRIC, transportService, searchTransportMetrics, (request, channel, task) -> { boolean freed = searchService.freeReaderContext(request.id()); channel.sendResponse(new SearchFreeContextResponse(freed)); - } + }) ); TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_ACTION_NAME, false, SearchFreeContextResponse::new); + transportService.registerRequestHandler( CLEAR_SCROLL_CONTEXTS_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, TransportRequest.Empty::new, - (request, channel, task) -> { + instrumentedHandler(CLEAR_SCROLL_CONTEXTS_ACTION_METRIC, transportService, searchTransportMetrics, (request, channel, task) -> { searchService.freeAllScrollContexts(); channel.sendResponse(TransportResponse.Empty.INSTANCE); - } + }) ); TransportActionProxy.registerProxyAction( transportService, @@ -423,19 +456,32 @@ public static void registerRequestHandler(TransportService transportService, Sea DFS_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, ShardSearchRequest::new, - (request, channel, task) -> searchService.executeDfsPhase(request, (SearchShardTask) task, new ChannelActionListener<>(channel)) + instrumentedHandler( + DFS_ACTION_METRIC, + transportService, + searchTransportMetrics, + (request, channel, task) -> searchService.executeDfsPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) + ) + ) ); - TransportActionProxy.registerProxyAction(transportService, DFS_ACTION_NAME, true, DfsSearchResult::new); transportService.registerRequestHandler( QUERY_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, ShardSearchRequest::new, - (request, channel, task) -> searchService.executeQueryPhase( - request, - (SearchShardTask) task, - new ChannelActionListener<>(channel) + instrumentedHandler( + QUERY_ACTION_METRIC, + transportService, + searchTransportMetrics, + (request, channel, task) -> searchService.executeQueryPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) + ) ) ); TransportActionProxy.registerProxyActionWithDynamicResponseType( @@ -449,9 +495,16 @@ public static void registerRequestHandler(TransportService transportService, Sea QUERY_ID_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, QuerySearchRequest::new, - (request, channel, task) -> { - 
searchService.executeQueryPhase(request, (SearchShardTask) task, new ChannelActionListener<>(channel)); - } + instrumentedHandler( + QUERY_ID_ACTION_METRIC, + transportService, + searchTransportMetrics, + (request, channel, task) -> searchService.executeQueryPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) + ) + ) ); TransportActionProxy.registerProxyAction(transportService, QUERY_ID_ACTION_NAME, true, QuerySearchResult::new); @@ -459,9 +512,16 @@ public static void registerRequestHandler(TransportService transportService, Sea QUERY_SCROLL_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, InternalScrollSearchRequest::new, - (request, channel, task) -> { - searchService.executeQueryPhase(request, (SearchShardTask) task, new ChannelActionListener<>(channel)); - } + instrumentedHandler( + QUERY_SCROLL_ACTION_METRIC, + transportService, + searchTransportMetrics, + (request, channel, task) -> searchService.executeQueryPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) + ) + ) ); TransportActionProxy.registerProxyAction(transportService, QUERY_SCROLL_ACTION_NAME, true, ScrollQuerySearchResult::new); @@ -469,22 +529,33 @@ public static void registerRequestHandler(TransportService transportService, Sea QUERY_FETCH_SCROLL_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, InternalScrollSearchRequest::new, - (request, channel, task) -> { - searchService.executeFetchPhase(request, (SearchShardTask) task, new ChannelActionListener<>(channel)); - } + instrumentedHandler( + QUERY_FETCH_SCROLL_ACTION_METRIC, + transportService, + searchTransportMetrics, + (request, channel, task) -> searchService.executeFetchPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) + ) + ) ); TransportActionProxy.registerProxyAction(transportService, QUERY_FETCH_SCROLL_ACTION_NAME, true, ScrollQueryFetchSearchResult::new); - TransportRequestHandler shardFetchHandler = (request, channel, task) -> searchService.executeFetchPhase( - request, - (SearchShardTask) task, - new ChannelActionListener<>(channel) - ); transportService.registerRequestHandler( FETCH_ID_SCROLL_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, ShardFetchRequest::new, - shardFetchHandler + instrumentedHandler( + FETCH_ID_SCROLL_ACTION_METRIC, + transportService, + searchTransportMetrics, + (request, channel, task) -> searchService.executeFetchPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) + ) + ) ); TransportActionProxy.registerProxyAction(transportService, FETCH_ID_SCROLL_ACTION_NAME, true, FetchSearchResult::new); @@ -494,7 +565,16 @@ public static void registerRequestHandler(TransportService transportService, Sea true, true, ShardFetchSearchRequest::new, - shardFetchHandler + instrumentedHandler( + FETCH_ID_ACTION_METRIC, + transportService, + searchTransportMetrics, + (request, channel, task) -> searchService.executeFetchPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) + ) + ) ); TransportActionProxy.registerProxyAction(transportService, FETCH_ID_ACTION_NAME, true, FetchSearchResult::new); @@ -502,16 +582,39 @@ public static void registerRequestHandler(TransportService transportService, Sea QUERY_CAN_MATCH_NODE_NAME, transportService.getThreadPool().executor(ThreadPool.Names.SEARCH_COORDINATION), CanMatchNodeRequest::new, - (request, channel, task) -> searchService.canMatch(request, new ChannelActionListener<>(channel)) + instrumentedHandler( + QUERY_CAN_MATCH_NODE_METRIC, + transportService, 
+ searchTransportMetrics, + (request, channel, task) -> searchService.canMatch(request, new ChannelActionListener<>(channel)) + ) ); TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NODE_NAME, true, CanMatchNodeResponse::new); } + private static TransportRequestHandler instrumentedHandler( + String actionQualifier, + TransportService transportService, + SearchTransportAPMMetrics searchTransportMetrics, + TransportRequestHandler transportRequestHandler + ) { + return (request, channel, task) -> { + var startTime = transportService.getThreadPool().relativeTimeInMillis(); + try { + transportRequestHandler.messageReceived(request, channel, task); + } finally { + var elapsedTime = transportService.getThreadPool().relativeTimeInMillis() - startTime; + searchTransportMetrics.getActionLatencies().record(elapsedTime, Map.of(ACTION_ATTRIBUTE_NAME, actionQualifier)); + } + }; + } + /** * Returns a connection to the given node on the provided cluster. If the cluster alias is null the node will be resolved * against the local cluster. + * * @param clusterAlias the cluster alias the node should be resolved against - * @param node the node to resolve + * @param node the node to resolve * @return a connection to the given node belonging to the cluster with the provided alias. */ public Transport.Connection getConnection(@Nullable String clusterAlias, DiscoveryNode node) { diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java index 2bc642e6c0907..3b1093c207854 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java @@ -33,7 +33,6 @@ import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -252,7 +251,7 @@ public void onFailure(Exception e) { @Override protected void doRun() { - sendSearchResponse(InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, results.getAtomicArray()); + sendSearchResponse(SearchResponseSections.EMPTY_WITH_TOTAL_HITS, results.getAtomicArray()); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 6045a9ff5efa3..4e9aed5f643f2 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -63,10 +63,8 @@ import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.aggregations.AggregationReduceContext; -import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.profile.SearchProfileShardResult; @@ -162,7 +160,8 @@ 
public TransportSearchAction( ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, NamedWriteableRegistry namedWriteableRegistry, - ExecutorSelector executorSelector + ExecutorSelector executorSelector, + SearchTransportAPMMetrics searchTransportMetrics ) { super(TYPE.name(), transportService, actionFilters, SearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.threadPool = threadPool; @@ -170,7 +169,7 @@ public TransportSearchAction( this.searchPhaseController = searchPhaseController; this.searchTransportService = searchTransportService; this.remoteClusterService = searchTransportService.getRemoteClusterService(); - SearchTransportService.registerRequestHandler(transportService, searchService); + SearchTransportService.registerRequestHandler(transportService, searchService, searchTransportMetrics); this.clusterService = clusterService; this.transportService = transportService; this.searchService = searchService; @@ -541,19 +540,16 @@ public void onResponse(SearchResponse searchResponse) { ? null : new SearchProfileResults(profileResults); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse( - searchResponse.getHits(), - (InternalAggregations) searchResponse.getAggregations(), - searchResponse.getSuggest(), - profile, - searchResponse.isTimedOut(), - searchResponse.isTerminatedEarly(), - searchResponse.getNumReducePhases() - ); ActionListener.respondAndRelease( listener, new SearchResponse( - internalSearchResponse, + searchResponse.getHits(), + searchResponse.getAggregations(), + searchResponse.getSuggest(), + searchResponse.isTimedOut(), + searchResponse.isTerminatedEarly(), + profile, + searchResponse.getNumReducePhases(), searchResponse.getScrollId(), searchResponse.getTotalShards(), searchResponse.getSuccessfulShards(), diff --git a/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java b/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java index 721983b6af0e7..e2b8fcbf2825c 100644 --- a/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java +++ b/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java @@ -31,13 +31,15 @@ public class PlainActionFuture implements ActionFuture, ActionListener { @Override - public void onResponse(T result) { + public void onResponse(@Nullable T result) { set(result); } @Override public void onFailure(Exception e) { - setException(e); + if (sync.setException(Objects.requireNonNull(e))) { + done(false); + } } private static final String BLOCKING_OP_REASON = "Blocking operation"; @@ -115,23 +117,9 @@ public boolean cancel(boolean mayInterruptIfRunning) { return false; } done(false); - if (mayInterruptIfRunning) { - interruptTask(); - } return true; } - /** - * Subclasses can override this method to implement interruption of the - * future's computation. The method is invoked automatically by a successful - * call to {@link #cancel(boolean) cancel(true)}. - *
<p>
- * The default implementation does nothing. - * - * @since 10.0 - */ - protected void interruptTask() {} - /** * Subclasses should invoke this method to set the result of the computation * to {@code value}. This will set the state of the future to @@ -141,7 +129,7 @@ protected void interruptTask() {} * @param value the value that was the result of the task. * @return true if the state was successfully changed. */ - protected boolean set(@Nullable T value) { + protected final boolean set(@Nullable T value) { boolean result = sync.set(value); if (result) { done(true); @@ -149,33 +137,6 @@ protected boolean set(@Nullable T value) { return result; } - /** - * Subclasses should invoke this method to set the result of the computation - * to an error, {@code throwable}. This will set the state of the future to - * {@link PlainActionFuture.Sync#COMPLETED} and call {@link #done(boolean)} if the - * state was successfully changed. - * - * @param throwable the exception that the task failed with. - * @return true if the state was successfully changed. - * @throws Error if the throwable was an {@link Error}. - */ - protected boolean setException(Throwable throwable) { - boolean result = sync.setException(Objects.requireNonNull(throwable)); - if (result) { - done(false); - } - - // If it's an Error, we want to make sure it reaches the top of the - // call stack, so we rethrow it. - - // we want to notify the listeners we have with errors as well, as it breaks - // how we work in ES in terms of using assertions - // if (throwable instanceof Error) { - // throw (Error) throwable; - // } - return result; - } - /** * Called when the {@link PlainActionFuture} is completed. The {@code success} boolean indicates if the {@link * PlainActionFuture} was successfully completed (the value is {@code true}). In the cases the {@link PlainActionFuture} @@ -194,16 +155,6 @@ public T actionGet() { } } - @Override - public T actionGet(String timeout) { - return actionGet(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".actionGet.timeout")); - } - - @Override - public T actionGet(long timeoutMillis) { - return actionGet(timeoutMillis, TimeUnit.MILLISECONDS); - } - @Override public T actionGet(TimeValue timeout) { return actionGet(timeout.millis(), TimeUnit.MILLISECONDS); @@ -272,7 +223,7 @@ static final class Sync extends AbstractQueuedSynchronizer { static final int CANCELLED = 4; private V value; - private Throwable exception; + private Exception exception; /* * Acquisition succeeds if the future is done, otherwise it fails. @@ -311,7 +262,7 @@ V get(long nanos) throws TimeoutException, CancellationException, ExecutionExcep } /** - * Blocks until {@link #complete(Object, Throwable, int)} has been + * Blocks until {@link #complete(Object, Exception, int)} has been * successfully called. Throws a {@link CancellationException} if the task * was cancelled, or a {@link ExecutionException} if the task completed with * an error. @@ -390,8 +341,8 @@ boolean set(@Nullable V v) { /** * Transition to the COMPLETED state and set the exception. */ - boolean setException(Throwable t) { - return complete(null, t, COMPLETED); + boolean setException(Exception e) { + return complete(null, e, COMPLETED); } /** @@ -409,16 +360,16 @@ boolean cancel() { * final state ({@link #COMPLETED} or {@link #CANCELLED}). * * @param v the value to set as the result of the computation. - * @param t the exception to set as the result of the computation. + * @param e the exception to set as the result of the computation. 
* @param finalState the state to transition to. */ - private boolean complete(@Nullable V v, @Nullable Throwable t, int finalState) { + private boolean complete(@Nullable V v, @Nullable Exception e, int finalState) { boolean doCompletion = compareAndSetState(RUNNING, COMPLETING); if (doCompletion) { // If this thread successfully transitioned to COMPLETING, set the value // and exception and then release to the final state. this.value = v; - this.exception = t; + this.exception = e; releaseShared(finalState); } else if (getState() == COMPLETING) { // If some other thread is currently completing the future, block until diff --git a/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java b/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java index fd10c509d8ef2..d2ce20b1c3d58 100644 --- a/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.threadpool.ThreadPool; @@ -379,6 +380,41 @@ public SubscribableListener andThen( return newForked(l -> addListener(l.delegateFailureAndWrap(nextStep), executor, threadContext)); } + /** + * Creates and returns a new {@link SubscribableListener} {@code L} such that if this listener is completed successfully with result + * {@code R} then {@code fn} is invoked with argument {@code R}, and {@code L} is completed with the result of that invocation. If this + * listener is completed exceptionally, or {@code fn} throws an exception, then {@code L} is completed with that exception. + *
<p>
+ * This is essentially a shorthand for a call to {@link #andThen} with a {@code nextStep} argument that is fully synchronous. + *
<p>
+ * The threading of the {@code fn} invocation is the same as for listeners added with {@link #addListener}: if this listener is + * already complete then {@code fn} is invoked on the thread calling {@link #andThenApply} and in its thread context, but if this + * listener is incomplete then {@code fn} is invoked on the thread, and in the thread context, on which this listener is completed. + */ + public SubscribableListener andThenApply(CheckedFunction fn) { + return newForked(l -> addListener(l.map(fn))); + } + + /** + * Creates and returns a new {@link SubscribableListener} {@code L} such that if this listener is completed successfully with result + * {@code R} then {@code consumer} is applied to argument {@code R}, and {@code L} is completed with {@code null} when {@code + * consumer} returns. If this listener is completed exceptionally, or {@code consumer} throws an exception, then {@code L} is + * completed with that exception. + *
<p>
+ * This is essentially a shorthand for a call to {@link #andThen} with a {@code nextStep} argument that is fully synchronous. + *
<p>
+ * The threading of the {@code consumer} invocation is the same as for listeners added with {@link #addListener}: if this listener is + * already complete then {@code consumer} is invoked on the thread calling {@link #andThenAccept} and in its thread context, but if + * this listener is incomplete then {@code consumer} is invoked on the thread, and in the thread context, on which this listener is + * completed. + */ + public SubscribableListener andThenAccept(CheckedConsumer consumer) { + return newForked(l -> addListener(l.map(r -> { + consumer.accept(r); + return null; + }))); + } + /** * Adds a timeout to this listener, such that if the timeout elapses before the listener is completed then it will be completed with an * {@link ElasticsearchTimeoutException}. diff --git a/server/src/main/java/org/elasticsearch/action/support/TransportAction.java b/server/src/main/java/org/elasticsearch/action/support/TransportAction.java index cb5a9ce3db353..35f1b645293bd 100644 --- a/server/src/main/java/org/elasticsearch/action/support/TransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/TransportAction.java @@ -15,6 +15,8 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; @@ -58,8 +60,13 @@ public final void execute(Task task, Request request, ActionListener l listener = new TaskResultStoringActionListener<>(taskManager, task, listener); } - RequestFilterChain requestFilterChain = new RequestFilterChain<>(this, logger); - requestFilterChain.proceed(task, actionName, request, listener); + // Note on request refcounting: we can be sure that either we get to the end of the chain (and execute the actual action) or + // we complete the response listener and short-circuit the outer chain, so we release our request ref on both paths, using + // Releasables#releaseOnce to avoid a double-release. 
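Returning to the SubscribableListener helpers introduced above, a short, hedged usage sketch; fetchDocument, Document and the logger are hypothetical, while the chaining semantics follow the javadoc just quoted:

```java
// Hedged sketch: fetchDocument is a hypothetical async step that completes a listener with a Document.
SubscribableListener.<Document>newForked(l -> fetchDocument(docId, l))
    .andThenApply(doc -> doc.source().length())                       // synchronous transform of the result
    .andThenAccept(length -> logger.info("doc is {} bytes", length))  // synchronous consumer, completes with null
    .addListener(ActionListener.wrap(ignored -> {}, e -> logger.error("lookup failed", e)));
```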
+ request.mustIncRef(); + final var releaseRef = Releasables.releaseOnce(request::decRef); + RequestFilterChain requestFilterChain = new RequestFilterChain<>(this, logger, releaseRef); + requestFilterChain.proceed(task, actionName, request, ActionListener.runBefore(listener, releaseRef::close)); } protected abstract void doExecute(Task task, Request request, ActionListener listener); @@ -71,10 +78,12 @@ private static class RequestFilterChain action; private final AtomicInteger index = new AtomicInteger(); private final Logger logger; + private final Releasable releaseRef; - private RequestFilterChain(TransportAction action, Logger logger) { + private RequestFilterChain(TransportAction action, Logger logger, Releasable releaseRef) { this.action = action; this.logger = logger; + this.releaseRef = releaseRef; } @Override @@ -84,7 +93,9 @@ public void proceed(Task task, String actionName, Request request, ActionListene if (i < this.action.filters.length) { this.action.filters[i].apply(task, actionName, request, listener, this); } else if (i == this.action.filters.length) { - this.action.doExecute(task, request, listener); + try (releaseRef) { + this.action.doExecute(task, request, listener); + } } else { listener.onFailure(new IllegalStateException("proceed was called too many times")); } diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java index e37f248246920..4fb243891709b 100644 --- a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java @@ -25,6 +25,8 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; @@ -96,6 +98,23 @@ protected void doExecute(Task task, NodesRequest request, ActionListener { + final List drainedResponses; + synchronized (responses) { + drainedResponses = List.copyOf(responses); + responses.clear(); + } + Releasables.wrap(Iterators.map(drainedResponses.iterator(), r -> r::decRef)).close(); + }); + } + } + @Override protected void sendItemRequest(DiscoveryNode discoveryNode, ActionListener listener) { final var nodeRequest = newNodeRequest(request); @@ -118,9 +137,14 @@ protected void sendItemRequest(DiscoveryNode discoveryNode, ActionListener, Exception> onCompletion() { // ref releases all happen-before here so no need to be synchronized - return l -> newResponseAsync(task, request, responses, exceptions, l); + return l -> { + try (var ignored = Releasables.wrap(Iterators.map(responses.iterator(), r -> r::decRef))) { + newResponseAsync(task, request, responses, exceptions, l); + } + }; } @Override @@ -154,9 +182,11 @@ private Writeable.Reader nodeResponseReader(DiscoveryNode discover } /** - * Create a new {@link NodesResponse} (multi-node response). + * Create a new {@link NodesResponse}. This method is executed on {@link #finalExecutor}. * - * @param request The associated request. + * @param request The request whose response we are constructing. 
{@link TransportNodesAction} may have already released all its + * references to this object before calling this method, so it's up to individual implementations to retain their own + * reference to the request if still needed here. * @param responses All successful node-level responses. * @param failures All node-level failures. * @return Never {@code null}. @@ -166,7 +196,11 @@ private Writeable.Reader nodeResponseReader(DiscoveryNode discover /** * Create a new {@link NodesResponse}, possibly asynchronously. The default implementation is synchronous and calls - * {@link #newResponse(BaseNodesRequest, List, List)} + * {@link #newResponse(BaseNodesRequest, List, List)}. This method is executed on {@link #finalExecutor}. + * + * @param request The request whose response we are constructing. {@link TransportNodesAction} may have already released all its + * references to this object before calling this method, so it's up to individual implementations to retain their own + * reference to the request if still needed here. */ protected void newResponseAsync( Task task, @@ -175,7 +209,7 @@ protected void newResponseAsync( List failures, ActionListener listener ) { - ActionListener.completeWith(listener, () -> newResponse(request, responses, failures)); + ActionListener.run(listener, l -> ActionListener.respondAndRelease(l, newResponse(request, responses, failures))); } protected abstract NodeRequest newNodeRequest(NodesRequest request); diff --git a/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java b/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java index 4f637c4792cd6..80b7a95bbe0de 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java @@ -11,8 +11,10 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; @@ -25,7 +27,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; @@ -37,7 +38,6 @@ import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; @@ -57,6 +57,7 @@ public abstract class TransportInstanceSingleOperationAction< final String shardActionName; + @SuppressWarnings("this-escape") protected 
TransportInstanceSingleOperationAction( String actionName, ThreadPool threadPool, @@ -72,7 +73,7 @@ protected TransportInstanceSingleOperationAction( this.transportService = transportService; this.indexNameExpressionResolver = indexNameExpressionResolver; this.shardActionName = actionName + "[s]"; - transportService.registerRequestHandler(shardActionName, EsExecutors.DIRECT_EXECUTOR_SERVICE, request, new ShardTransportHandler()); + transportService.registerRequestHandler(shardActionName, EsExecutors.DIRECT_EXECUTOR_SERVICE, request, this::handleShardRequest); } @Override @@ -257,26 +258,8 @@ public void onTimeout(TimeValue timeout) { } } - private class ShardTransportHandler implements TransportRequestHandler { - - @Override - public void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception { - threadPool.executor(executor(request.shardId)).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - try { - channel.sendResponse(e); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.warn("failed to send response for " + shardActionName, inner); - } - } - - @Override - protected void doRun() { - shardOperation(request, ActionListener.wrap(channel::sendResponse, this::onFailure)); - } - }); - } + private void handleShardRequest(Request request, TransportChannel channel, Task task) { + threadPool.executor(executor(request.shardId)) + .execute(ActionRunnable.wrap(new ChannelActionListener(channel), l -> shardOperation(request, l))); } } diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java index 650b9db7f3d69..284831ef18060 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java @@ -156,7 +156,7 @@ public TermVectorsRequest() {} } } if (in.readBoolean()) { - perFieldAnalyzer = readPerFieldAnalyzer(in.readMap()); + perFieldAnalyzer = readPerFieldAnalyzer(in.readGenericMap()); } if (in.readBoolean()) { filterSettings = new FilterSettings(); diff --git a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java index 17d712bdf5af4..a8365a62c9e58 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java @@ -16,13 +16,9 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequestBuilder; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import 
org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -88,13 +84,6 @@ import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequestBuilder; -import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest; -import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequestBuilder; -import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; -import org.elasticsearch.action.admin.indices.dangling.delete.DeleteDanglingIndexRequest; -import org.elasticsearch.action.admin.indices.dangling.import_index.ImportDanglingIndexRequest; -import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesRequest; -import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesResponse; import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.DeletePipelineRequestBuilder; import org.elasticsearch.action.ingest.GetPipelineRequest; @@ -174,11 +163,6 @@ public interface ClusterAdminClient extends ElasticsearchClient { */ ClusterUpdateSettingsRequestBuilder prepareUpdateSettings(); - /** - * Re initialize each cluster node and pass them the secret store password. - */ - NodesReloadSecureSettingsRequestBuilder prepareReloadSecureSettings(); - /** * Reroutes allocation of shards. Advance API. */ @@ -256,18 +240,6 @@ public interface ClusterAdminClient extends ElasticsearchClient { */ void nodesUsage(NodesUsageRequest request, ActionListener listener); - /** - * Returns top N hot-threads samples per node. The hot-threads are only sampled - * for the node ids specified in the request. - */ - void nodesHotThreads(NodesHotThreadsRequest request, ActionListener listener); - - /** - * Returns a request builder to fetch top N hot-threads samples per node. The hot-threads are only sampled - * for the node ids provided. Note: Use {@code *} to fetch samples for all nodes - */ - NodesHotThreadsRequestBuilder prepareNodesHotThreads(String... nodesIds); - /** * List tasks * @@ -456,18 +428,6 @@ public interface ClusterAdminClient extends ElasticsearchClient { */ RestoreSnapshotRequestBuilder prepareRestoreSnapshot(String repository, String snapshot); - /** - * Returns a list of the pending cluster tasks, that are scheduled to be executed. This includes operations - * that update the cluster state (for example, a create index operation) - */ - void pendingClusterTasks(PendingClusterTasksRequest request, ActionListener listener); - - /** - * Returns a list of the pending cluster tasks, that are scheduled to be executed. This includes operations - * that update the cluster state (for example, a create index operation) - */ - PendingClusterTasksRequestBuilder preparePendingClusterTasks(); - /** * Get snapshot status. */ @@ -582,34 +542,4 @@ public interface ClusterAdminClient extends ElasticsearchClient { * Get a script from the cluster state */ void getStoredScript(GetStoredScriptRequest request, ActionListener listener); - - /** - * List dangling indices on all nodes. - */ - void listDanglingIndices(ListDanglingIndicesRequest request, ActionListener listener); - - /** - * List dangling indices on all nodes. 
- */ - ActionFuture listDanglingIndices(ListDanglingIndicesRequest request); - - /** - * Restore specified dangling indices. - */ - void importDanglingIndex(ImportDanglingIndexRequest request, ActionListener listener); - - /** - * Restore specified dangling indices. - */ - ActionFuture importDanglingIndex(ImportDanglingIndexRequest request); - - /** - * Delete specified dangling indices. - */ - void deleteDanglingIndex(DeleteDanglingIndexRequest request, ActionListener listener); - - /** - * Delete specified dangling indices. - */ - ActionFuture deleteDanglingIndex(DeleteDanglingIndexRequest request); } diff --git a/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java b/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java index b16eba26f3594..9ba26b95244ab 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java @@ -69,9 +69,6 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoreRequestBuilder; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeRequestBuilder; import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; @@ -150,27 +147,6 @@ public interface IndicesAdminClient extends ElasticsearchClient { */ IndicesSegmentsRequestBuilder prepareSegments(String... indices); - /** - * The shard stores info of one or more indices. - * - * @param request The indices shard stores request - * @return The result future - */ - ActionFuture shardStores(IndicesShardStoresRequest request); - - /** - * The shard stores info of one or more indices. - * - * @param request The indices shard stores request - * @param listener A listener to be notified with a result - */ - void shardStores(IndicesShardStoresRequest request, ActionListener listener); - - /** - * The shard stores info of one or more indices. - */ - IndicesShardStoreRequestBuilder prepareShardStores(String... indices); - /** * Creates an index using an explicit request allowing to specify the settings of the index. 
* diff --git a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java index 21c01abd52437..12f3dec804809 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java @@ -24,15 +24,10 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.health.TransportClusterHealthAction; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequestBuilder; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; -import org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotThreadsAction; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction; -import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -122,10 +117,6 @@ import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequestBuilder; import org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction; import org.elasticsearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction; -import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest; -import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequestBuilder; -import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; -import org.elasticsearch.action.admin.cluster.tasks.TransportPendingClusterTasksAction; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; import org.elasticsearch.action.admin.indices.alias.TransportIndicesAliasesAction; @@ -147,13 +138,6 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.admin.indices.dangling.delete.DeleteDanglingIndexRequest; -import org.elasticsearch.action.admin.indices.dangling.delete.TransportDeleteDanglingIndexAction; -import org.elasticsearch.action.admin.indices.dangling.import_index.ImportDanglingIndexRequest; -import org.elasticsearch.action.admin.indices.dangling.import_index.TransportImportDanglingIndexAction; -import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesAction; -import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesRequest; -import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import 
org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder; import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; @@ -177,9 +161,9 @@ import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequestBuilder; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.action.admin.indices.open.OpenIndexAction; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequestBuilder; @@ -209,13 +193,9 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequestBuilder; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; +import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoreRequestBuilder; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeAction; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeRequestBuilder; @@ -224,16 +204,16 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; -import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder; +import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequestBuilder; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder; +import org.elasticsearch.action.admin.indices.template.put.TransportPutIndexTemplateAction; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction; import 
org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; @@ -265,16 +245,16 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.TransportIndexAction; -import org.elasticsearch.action.ingest.DeletePipelineAction; import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.DeletePipelineRequestBuilder; +import org.elasticsearch.action.ingest.DeletePipelineTransportAction; import org.elasticsearch.action.ingest.GetPipelineAction; import org.elasticsearch.action.ingest.GetPipelineRequest; import org.elasticsearch.action.ingest.GetPipelineRequestBuilder; import org.elasticsearch.action.ingest.GetPipelineResponse; -import org.elasticsearch.action.ingest.PutPipelineAction; import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.ingest.PutPipelineRequestBuilder; +import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.action.ingest.SimulatePipelineAction; import org.elasticsearch.action.ingest.SimulatePipelineRequest; import org.elasticsearch.action.ingest.SimulatePipelineRequestBuilder; @@ -745,11 +725,6 @@ public ClusterUpdateSettingsRequestBuilder prepareUpdateSettings() { return new ClusterUpdateSettingsRequestBuilder(this); } - @Override - public NodesReloadSecureSettingsRequestBuilder prepareReloadSecureSettings() { - return new NodesReloadSecureSettingsRequestBuilder(this); - } - @Override public ActionFuture nodesInfo(final NodesInfoRequest request) { return execute(TransportNodesInfoAction.TYPE, request); @@ -795,16 +770,6 @@ public ClusterStatsRequestBuilder prepareClusterStats() { return new ClusterStatsRequestBuilder(this); } - @Override - public void nodesHotThreads(NodesHotThreadsRequest request, ActionListener listener) { - execute(TransportNodesHotThreadsAction.TYPE, request, listener); - } - - @Override - public NodesHotThreadsRequestBuilder prepareNodesHotThreads(String... nodesIds) { - return new NodesHotThreadsRequestBuilder(this).setNodesIds(nodesIds); - } - @Override public ActionFuture listTasks(final ListTasksRequest request) { return execute(TransportListTasksAction.TYPE, request); @@ -865,16 +830,6 @@ public ClusterSearchShardsRequestBuilder prepareSearchShards(String... 
indices) return new ClusterSearchShardsRequestBuilder(this).setIndices(indices); } - @Override - public PendingClusterTasksRequestBuilder preparePendingClusterTasks() { - return new PendingClusterTasksRequestBuilder(this); - } - - @Override - public void pendingClusterTasks(PendingClusterTasksRequest request, ActionListener listener) { - execute(TransportPendingClusterTasksAction.TYPE, request, listener); - } - @Override public void putRepository(PutRepositoryRequest request, ActionListener listener) { execute(TransportPutRepositoryAction.TYPE, request, listener); @@ -1002,12 +957,12 @@ public SnapshotsStatusRequestBuilder prepareSnapshotStatus() { @Override public void putPipeline(PutPipelineRequest request, ActionListener listener) { - execute(PutPipelineAction.INSTANCE, request, listener); + execute(PutPipelineTransportAction.TYPE, request, listener); } @Override public ActionFuture putPipeline(PutPipelineRequest request) { - return execute(PutPipelineAction.INSTANCE, request); + return execute(PutPipelineTransportAction.TYPE, request); } @Override @@ -1017,12 +972,12 @@ public PutPipelineRequestBuilder preparePutPipeline(String id, BytesReference so @Override public void deletePipeline(DeletePipelineRequest request, ActionListener listener) { - execute(DeletePipelineAction.INSTANCE, request, listener); + execute(DeletePipelineTransportAction.TYPE, request, listener); } @Override public ActionFuture deletePipeline(DeletePipelineRequest request) { - return execute(DeletePipelineAction.INSTANCE, request); + return execute(DeletePipelineTransportAction.TYPE, request); } @Override @@ -1075,36 +1030,6 @@ public void getStoredScript(final GetStoredScriptRequest request, final ActionLi execute(GetStoredScriptAction.INSTANCE, request, listener); } - @Override - public ActionFuture listDanglingIndices(ListDanglingIndicesRequest request) { - return execute(ListDanglingIndicesAction.INSTANCE, request); - } - - @Override - public void listDanglingIndices(ListDanglingIndicesRequest request, ActionListener listener) { - execute(ListDanglingIndicesAction.INSTANCE, request, listener); - } - - @Override - public ActionFuture importDanglingIndex(ImportDanglingIndexRequest request) { - return execute(TransportImportDanglingIndexAction.TYPE, request); - } - - @Override - public void importDanglingIndex(ImportDanglingIndexRequest request, ActionListener listener) { - execute(TransportImportDanglingIndexAction.TYPE, request, listener); - } - - @Override - public ActionFuture deleteDanglingIndex(DeleteDanglingIndexRequest request) { - return execute(TransportDeleteDanglingIndexAction.TYPE, request); - } - - @Override - public void deleteDanglingIndex(DeleteDanglingIndexRequest request, ActionListener listener) { - execute(TransportDeleteDanglingIndexAction.TYPE, request, listener); - } - @Override public GetStoredScriptRequestBuilder prepareGetStoredScript(String id) { return new GetStoredScriptRequestBuilder(this).setId(id); @@ -1339,12 +1264,12 @@ public ActionFuture getFieldMappings(GetFieldMappingsR @Override public ActionFuture putMapping(final PutMappingRequest request) { - return execute(PutMappingAction.INSTANCE, request); + return execute(TransportPutMappingAction.TYPE, request); } @Override public void putMapping(final PutMappingRequest request, final ActionListener listener) { - execute(PutMappingAction.INSTANCE, request, listener); + execute(TransportPutMappingAction.TYPE, request, listener); } @Override @@ -1427,29 +1352,14 @@ public IndicesSegmentsRequestBuilder prepareSegments(String... 
indices) { return new IndicesSegmentsRequestBuilder(this).setIndices(indices); } - @Override - public ActionFuture shardStores(IndicesShardStoresRequest request) { - return execute(IndicesShardStoresAction.INSTANCE, request); - } - - @Override - public void shardStores(IndicesShardStoresRequest request, ActionListener listener) { - execute(IndicesShardStoresAction.INSTANCE, request, listener); - } - - @Override - public IndicesShardStoreRequestBuilder prepareShardStores(String... indices) { - return new IndicesShardStoreRequestBuilder(this, indices); - } - @Override public ActionFuture updateSettings(final UpdateSettingsRequest request) { - return execute(UpdateSettingsAction.INSTANCE, request); + return execute(TransportUpdateSettingsAction.TYPE, request); } @Override public void updateSettings(final UpdateSettingsRequest request, final ActionListener listener) { - execute(UpdateSettingsAction.INSTANCE, request, listener); + execute(TransportUpdateSettingsAction.TYPE, request, listener); } @Override @@ -1484,12 +1394,12 @@ public AnalyzeRequestBuilder prepareAnalyze() { @Override public ActionFuture putTemplate(final PutIndexTemplateRequest request) { - return execute(PutIndexTemplateAction.INSTANCE, request); + return execute(TransportPutIndexTemplateAction.TYPE, request); } @Override public void putTemplate(final PutIndexTemplateRequest request, final ActionListener listener) { - execute(PutIndexTemplateAction.INSTANCE, request, listener); + execute(TransportPutIndexTemplateAction.TYPE, request, listener); } @Override @@ -1509,7 +1419,7 @@ public GetIndexTemplatesRequestBuilder prepareGetTemplates(String... names) { @Override public void deleteTemplate(final DeleteIndexTemplateRequest request, final ActionListener listener) { - execute(DeleteIndexTemplateAction.INSTANCE, request, listener); + execute(TransportDeleteIndexTemplateAction.TYPE, request, listener); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java b/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java index ba0e1de15f192..c2b61e496e9c9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java @@ -376,10 +376,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(shardIds); } - public long getTotal() { - return total; - } - public boolean containsShardId(ShardId shardId) { return shardIds.contains(shardId); } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterInfoSimulator.java b/server/src/main/java/org/elasticsearch/cluster/ClusterInfoSimulator.java index f9e56b5b2ff2d..7dbd4f864bdb3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterInfoSimulator.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterInfoSimulator.java @@ -8,7 +8,9 @@ package org.elasticsearch.cluster; +import org.elasticsearch.cluster.ClusterInfo.NodeAndShard; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.util.CopyOnFirstWriteMap; import org.elasticsearch.index.shard.ShardId; @@ -16,20 +18,63 @@ import java.util.Map; import java.util.Objects; +import static org.elasticsearch.cluster.ClusterInfo.shardIdentifierFromRouting; +import static org.elasticsearch.cluster.routing.ExpectedShardSizeEstimator.getExpectedShardSize; +import static org.elasticsearch.cluster.routing.ExpectedShardSizeEstimator.shouldReserveSpaceForInitializingShard; 
+import static org.elasticsearch.cluster.routing.ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE; + public class ClusterInfoSimulator { + private final RoutingAllocation allocation; + private final Map leastAvailableSpaceUsage; private final Map mostAvailableSpaceUsage; private final CopyOnFirstWriteMap shardSizes; private final Map shardDataSetSizes; - private final Map dataPath; - - public ClusterInfoSimulator(ClusterInfo clusterInfo) { - this.leastAvailableSpaceUsage = new HashMap<>(clusterInfo.getNodeLeastAvailableDiskUsages()); - this.mostAvailableSpaceUsage = new HashMap<>(clusterInfo.getNodeMostAvailableDiskUsages()); - this.shardSizes = new CopyOnFirstWriteMap<>(clusterInfo.shardSizes); - this.shardDataSetSizes = Map.copyOf(clusterInfo.shardDataSetSizes); - this.dataPath = Map.copyOf(clusterInfo.dataPath); + private final Map dataPath; + + public ClusterInfoSimulator(RoutingAllocation allocation) { + this.allocation = allocation; + this.leastAvailableSpaceUsage = getAdjustedDiskSpace(allocation, allocation.clusterInfo().getNodeLeastAvailableDiskUsages()); + this.mostAvailableSpaceUsage = getAdjustedDiskSpace(allocation, allocation.clusterInfo().getNodeMostAvailableDiskUsages()); + this.shardSizes = new CopyOnFirstWriteMap<>(allocation.clusterInfo().shardSizes); + this.shardDataSetSizes = Map.copyOf(allocation.clusterInfo().shardDataSetSizes); + this.dataPath = Map.copyOf(allocation.clusterInfo().dataPath); + } + + /** + * Cluster info contains reserved space that is necessary to finish initializing shards (those currently in progress): + * for all initializing shards, sum(expected size) = reserved space + already used space. + * This deducts the already used space from the disk usage because, when a shard start is simulated, the entire expected shard size is added.
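As a rough illustration of the accounting described in that comment, here is a self-contained sketch with hypothetical numbers (the names `freeBytes`, `reservedTotal`, and `expectedShardSizes` are illustrative stand-ins; the real logic lives in `getAdjustedDiskSpace` below):

```java
// Toy model of the reserved-space adjustment: add back the bytes the
// initializing shards have already written, because the later simulation of
// each shard start will subtract the shard's full expected size again.
public class ReservedSpaceAdjustmentExample {
    public static void main(String[] args) {
        long freeBytes = 100_000L;                        // free space currently reported for the node
        long reservedTotal = 30_000L;                     // space still needed to finish the initializing shards
        long[] expectedShardSizes = { 25_000L, 15_000L }; // full expected sizes of those shards

        long adjustment = 0;
        for (long expected : expectedShardSizes) {
            adjustment += expected;
        }
        adjustment -= reservedTotal; // sum(expected) - reserved = already used space

        long adjustedFree = freeBytes + adjustment;
        System.out.println(adjustedFree); // 110000; simulating both shard starts then subtracts 40000
    }
}
```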
+ */ + private static Map getAdjustedDiskSpace(RoutingAllocation allocation, Map diskUsage) { + var diskUsageCopy = new HashMap<>(diskUsage); + for (var entry : diskUsageCopy.entrySet()) { + var nodeId = entry.getKey(); + var usage = entry.getValue(); + + var reserved = allocation.clusterInfo().getReservedSpace(nodeId, usage.path()); + if (reserved.total() == 0) { + continue; + } + var node = allocation.routingNodes().node(nodeId); + if (node == null) { + continue; + } + + long adjustment = 0; + for (ShardId shardId : reserved.shardIds()) { + var shard = node.getByShardId(shardId); + if (shard != null) { + var expectedSize = getExpectedShardSize(shard, 0, allocation); + adjustment += expectedSize; + } + } + adjustment -= reserved.total(); + + entry.setValue(updateWithFreeBytes(usage, adjustment)); + } + return diskUsageCopy; } /** @@ -43,49 +88,50 @@ public ClusterInfoSimulator(ClusterInfo clusterInfo) { public void simulateShardStarted(ShardRouting shard) { assert shard.initializing(); - var size = getEstimatedShardSize(shard); - if (size != null && size > 0) { + var size = getExpectedShardSize( + shard, + UNAVAILABLE_EXPECTED_SHARD_SIZE, + getClusterInfo(), + allocation.snapshotShardSizeInfo(), + allocation.metadata(), + allocation.routingTable() + ); + if (size != UNAVAILABLE_EXPECTED_SHARD_SIZE) { if (shard.relocatingNodeId() != null) { // relocation modifyDiskUsage(shard.relocatingNodeId(), size); modifyDiskUsage(shard.currentNodeId(), -size); } else { // new shard - modifyDiskUsage(shard.currentNodeId(), -size); - shardSizes.put(ClusterInfo.shardIdentifierFromRouting(shard), size); + if (shouldReserveSpaceForInitializingShard(shard, allocation.metadata())) { + modifyDiskUsage(shard.currentNodeId(), -size); + } + shardSizes.put( + shardIdentifierFromRouting(shard), + allocation.metadata().getIndexSafe(shard.index()).ignoreDiskWatermarks() ? 0 : size + ); } } } - private Long getEstimatedShardSize(ShardRouting shard) { - if (shard.relocatingNodeId() != null) { - // relocation existing shard, get size of the source shard - return shardSizes.get(ClusterInfo.shardIdentifierFromRouting(shard)); - } else if (shard.primary() == false) { - // initializing new replica, get size of the source primary shard - return shardSizes.get(ClusterInfo.shardIdentifierFromRouting(shard.shardId(), true)); - } else { - // initializing new (empty?) 
primary - return shard.getExpectedShardSize(); + private void modifyDiskUsage(String nodeId, long freeDelta) { + if (freeDelta == 0) { + return; } - } - - private void modifyDiskUsage(String nodeId, long delta) { var diskUsage = mostAvailableSpaceUsage.get(nodeId); if (diskUsage == null) { return; } var path = diskUsage.getPath(); + updateDiskUsage(leastAvailableSpaceUsage, nodeId, path, freeDelta); + updateDiskUsage(mostAvailableSpaceUsage, nodeId, path, freeDelta); + } - var leastUsage = leastAvailableSpaceUsage.get(nodeId); - if (leastUsage != null && Objects.equals(leastUsage.getPath(), path)) { - // ensure new value is within bounds - leastAvailableSpaceUsage.put(nodeId, updateWithFreeBytes(leastUsage, delta)); - } - var mostUsage = mostAvailableSpaceUsage.get(nodeId); - if (mostUsage != null && Objects.equals(mostUsage.getPath(), path)) { + private void updateDiskUsage(Map availableSpaceUsage, String nodeId, String path, long freeDelta) { + var usage = availableSpaceUsage.get(nodeId); + if (usage != null && Objects.equals(usage.getPath(), path)) { // ensure new value is within bounds - mostAvailableSpaceUsage.put(nodeId, updateWithFreeBytes(mostUsage, delta)); + availableSpaceUsage.put(nodeId, updateWithFreeBytes(usage, freeDelta)); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java index 30ee992c917ea..03b32ea0b3bfb 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java @@ -12,6 +12,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; @@ -226,7 +227,11 @@ public void clusterChanged(ClusterChangedEvent event) { clusterApplierService.removeTimeoutListener(this); logger.trace("observer: accepting cluster state change ({})", state); lastObservedVersion = state.version(); - context.listener.onNewClusterState(state); + try { + context.listener.onNewClusterState(state); + } catch (Exception e) { + logUnexpectedException(e, "cluster state version [%d]", state.version()); + } } else { logger.trace( "observer: predicate approved change but observing context has changed " @@ -253,7 +258,11 @@ public void postAdded() { logger.trace("observer: post adding listener: accepting current cluster state ({})", newState); clusterApplierService.removeTimeoutListener(this); lastObservedVersion = newState.version(); - context.listener.onNewClusterState(newState); + try { + context.listener.onNewClusterState(newState); + } catch (Exception e) { + logUnexpectedException(e, "cluster state version [%d]", newState.version()); + } } else { logger.trace( "observer: postAdded - predicate approved state but observing context has changed - ignoring ({})", @@ -272,7 +281,11 @@ public void onClose() { if (context != null) { logger.trace("observer: cluster service closed. 
notifying listener."); clusterApplierService.removeTimeoutListener(this); - context.listener.onClusterServiceClose(); + try { + context.listener.onClusterServiceClose(); + } catch (Exception e) { + logUnexpectedException(e, "cluster service close"); + } } } @@ -290,7 +303,11 @@ public void onTimeout(TimeValue timeout) { // update to latest, in case people want to retry lastObservedVersion = clusterApplierService.state().version(); timedOut = true; - context.listener.onTimeout(timeOutValue); + try { + context.listener.onTimeout(timeOutValue); + } catch (Exception e) { + logUnexpectedException(e, "timeout after [%s]", timeOutValue); + } } } @@ -298,6 +315,19 @@ public void onTimeout(TimeValue timeout) { public String toString() { return "ClusterStateObserver[" + observingContext.get() + "]"; } + + private void logUnexpectedException(Exception exception, String format, Object... args) { + final var illegalStateException = new IllegalStateException( + Strings.format( + "unexpected exception processing %s in context [%s]", + Strings.format(format, args), + ObserverClusterStateListener.this + ), + exception + ); + logger.error(illegalStateException.getMessage(), illegalStateException); + assert false : illegalStateException; + } } public interface Listener { diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index 1744bcc91b834..26c453d419f4c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -26,7 +26,6 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; import org.elasticsearch.cluster.service.ClusterService; @@ -97,7 +96,6 @@ public class InternalClusterInfoService implements ClusterInfoService, ClusterSt private final Object mutex = new Object(); private final List> nextRefreshListeners = new ArrayList<>(); - private final ClusterService clusterService; private AsyncRefresh currentRefresh; private RefreshScheduler refreshScheduler; @@ -108,7 +106,6 @@ public InternalClusterInfoService(Settings settings, ClusterService clusterServi this.indicesStatsSummary = IndicesStatsSummary.EMPTY; this.threadPool = threadPool; this.client = client; - this.clusterService = clusterService; this.updateFrequency = INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.get(settings); this.fetchTimeout = INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.get(settings); this.enabled = DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.get(settings); @@ -250,7 +247,6 @@ public void onResponse(IndicesStatsResponse indicesStatsResponse) { final Map reservedSpaceBuilders = new HashMap<>(); buildShardLevelInfo( - clusterService.state().routingTable(), adjustShardStats(stats), shardSizeByIdentifierBuilder, shardDataSetSizeBuilder, @@ -445,7 +441,6 @@ public void addListener(Consumer clusterInfoConsumer) { } static void buildShardLevelInfo( - RoutingTable routingTable, ShardStats[] stats, Map shardSizes, Map shardDataSetSizeBuilder, @@ -453,7 +448,7 @@ static void buildShardLevelInfo( Map reservedSpaceByShard ) { for (ShardStats s : stats) { - final ShardRouting 
shardRouting = routingTable.deduplicate(s.getShardRouting()); + final ShardRouting shardRouting = s.getShardRouting(); dataPathByShard.put(ClusterInfo.NodeAndShard.from(shardRouting), s.getDataPath()); final StoreStats storeStats = s.getStats().getStore(); @@ -462,7 +457,7 @@ static void buildShardLevelInfo( } final long size = storeStats.sizeInBytes(); final long dataSetSize = storeStats.totalDataSetSizeInBytes(); - final long reserved = storeStats.getReservedSize().getBytes(); + final long reserved = storeStats.reservedSizeInBytes(); final String shardIdentifier = ClusterInfo.shardIdentifierFromRouting(shardRouting); logger.trace("shard: {} size: {} reserved: {}", shardIdentifier, size, reserved); diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 470f175deb247..1baa287830c75 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -11,6 +11,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterState.Custom; +import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; @@ -18,6 +19,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.SuppressForbidden; @@ -41,6 +43,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -50,12 +53,14 @@ import java.util.Set; import java.util.stream.Stream; +import static org.elasticsearch.TransportVersions.SNAPSHOTS_IN_PROGRESS_TRACKING_REMOVING_NODES_ADDED; + /** * Meta data about snapshots that are currently executing */ public class SnapshotsInProgress extends AbstractNamedDiffable implements Custom { - public static final SnapshotsInProgress EMPTY = new SnapshotsInProgress(Map.of()); + public static final SnapshotsInProgress EMPTY = new SnapshotsInProgress(Map.of(), Set.of()); public static final String TYPE = "snapshots"; @@ -64,12 +69,33 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement // keyed by repository name private final Map entries; + /** + * IDs of nodes which are marked for removal, or which were previously marked for removal and still have running shard snapshots. + */ + // When a node is marked for removal it pauses all its shard snapshots as promptly as possible. When each shard snapshot pauses it + // enters state PAUSED_FOR_NODE_REMOVAL to allow the shard to move to a different node where its snapshot can resume. However, if the + // removal marker is deleted before the node shuts down then we need to make sure to resume the snapshots of any remaining shards, which + // we do by moving all those PAUSED_FOR_NODE_REMOVAL shards back to state INIT. 
The problem is that the data node needs to be able to + // distinguish an INIT shard whose snapshot was successfully paused and now needs to be resumed from an INIT shard whose move to state + // PAUSED_FOR_NODE_REMOVAL has not yet been processed on the master: the latter kind of shard will move back to PAUSED_FOR_NODE_REMOVAL + // in a subsequent update and so shouldn't be resumed. The solution is to wait for all the shards on the previously-shutting-down node + // to finish pausing before resuming any of them. We do this by tracking the nodes in this field, avoiding moving any shards back to + // state INIT while the node appears in this set and, conversely, we only remove nodes from this set when none of their shards are in + // INIT state. + private final Set nodesIdsForRemoval; + public static SnapshotsInProgress get(ClusterState state) { return state.custom(TYPE, EMPTY); } public SnapshotsInProgress(StreamInput in) throws IOException { - this(collectByRepo(in)); + this(collectByRepo(in), readNodeIdsForRemoval(in)); + } + + private static Set readNodeIdsForRemoval(StreamInput in) throws IOException { + return in.getTransportVersion().onOrAfter(SNAPSHOTS_IN_PROGRESS_TRACKING_REMOVING_NODES_ADDED) + ? in.readCollectionAsImmutableSet(StreamInput::readString) + : Set.of(); } private static Map collectByRepo(StreamInput in) throws IOException { @@ -89,8 +115,9 @@ private static Map collectByRepo(StreamInput in) throws IOExcept return res; } - private SnapshotsInProgress(Map entries) { + private SnapshotsInProgress(Map entries, Set nodesIdsForRemoval) { this.entries = Map.copyOf(entries); + this.nodesIdsForRemoval = nodesIdsForRemoval; assert assertConsistentEntries(this.entries); } @@ -107,7 +134,7 @@ public SnapshotsInProgress withUpdatedEntriesForRepo(String repository, List toXContentChunked(ToXContent.Params ignored) { - return Iterators.concat(ChunkedToXContentHelper.startArray("snapshots"), asStream().iterator(), ChunkedToXContentHelper.endArray()); + return Iterators.concat( + ChunkedToXContentHelper.startArray("snapshots"), + asStream().iterator(), + ChunkedToXContentHelper.endArray(), + ChunkedToXContentHelper.startArray("node_ids_for_removal"), + Iterators.map(nodesIdsForRemoval.iterator(), s -> (builder, params) -> builder.value(s)), + ChunkedToXContentHelper.endArray() + ); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - return entries.equals(((SnapshotsInProgress) o).entries); + final var other = (SnapshotsInProgress) o; + return nodesIdsForRemoval.equals(other.nodesIdsForRemoval) && entries.equals(other.entries); } @Override public int hashCode() { - return entries.hashCode(); + return Objects.hash(entries, nodesIdsForRemoval); } @Override public String toString() { - StringBuilder builder = new StringBuilder("SnapshotsInProgress["); + StringBuilder builder = new StringBuilder("SnapshotsInProgress[entries=["); final Iterator entryList = asStream().iterator(); boolean firstEntry = true; while (entryList.hasNext()) { @@ -250,7 +290,7 @@ public String toString() { builder.append(entryList.next().snapshot().getSnapshotId().getName()); firstEntry = false; } - return builder.append("]").toString(); + return builder.append("],nodeIdsForRemoval=").append(nodesIdsForRemoval).append("]").toString(); } /** @@ -324,6 +364,10 @@ public static boolean completed(Collection shards) { return true; } + public boolean isNodeIdForRemoval(String nodeId) { + return nodeId != null && 
nodesIdsForRemoval.contains(nodeId); + } + private static boolean hasFailures(Map clones) { for (ShardSnapshotStatus value : clones.values()) { if (value.state().failed()) { @@ -384,6 +428,76 @@ private static boolean assertShardStateConsistent( return true; } + /** + * Adds any new node IDs to {@link #nodesIdsForRemoval}, and removes any node IDs that are no longer marked for shutdown if they have no + * running shard snapshots. + */ + public SnapshotsInProgress withUpdatedNodeIdsForRemoval(ClusterState clusterState) { + assert clusterState.getMinTransportVersion().onOrAfter(TransportVersions.SNAPSHOTS_IN_PROGRESS_TRACKING_REMOVING_NODES_ADDED); + + final var updatedNodeIdsForRemoval = new HashSet<>(nodesIdsForRemoval); + + final var nodeIdsMarkedForRemoval = getNodesIdsMarkedForRemoval(clusterState); + + // add any nodes newly marked for removal + updatedNodeIdsForRemoval.addAll(nodeIdsMarkedForRemoval); + + // remove any nodes which are no longer marked for shutdown if they have no running shard snapshots + updatedNodeIdsForRemoval.removeAll(getObsoleteNodeIdsForRemoval(nodeIdsMarkedForRemoval)); + + if (updatedNodeIdsForRemoval.equals(nodesIdsForRemoval)) { + return this; + } else { + return new SnapshotsInProgress(entries, Collections.unmodifiableSet(updatedNodeIdsForRemoval)); + } + } + + private static Set getNodesIdsMarkedForRemoval(ClusterState clusterState) { + final var nodesShutdownMetadata = clusterState.metadata().nodeShutdowns(); + final var shutdownMetadataCount = nodesShutdownMetadata.getAllNodeIds().size(); + if (shutdownMetadataCount == 0) { + return Set.of(); + } + + final Set result = Sets.newHashSetWithExpectedSize(shutdownMetadataCount); + for (final var entry : nodesShutdownMetadata.getAll().entrySet()) { + if (entry.getValue().getType() != SingleNodeShutdownMetadata.Type.RESTART) { + // Only pause the snapshot when the node is being removed (to let shards vacate) and not when it is restarting in place. If + // it is restarting and there are replicas to promote then we need #71333 to move the shard snapshot over; if there are no + // replicas then we do not expect the restart to be graceful so a PARTIAL or FAILED snapshot is ok. + result.add(entry.getKey()); + } + } + return result; + } + + private Set getObsoleteNodeIdsForRemoval(Set latestNodeIdsMarkedForRemoval) { + final var obsoleteNodeIdsForRemoval = new HashSet<>(nodesIdsForRemoval); + obsoleteNodeIdsForRemoval.removeIf(latestNodeIdsMarkedForRemoval::contains); + if (obsoleteNodeIdsForRemoval.isEmpty()) { + return Set.of(); + } + for (final var byRepo : entries.values()) { + for (final var entry : byRepo.entries()) { + if (entry.state() == State.STARTED && entry.hasShardsInInitState()) { + for (final var shardSnapshotStatus : entry.shards().values()) { + if (shardSnapshotStatus.state() == ShardState.INIT) { + obsoleteNodeIdsForRemoval.remove(shardSnapshotStatus.nodeId()); + if (obsoleteNodeIdsForRemoval.isEmpty()) { + return Set.of(); + } + } + } + } + } + } + return obsoleteNodeIdsForRemoval; + } + + public boolean nodeIdsForRemovalChanged(SnapshotsInProgress other) { + return nodesIdsForRemoval.equals(other.nodesIdsForRemoval) == false; + } + public enum ShardState { INIT((byte) 0, false, false), SUCCESS((byte) 2, true, false), @@ -397,7 +511,12 @@ public enum ShardState { /** * Shard snapshot is waiting for another shard snapshot for the same shard and to the same repository to finish. 
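To make the gating rule above concrete, here is a simplified, hypothetical model of `getObsoleteNodeIdsForRemoval` (toy types and a flat map of shard states; the real code walks the per-repository snapshot entries):

```java
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// A node that is no longer marked for shutdown may only leave the tracking set
// once none of its shard snapshots remain in INIT state; resuming earlier could
// race with a not-yet-applied move to PAUSED_FOR_NODE_REMOVAL.
public class NodeRemovalGatingExample {
    enum ShardState { INIT, PAUSED_FOR_NODE_REMOVAL, SUCCESS }

    static Set<String> obsoleteNodeIds(
        Set<String> tracked,                        // stand-in for nodesIdsForRemoval
        Set<String> stillMarkedForRemoval,          // from the shutdown metadata
        Map<String, ShardState> shardStateByNodeId  // toy stand-in for the snapshot entries
    ) {
        var obsolete = new HashSet<>(tracked);
        obsolete.removeIf(stillMarkedForRemoval::contains);
        shardStateByNodeId.forEach((nodeId, state) -> {
            if (state == ShardState.INIT) {
                obsolete.remove(nodeId); // keep tracking until the INIT shard finishes pausing
            }
        });
        return obsolete;
    }

    public static void main(String[] args) {
        System.out.println(obsoleteNodeIds(
            Set.of("n1", "n2"),
            Set.of(),
            Map.of("n1", ShardState.INIT, "n2", ShardState.SUCCESS)
        )); // [n2] only; n1 stays tracked
    }
}
```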
*/ - QUEUED((byte) 7, false, false); + QUEUED((byte) 7, false, false), + /** + * Primary shard is assigned to a node which is marked for removal from the cluster (or which was previously marked for removal and + * we're still waiting for its other shards to pause). + */ + PAUSED_FOR_NODE_REMOVAL((byte) 8, false, false); private final byte value; @@ -428,6 +547,7 @@ public static ShardState fromValue(byte value) { case 5 -> MISSING; case 6 -> WAITING; case 7 -> QUEUED; + case 8 -> PAUSED_FOR_NODE_REMOVAL; default -> throw new IllegalArgumentException("No shard snapshot state for value [" + value + "]"); }; } @@ -539,7 +659,8 @@ public ShardSnapshotStatus( private boolean assertConsistent() { // If the state is failed we have to have a reason for this failure assert state.failed() == false || reason != null; - assert (state != ShardState.INIT && state != ShardState.WAITING) || nodeId != null : "Null node id for state [" + state + "]"; + assert (state != ShardState.INIT && state != ShardState.WAITING && state != ShardState.PAUSED_FOR_NODE_REMOVAL) + || nodeId != null : "Null node id for state [" + state + "]"; assert state != ShardState.QUEUED || (nodeId == null && generation == null && reason == null) : "Found unexpected non-null values for queued state shard nodeId[" + nodeId + "][" + generation + "][" + reason + "]"; assert state == ShardState.SUCCESS || shardSnapshotResult == null; @@ -584,10 +705,14 @@ public ShardSnapshotResult shardSnapshotResult() { /** * Checks if this shard snapshot is actively executing. * A shard is defined as actively executing if it either is in a state that may write to the repository - * ({@link ShardState#INIT} or {@link ShardState#ABORTED}) or about to write to it in state {@link ShardState#WAITING}. + * ({@link ShardState#INIT} or {@link ShardState#ABORTED}) or about to write to it in state {@link ShardState#WAITING} or + * {@link ShardState#PAUSED_FOR_NODE_REMOVAL}. 
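Since `fromValue` decodes the wire byte back into the enum, adding a constant such as `PAUSED_FOR_NODE_REMOVAL((byte) 8, ...)` only works if the decoding switch gains the matching case. A minimal round-trip sketch (reduced constant set, illustrative only):

```java
// Byte-coded enum round trip, mirroring the ShardState pattern above.
public class ShardStateRoundTripExample {
    enum State {
        INIT((byte) 0), SUCCESS((byte) 2), QUEUED((byte) 7), PAUSED_FOR_NODE_REMOVAL((byte) 8);

        final byte value;
        State(byte value) { this.value = value; }

        static State fromValue(byte value) {
            return switch (value) {
                case 0 -> INIT;
                case 2 -> SUCCESS;
                case 7 -> QUEUED;
                case 8 -> PAUSED_FOR_NODE_REMOVAL; // the new case must accompany the new constant
                default -> throw new IllegalArgumentException("No state for value [" + value + "]");
            };
        }
    }

    public static void main(String[] args) {
        for (State s : State.values()) {
            if (State.fromValue(s.value) != s) {
                throw new AssertionError("wire value does not round-trip: " + s);
            }
        }
        System.out.println(State.fromValue((byte) 8)); // PAUSED_FOR_NODE_REMOVAL
    }
}
```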
*/ public boolean isActive() { - return state == ShardState.INIT || state == ShardState.ABORTED || state == ShardState.WAITING; + return switch (state) { + case INIT, ABORTED, WAITING, PAUSED_FOR_NODE_REMOVAL -> true; + case SUCCESS, FAILED, MISSING, QUEUED -> false; + }; } @Override @@ -783,7 +908,7 @@ private static Entry readFrom(StreamInput in) throws IOException { final Map shards = in.readImmutableMap(ShardId::new, ShardSnapshotStatus::readFrom); final long repositoryStateId = in.readLong(); final String failure = in.readOptionalString(); - final Map userMetadata = in.readMap(); + final Map userMetadata = in.readGenericMap(); final IndexVersion version = IndexVersion.readVersion(in); final List dataStreams = in.readStringCollectionAsImmutableList(); final SnapshotId source = in.readOptionalWriteable(SnapshotId::new); @@ -824,6 +949,9 @@ private static boolean assertShardsConsistent( if ((state == State.INIT || state == State.ABORTED) && shards.isEmpty()) { return true; } + if (hasInitStateShards) { + assert state == State.STARTED : "shouldn't have INIT-state shards in state " + state; + } final Set indexNames = indices.keySet(); final Set indexNamesInShards = new HashSet<>(); shards.entrySet().forEach(s -> { @@ -1535,9 +1663,11 @@ private static final class SnapshotInProgressDiff implements NamedDiff { private final SnapshotsInProgress after; private final DiffableUtils.MapDiff> mapDiff; + private final Set nodeIdsForRemoval; SnapshotInProgressDiff(SnapshotsInProgress before, SnapshotsInProgress after) { this.mapDiff = DiffableUtils.diff(before.entries, after.entries, DiffableUtils.getStringKeySerializer()); + this.nodeIdsForRemoval = after.nodesIdsForRemoval; this.after = after; } @@ -1551,12 +1681,14 @@ private static final class SnapshotInProgressDiff implements NamedDiff { DiffableUtils.readJdkMapDiff(i, DiffableUtils.getStringKeySerializer(), ByRepo.INT_DIFF_VALUE_SERIALIZER) ) ); + this.nodeIdsForRemoval = readNodeIdsForRemoval(in); this.after = null; } @Override public SnapshotsInProgress apply(Custom part) { - return new SnapshotsInProgress(mapDiff.apply(((SnapshotsInProgress) part).entries)); + final var snapshotsInProgress = (SnapshotsInProgress) part; + return new SnapshotsInProgress(mapDiff.apply(snapshotsInProgress.entries), this.nodeIdsForRemoval); } @Override @@ -1577,6 +1709,11 @@ public void writeTo(StreamOutput out) throws IOException { } else { new SimpleDiffable.CompleteDiff<>(after).writeTo(out); } + if (out.getTransportVersion().onOrAfter(SNAPSHOTS_IN_PROGRESS_TRACKING_REMOVING_NODES_ADDED)) { + out.writeStringCollection(nodeIdsForRemoval); + } else { + assert nodeIdsForRemoval.isEmpty() : nodeIdsForRemoval; + } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index 452aae4a1c467..bcc0425211a9a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -9,8 +9,8 @@ package org.elasticsearch.cluster.action.index; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.mapping.put.AutoPutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.TransportAutoPutMappingAction; import org.elasticsearch.action.support.master.MasterNodeRequest; import 
org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.IndicesAdminClient; @@ -110,6 +110,10 @@ protected void sendUpdateMapping(Index index, Mapping mappingUpdate, ActionListe putMappingRequest.source(mappingUpdate.toString(), XContentType.JSON); putMappingRequest.masterNodeTimeout(dynamicMappingUpdateTimeout); putMappingRequest.timeout(TimeValue.ZERO); - client.execute(AutoPutMappingAction.INSTANCE, putMappingRequest, listener.delegateFailureAndWrap((l, r) -> l.onResponse(null))); + client.execute( + TransportAutoPutMappingAction.TYPE, + putMappingRequest, + listener.delegateFailureAndWrap((l, r) -> l.onResponse(null)) + ); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/LagDetector.java b/server/src/main/java/org/elasticsearch/cluster/coordination/LagDetector.java index 7a9c7c84d0f00..4f6b938b3745e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/LagDetector.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/LagDetector.java @@ -237,12 +237,14 @@ public void onResponse(NodesHotThreadsResponse nodesHotThreadsResponse) { return; } + nodesHotThreadsResponse.mustIncRef(); loggingTaskRunner.enqueueTask( new HotThreadsLoggingTask( discoveryNode, appliedVersion, expectedVersion, - nodesHotThreadsResponse.getNodes().get(0).getHotThreads() + nodesHotThreadsResponse.getNodes().get(0).getHotThreads(), + Releasables.assertOnce(nodesHotThreadsResponse::decRef) ) ); } @@ -298,10 +300,18 @@ public void onFailure(Exception e) { static class HotThreadsLoggingTask extends AbstractRunnable implements Comparable { private final String nodeHotThreads; + private final Releasable releasable; private final String prefix; - HotThreadsLoggingTask(DiscoveryNode discoveryNode, long appliedVersion, long expectedVersion, String nodeHotThreads) { + HotThreadsLoggingTask( + DiscoveryNode discoveryNode, + long appliedVersion, + long expectedVersion, + String nodeHotThreads, + Releasable releasable + ) { this.nodeHotThreads = nodeHotThreads; + this.releasable = releasable; this.prefix = Strings.format( "hot threads from node [%s] lagging at version [%d] despite commit of cluster state version [%d]", discoveryNode.descriptionWithoutAttributes(), @@ -327,6 +337,11 @@ protected void doRun() throws Exception { } } + @Override + public void onAfter() { + Releasables.closeExpectNoException(releasable); + } + @Override public int compareTo(HotThreadsLoggingTask o) { return 0; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java index 781a05d535b16..5c5c5eee17da3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.compress.Compressor; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.io.Streams; -import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.PositionTrackingOutputStreamStreamOutput; @@ -128,7 +127,7 @@ private PublishWithJoinResponse handleIncomingPublishRequest(BytesTransportReque StreamInput in = request.bytes().streamInput(); try { if 
(compressor != null) { - in = new InputStreamStreamInput(compressor.threadLocalInputStream(in)); + in = compressor.threadLocalStreamInput(in); } in = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry); in.setTransportVersion(request.version()); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java index c6463949f774f..20c1139884b24 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java @@ -13,7 +13,6 @@ import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; @@ -52,7 +51,7 @@ private static ClusterState readCompressed( try ( var bytesStreamInput = bytes.streamInput(); var in = new NamedWriteableAwareStreamInput( - new InputStreamStreamInput(CompressorFactory.COMPRESSOR.threadLocalInputStream(bytesStreamInput)), + CompressorFactory.COMPRESSOR.threadLocalStreamInput(bytesStreamInput), namedWriteableRegistry ) ) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java index cc0ff0b26f4d7..6147712a5d70a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java @@ -24,7 +24,6 @@ import org.elasticsearch.xcontent.XContentParserConfiguration; import java.io.IOException; -import java.io.InputStream; import java.util.Optional; import java.util.function.Function; @@ -142,14 +141,11 @@ public static void validateAliasFilter( assert searchExecutionContext != null; try ( - InputStream inputStream = filter.streamInput(); - XContentParser parser = XContentFactory.xContentType(inputStream) - .xContent() - .createParser( - XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry) - .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), - filter.streamInput() - ) + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry).withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), + filter, + XContentHelper.xContentType(filter) + ) ) { validateAliasFilter(parser, searchExecutionContext); } catch (Exception e) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java index dac5005e0e043..8b5548c9ea8ef 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java @@ -88,7 +88,7 @@ public ComponentTemplate(StreamInput in) throws IOException { this.template = new Template(in); this.version = in.readOptionalVLong(); if (in.readBoolean()) { - this.metadata = in.readMap(); + this.metadata = in.readGenericMap(); } else { this.metadata = null; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java 
b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java index a0dd7bc3e9eef..2cc09beffa28a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java @@ -145,7 +145,7 @@ public ComposableIndexTemplate(StreamInput in) throws IOException { this.componentTemplates = in.readOptionalStringCollectionAsList(); this.priority = in.readOptionalVLong(); this.version = in.readOptionalVLong(); - this.metadata = in.readMap(); + this.metadata = in.readGenericMap(); this.dataStreamTemplate = in.readOptionalWriteable(DataStreamTemplate::new); this.allowAutoCreate = in.readOptionalBoolean(); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index 34d8515d2dfdd..84db5887b5926 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.util.FeatureFlag; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; @@ -38,7 +39,6 @@ import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -110,6 +110,7 @@ public static boolean isFailureStoreEnabled() { private final IndexMode indexMode; @Nullable private final DataStreamLifecycle lifecycle; + private final boolean rolloverOnWrite; private final boolean failureStore; private final List failureIndices; @@ -140,7 +141,41 @@ public DataStream( indexMode, lifecycle, failureStore, - failureIndices + failureIndices, + false + ); + } + + public DataStream( + String name, + List indices, + long generation, + Map metadata, + boolean hidden, + boolean replicated, + boolean system, + boolean allowCustomRouting, + IndexMode indexMode, + DataStreamLifecycle lifecycle, + boolean failureStore, + List failureIndices, + boolean rolloverOnWrite + ) { + this( + name, + indices, + generation, + metadata, + hidden, + replicated, + system, + System::currentTimeMillis, + allowCustomRouting, + indexMode, + lifecycle, + failureStore, + failureIndices, + rolloverOnWrite ); } @@ -158,7 +193,8 @@ public DataStream( IndexMode indexMode, DataStreamLifecycle lifecycle, boolean failureStore, - List failureIndices + List failureIndices, + boolean rolloverOnWrite ) { this.name = name; this.indices = List.copyOf(indices); @@ -176,6 +212,7 @@ public DataStream( this.failureStore = failureStore; this.failureIndices = failureIndices; assert assertConsistent(this.indices); + this.rolloverOnWrite = rolloverOnWrite; } // mainly available for testing @@ -236,6 +273,10 @@ public Index getWriteIndex() { return indices.get(indices.size() - 1); } + public boolean rolloverOnWrite() { + return rolloverOnWrite; + } + /** * @param timestamp The timestamp used to select a backing index based on its start 
and end time. * @param metadata The metadata that is used to fetch the start and end times for backing indices of this data stream. @@ -616,7 +657,8 @@ public DataStream promoteDataStream() { indexMode, lifecycle, failureStore, - failureIndices + failureIndices, + rolloverOnWrite ); } @@ -859,7 +901,7 @@ public DataStream(StreamInput in) throws IOException { readName(in), readIndices(in), in.readVLong(), - in.readMap(), + in.readGenericMap(), in.readBoolean(), in.readBoolean(), in.readBoolean(), @@ -867,7 +909,8 @@ public DataStream(StreamInput in) throws IOException { in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0) ? in.readOptionalEnum(IndexMode.class) : null, in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) ? in.readOptionalWriteable(DataStreamLifecycle::new) : null, in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? in.readBoolean() : false, - in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? readIndices(in) : List.of() + in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? readIndices(in) : List.of(), + in.getTransportVersion().onOrAfter(TransportVersions.LAZY_ROLLOVER_ADDED) ? in.readBoolean() : false ); } @@ -908,6 +951,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(failureStore); out.writeCollection(failureIndices); } + if (out.getTransportVersion().onOrAfter(TransportVersions.LAZY_ROLLOVER_ADDED)) { + out.writeBoolean(rolloverOnWrite); + } } public static final ParseField NAME_FIELD = new ParseField("name"); @@ -923,11 +969,15 @@ public void writeTo(StreamOutput out) throws IOException { public static final ParseField LIFECYCLE = new ParseField("lifecycle"); public static final ParseField FAILURE_STORE_FIELD = new ParseField("failure_store"); public static final ParseField FAILURE_INDICES_FIELD = new ParseField("failure_indices"); + public static final ParseField ROLLOVER_ON_WRITE_FIELD = new ParseField("rollover_on_write"); @SuppressWarnings("unchecked") - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "data_stream", - args -> new DataStream( + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("data_stream", args -> { + // Fields behind a feature flag need to be parsed last otherwise the parser will fail when the feature flag is disabled. + // Until the feature flag is removed we keep them separately to be mindful of this. + boolean failureStoreEnabled = DataStream.isFailureStoreEnabled() && args[11] != null && (boolean) args[11]; + List failureStoreIndices = DataStream.isFailureStoreEnabled() && args[12] != null ? (List) args[12] : List.of(); + return new DataStream( (String) args[0], (List) args[1], (Long) args[2], @@ -938,10 +988,11 @@ public void writeTo(StreamOutput out) throws IOException { args[7] != null && (boolean) args[7], args[8] != null ? IndexMode.fromString((String) args[8]) : null, (DataStreamLifecycle) args[9], - DataStream.isFailureStoreEnabled() && args[10] != null && (boolean) args[10], - DataStream.isFailureStoreEnabled() && args[11] != null ? 
(List) args[11] : List.of() - ) - ); + failureStoreEnabled, + failureStoreIndices, + args[10] != null && (boolean) args[10] + ); + }); static { PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME_FIELD); @@ -962,6 +1013,8 @@ public void writeTo(StreamOutput out) throws IOException { PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ALLOW_CUSTOM_ROUTING); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), INDEX_MODE); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> DataStreamLifecycle.fromXContent(p), LIFECYCLE); + PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ROLLOVER_ON_WRITE_FIELD); + // The fields behind the feature flag should always be last. if (DataStream.isFailureStoreEnabled()) { PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_STORE_FIELD); PARSER.declareObjectArray( @@ -1014,6 +1067,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla builder.field(LIFECYCLE.getPreferredName()); lifecycle.toXContent(builder, params, rolloverConfiguration); } + builder.field(ROLLOVER_ON_WRITE_FIELD.getPreferredName(), rolloverOnWrite); builder.endObject(); return builder; } @@ -1034,7 +1088,8 @@ public boolean equals(Object o) { && indexMode == that.indexMode && Objects.equals(lifecycle, that.lifecycle) && failureStore == that.failureStore - && failureIndices.equals(that.failureIndices); + && failureIndices.equals(that.failureIndices) + && rolloverOnWrite == that.rolloverOnWrite; } @Override @@ -1051,7 +1106,8 @@ public int hashCode() { indexMode, lifecycle, failureStore, - failureIndices + failureIndices, + rolloverOnWrite ); } @@ -1129,8 +1185,7 @@ private static Instant getTimeStampFromRaw(Object rawTimestamp) { } private static Instant getTimestampFromParser(BytesReference source, XContentType xContentType) { - XContent xContent = xContentType.xContent(); - try (XContentParser parser = xContent.createParser(TS_EXTRACT_CONFIG, source.streamInput())) { + try (XContentParser parser = XContentHelper.createParserNotCompressed(TS_EXTRACT_CONFIG, source, xContentType)) { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser); return switch (parser.nextToken()) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java index 8d7ce0525e943..83a5d99c8f348 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java @@ -48,7 +48,7 @@ public class DataStreamLifecycle implements SimpleDiffable, ToXContentObject { // Versions over the wire - public static final TransportVersion ADDED_ENABLED_FLAG_VERSION = TransportVersions.V_8_500_057; + public static final TransportVersion ADDED_ENABLED_FLAG_VERSION = TransportVersions.V_8_500_061; public static final String DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME = "data_streams.lifecycle_only.mode"; @@ -190,10 +190,8 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeOptionalWriteable(dataRetention); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { - out.writeOptionalWriteable(downsampling); - } if 
(out.getTransportVersion().onOrAfter(ADDED_ENABLED_FLAG_VERSION)) { + out.writeOptionalWriteable(downsampling); out.writeBoolean(enabled); } } @@ -204,14 +202,11 @@ public DataStreamLifecycle(StreamInput in) throws IOException { } else { dataRetention = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { - downsampling = in.readOptionalWriteable(Downsampling::read); - } else { - downsampling = null; - } if (in.getTransportVersion().onOrAfter(ADDED_ENABLED_FLAG_VERSION)) { + downsampling = in.readOptionalWriteable(Downsampling::read); enabled = in.readBoolean(); } else { + downsampling = null; enabled = true; } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java index 7c74014a1da2e..51a66ab094591 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java @@ -34,7 +34,7 @@ public class DiffableStringMap extends AbstractMap implements Di @SuppressWarnings("unchecked") public static DiffableStringMap readFrom(StreamInput in) throws IOException { - final Map map = (Map) in.readMap(); + final Map map = (Map) in.readGenericMap(); return map.isEmpty() ? EMPTY : new DiffableStringMap(map); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java index 2ebcad22185fd..6b81aa230f0d9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java @@ -41,12 +41,13 @@ public class MetadataDataStreamsService { private final ClusterService clusterService; private final IndicesService indicesService; - private final MasterServiceTaskQueue taskQueue; + private final MasterServiceTaskQueue updateLifecycleTaskQueue; + private final MasterServiceTaskQueue setRolloverOnWriteTaskQueue; public MetadataDataStreamsService(ClusterService clusterService, IndicesService indicesService) { this.clusterService = clusterService; this.indicesService = indicesService; - ClusterStateTaskExecutor executor = new SimpleBatchedAckListenerTaskExecutor<>() { + ClusterStateTaskExecutor updateLifecycleExecutor = new SimpleBatchedAckListenerTaskExecutor<>() { @Override public Tuple executeTask( @@ -61,7 +62,25 @@ public Tuple executeTask( }; // We chose high priority because changing the lifecycle changes the retention of a backing index, so processing it quickly // can either free space when the retention is shortened, or prevent an index from being deleted when the retention is extended.
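The new `rolloverOnWrite` flag is read and written only when both sides of the connection are on `TransportVersions.LAZY_ROLLOVER_ADDED` or later, with older peers falling back to `false`. A minimal sketch of that version-gating pattern, using the same `StreamInput`/`StreamOutput` calls as the hunks above (the `ExampleDoc` class itself is hypothetical):

```java
import java.io.IOException;

import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

class ExampleDoc implements Writeable {
    private final boolean newFlag;

    ExampleDoc(StreamInput in) throws IOException {
        // Older nodes never send the flag, so fall back to a safe default.
        this.newFlag = in.getTransportVersion().onOrAfter(TransportVersions.LAZY_ROLLOVER_ADDED)
            ? in.readBoolean()
            : false;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // Only send the flag to nodes that can read it; reader and writer must
        // gate on the same version cutoff or the stream desynchronizes.
        if (out.getTransportVersion().onOrAfter(TransportVersions.LAZY_ROLLOVER_ADDED)) {
            out.writeBoolean(newFlag);
        }
    }
}
```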
- this.taskQueue = clusterService.createTaskQueue("modify-lifecycle", Priority.HIGH, executor); + this.updateLifecycleTaskQueue = clusterService.createTaskQueue("modify-lifecycle", Priority.HIGH, updateLifecycleExecutor); + ClusterStateTaskExecutor rolloverOnWriteExecutor = new SimpleBatchedAckListenerTaskExecutor<>() { + + @Override + public Tuple executeTask( + SetRolloverOnWriteTask setRolloverOnWriteTask, + ClusterState clusterState + ) { + return new Tuple<>( + setRolloverOnWrite(clusterState, setRolloverOnWriteTask.getDataStreamName(), setRolloverOnWriteTask.rolloverOnWrite()), + setRolloverOnWriteTask + ); + } + }; + this.setRolloverOnWriteTaskQueue = clusterService.createTaskQueue( + "data-stream-rollover-on-write", + Priority.NORMAL, + rolloverOnWriteExecutor + ); } public void modifyDataStream(final ModifyDataStreamsAction.Request request, final ActionListener listener) { @@ -93,7 +112,11 @@ public void setLifecycle( TimeValue masterTimeout, final ActionListener listener ) { - taskQueue.submitTask("set-lifecycle", new UpdateLifecycleTask(dataStreamNames, lifecycle, ackTimeout, listener), masterTimeout); + updateLifecycleTaskQueue.submitTask( + "set-lifecycle", + new UpdateLifecycleTask(dataStreamNames, lifecycle, ackTimeout, listener), + masterTimeout + ); } /** @@ -105,7 +128,11 @@ public void removeLifecycle( TimeValue masterTimeout, ActionListener listener ) { - taskQueue.submitTask("delete-lifecycle", new UpdateLifecycleTask(dataStreamNames, null, ackTimeout, listener), masterTimeout); + updateLifecycleTaskQueue.submitTask( + "delete-lifecycle", + new UpdateLifecycleTask(dataStreamNames, null, ackTimeout, listener), + masterTimeout + ); } @SuppressForbidden(reason = "legacy usage of unbatched task") // TODO add support for batching here @@ -113,6 +140,23 @@ private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String clusterService.submitUnbatchedStateUpdateTask(source, task); } + /** + * Submits the task to signal that the next time this data stream receives a document, it will be rolled over. + */ + public void setRolloverOnWrite( + String dataStreamName, + boolean rolloverOnWrite, + TimeValue ackTimeout, + TimeValue masterTimeout, + ActionListener listener + ) { + setRolloverOnWriteTaskQueue.submitTask( + "set-rollover-on-write", + new SetRolloverOnWriteTask(dataStreamName, rolloverOnWrite, ackTimeout, listener), + masterTimeout + ); + } + /** * Computes the resulting cluster state after applying all requested data stream modifications in order. 
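The `setRolloverOnWrite` cluster-state update below follows the usual immutable-update shape: return `currentState` untouched when the flag already has the requested value, otherwise rebuild the `DataStream` with every field copied and only `rolloverOnWrite` swapped. A JDK-only sketch of that copy-with pattern (the `StreamSettings` record is a hypothetical stand-in for the immutable metadata object):

```java
// Hypothetical immutable value, analogous to a DataStream held in cluster state.
record StreamSettings(String name, boolean hidden, boolean rolloverOnWrite) {

    // Copy-with: short-circuit when nothing changes, mirroring the early
    // return in setRolloverOnWrite; otherwise copy all fields and swap one.
    StreamSettings withRolloverOnWrite(boolean value) {
        return value == rolloverOnWrite ? this : new StreamSettings(name, hidden, value);
    }
}
```

Returning the identical instance on a no-op update lets the cluster-state machinery recognize that nothing changed, which is presumably why the method checks the flag before building anything.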
* @@ -175,6 +219,42 @@ static ClusterState updateDataLifecycle( return ClusterState.builder(currentState).metadata(builder.build()).build(); } + /** + * Creates an updated cluster state in which the requested data stream has the flag {@link DataStream#rolloverOnWrite()} + * set to the value of the parameter rolloverOnWrite + * + * @param currentState the initial cluster state + * @param dataStreamName the name of the data stream to be updated + * @param rolloverOnWrite the value of the flag + * @return the updated cluster state + */ + public static ClusterState setRolloverOnWrite(ClusterState currentState, String dataStreamName, boolean rolloverOnWrite) { + Metadata metadata = currentState.metadata(); + var dataStream = validateDataStream(metadata, dataStreamName); + if (dataStream.rolloverOnWrite() == rolloverOnWrite) { + return currentState; + } + Metadata.Builder builder = Metadata.builder(metadata); + builder.put( + new DataStream( + dataStream.getName(), + dataStream.getIndices(), + dataStream.getGeneration(), + dataStream.getMetadata(), + dataStream.isHidden(), + dataStream.isReplicated(), + dataStream.isSystem(), + dataStream.isAllowCustomRouting(), + dataStream.getIndexMode(), + dataStream.getLifecycle(), + dataStream.isFailureStore(), + dataStream.getFailureIndices(), + rolloverOnWrite + ) + ); + return ClusterState.builder(currentState).metadata(builder.build()).build(); + } + private static void addBackingIndex( Metadata metadata, Metadata.Builder builder, @@ -270,4 +350,32 @@ public DataStreamLifecycle getDataLifecycle() { return lifecycle; } } + + /** + * A cluster state update task that consists of the cluster state request and the listeners that need to be notified upon completion. + */ + static class SetRolloverOnWriteTask extends AckedBatchedClusterStateUpdateTask { + + private final String dataStreamName; + private final boolean rolloverOnWrite; + + SetRolloverOnWriteTask( + String dataStreamName, + boolean rolloverOnWrite, + TimeValue ackTimeout, + ActionListener listener + ) { + super(ackTimeout, listener); + this.dataStreamName = dataStreamName; + this.rolloverOnWrite = rolloverOnWrite; + } + + public String getDataStreamName() { + return dataStreamName; + } + + public boolean rolloverOnWrite() { + return rolloverOnWrite; + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index 43f117acbd9fe..e77a7b27e1a2c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -337,7 +337,7 @@ public DiscoveryNode(StreamInput in) throws IOException { } } this.roles = Collections.unmodifiableSortedSet(roles); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { versionInfo = new VersionInformation(Version.readVersion(in), IndexVersion.readVersion(in), IndexVersion.readVersion(in)); } else { versionInfo = inferVersionInformation(Version.readVersion(in)); @@ -374,7 +374,7 @@ public void writeTo(StreamOutput out) throws IOException { o.writeString(role.roleNameAbbreviation()); o.writeBoolean(role.canContainData()); }); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { Version.writeVersion(versionInfo.nodeVersion(), out); 
IndexVersion.writeVersion(versionInfo.minIndexVersion(), out); IndexVersion.writeVersion(versionInfo.maxIndexVersion(), out); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ExpectedShardSizeEstimator.java b/server/src/main/java/org/elasticsearch/cluster/routing/ExpectedShardSizeEstimator.java index 1f364e1ace6e4..46a45a058f123 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/ExpectedShardSizeEstimator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ExpectedShardSizeEstimator.java @@ -48,6 +48,8 @@ public static boolean shouldReserveSpaceForInitializingShard(ShardRouting shard, case PEER -> true; // Snapshot restore (unless it is partial) requires downloading all segments locally from the blobstore to start the shard. + // See org.elasticsearch.xpack.searchablesnapshots.action.TransportMountSearchableSnapshotAction.buildIndexSettings + // and DiskThresholdDecider.SETTING_IGNORE_DISK_WATERMARKS case SNAPSHOT -> metadata.getIndexSafe(shard.index()).isPartialSearchableSnapshot() == false; // shrink/split/clone operation is going to clone existing locally placed shards using file system hard links diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java index cd05ca3d523d8..0d40bd2d08c14 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.ByteUtils; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.transport.Transports; import org.elasticsearch.xcontent.XContentParser; @@ -281,16 +282,14 @@ public Builder builder() { private Builder hashSource(XContentType sourceType, BytesReference source) { Builder b = builder(); - try { - try (XContentParser parser = sourceType.xContent().createParser(parserConfig, source.streamInput())) { - parser.nextToken(); // Move to first token - if (parser.currentToken() == null) { - throw new IllegalArgumentException("Error extracting routing: source didn't contain any routing fields"); - } - parser.nextToken(); - b.extractObject(null, parser); - ensureExpectedToken(null, parser.nextToken(), parser); + try (XContentParser parser = XContentHelper.createParserNotCompressed(parserConfig, source, sourceType)) { + parser.nextToken(); // Move to first token + if (parser.currentToken() == null) { + throw new IllegalArgumentException("Error extracting routing: source didn't contain any routing fields"); } + parser.nextToken(); + b.extractObject(null, parser); + ensureExpectedToken(null, parser.nextToken(), parser); } catch (IOException | ParsingException e) { throw new IllegalArgumentException("Error extracting routing: " + e.getMessage(), e); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index 986a6bd0385e8..723d65fbc2a3f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -148,33 +148,6 @@ public IndexShardRoutingTable shardRoutingTable(ShardId shardId) { return shard; } - /** - * Try to deduplicate the given shard routing with an equal instance found
in this routing table. This is used by the logic of the - * {@link org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider} and - * {@link org.elasticsearch.cluster.InternalClusterInfoService} to deduplicate instances created by a master node and those read from - * the network to speed up the use of {@link ShardRouting} as a map key in {@link org.elasticsearch.cluster.ClusterInfo#getDataPath}. - * - * @param shardRouting shard routing to deduplicate - * @return deduplicated shard routing from this routing table if an equivalent shard routing was found or the given instance otherwise - */ - public ShardRouting deduplicate(ShardRouting shardRouting) { - final IndexRoutingTable indexShardRoutingTable = indicesRouting.get(shardRouting.index().getName()); - if (indexShardRoutingTable == null) { - return shardRouting; - } - final IndexShardRoutingTable shardRoutingTable = indexShardRoutingTable.shard(shardRouting.id()); - if (shardRoutingTable == null) { - return shardRouting; - } - for (int i = 0; i < shardRoutingTable.size(); i++) { - ShardRouting found = shardRoutingTable.shard(i); - if (shardRouting.equals(found)) { - return found; - } - } - return shardRouting; - } - @Nullable public ShardRouting getByAllocationId(ShardId shardId, String allocationId) { final IndexRoutingTable indexRoutingTable = index(shardId.getIndex()); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java index 59b344080c054..bde667df3821a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java @@ -15,7 +15,6 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; -import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -422,7 +421,7 @@ public AllocationStatus getLastAllocationStatus() { * This set will be discarded when a shard moves to started. And if a shard is failed while started (i.e., from started to unassigned), * the currently assigned node won't be added to this set. 
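For context, the `deduplicate` method removed above was an instance-interning helper: find an equal `ShardRouting` already held by the canonical routing table and return that instance, so later equality and hash-based lookups on the shared instance stay cheap. A generic JDK sketch of the same interning idea (all names hypothetical):

```java
import java.util.List;
import java.util.Map;

class CanonicalLookup {
    // Return the canonical equal instance when one exists, else the input itself.
    static <T> T dedupe(Map<String, List<T>> canonical, String key, T value) {
        List<T> candidates = canonical.get(key);
        if (candidates == null) {
            return value;
        }
        for (T candidate : candidates) {
            if (value.equals(candidate)) {
                return candidate; // reuse the instance already in the table
            }
        }
        return value;
    }
}
```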
* - * @see org.elasticsearch.gateway.ReplicaShardAllocator#processExistingRecoveries(RoutingAllocation) + * @see org.elasticsearch.gateway.ReplicaShardAllocator#processExistingRecoveries * @see org.elasticsearch.cluster.routing.allocation.AllocationService#applyFailedShards(ClusterState, List, List) */ public Set getFailedNodeIds() { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 3ad5e7fa43fe1..20e7429fcfaa3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -52,6 +52,7 @@ import java.util.Map; import java.util.Set; import java.util.function.Function; +import java.util.function.Predicate; import java.util.function.Supplier; import java.util.stream.Collectors; @@ -543,7 +544,10 @@ private void allocateExistingUnassignedShards(RoutingAllocation allocation) { } for (final ExistingShardsAllocator existingShardsAllocator : existingShardsAllocators.values()) { - existingShardsAllocator.afterPrimariesBeforeReplicas(allocation); + existingShardsAllocator.afterPrimariesBeforeReplicas( + allocation, + shardRouting -> getAllocatorForShard(shardRouting, allocation) == existingShardsAllocator + ); } final RoutingNodes.UnassignedShards.UnassignedIterator replicaIterator = allocation.routingNodes().unassigned().iterator(); @@ -695,7 +699,7 @@ private NotFoundAllocator(String allocatorName) { public void beforeAllocation(RoutingAllocation allocation) {} @Override - public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) {} + public void afterPrimariesBeforeReplicas(RoutingAllocation allocation, Predicate isRelevantShardPredicate) {} @Override public void allocateUnassigned( diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java index 6645fd7d0e895..bb0ca372e6a4c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -232,7 +232,7 @@ public void onNewInfo(ClusterInfo info) { } } - final long reservedSpace = info.getReservedSpace(usage.getNodeId(), usage.getPath()).getTotal(); + final long reservedSpace = info.getReservedSpace(usage.getNodeId(), usage.getPath()).total(); final DiskUsage usageWithReservedSpace = new DiskUsage( usage.getNodeId(), usage.getNodeName(), diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ExistingShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ExistingShardsAllocator.java index b09400a787164..31b5e5a7cad41 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ExistingShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ExistingShardsAllocator.java @@ -17,6 +17,7 @@ import org.elasticsearch.gateway.GatewayAllocator; import java.util.List; +import java.util.function.Predicate; /** * Searches for, and allocates, shards for which there is an existing on-disk copy somewhere in the cluster. 
The default implementation is @@ -43,7 +44,15 @@ public interface ExistingShardsAllocator { * Called during a round of allocation after attempting to allocate all the primaries but before any replicas, allowing the allocator * to prepare for replica allocation. */ - void afterPrimariesBeforeReplicas(RoutingAllocation allocation); + @Deprecated(forRemoval = true) + default void afterPrimariesBeforeReplicas(@SuppressWarnings("unused") RoutingAllocation allocation) { + assert false : "must be overridden"; + throw new UnsupportedOperationException(); + } + + default void afterPrimariesBeforeReplicas(RoutingAllocation allocation, Predicate isRelevantShardPredicate) { + afterPrimariesBeforeReplicas(allocation); + } /** * Allocate any unassigned shards in the given {@link RoutingAllocation} for which this {@link ExistingShardsAllocator} is responsible. diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java index ed6b2af2fb55d..4e674648bc3a4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java @@ -109,7 +109,7 @@ public RoutingAllocation( AllocationDeciders deciders, @Nullable RoutingNodes routingNodes, ClusterState clusterState, - @Nullable ClusterInfo clusterInfo, + ClusterInfo clusterInfo, SnapshotShardSizeInfo shardSizeInfo, long currentNanoTime ) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 64f88ac1e2417..56d0966e0594f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -371,6 +371,11 @@ private static long getTotalDiskUsageInBytes(ClusterInfo clusterInfo, Metadata m // Visible for testing static long getIndexDiskUsageInBytes(ClusterInfo clusterInfo, IndexMetadata indexMetadata) { + if (indexMetadata.ignoreDiskWatermarks()) { + // disk watermarks are ignored for partial searchable snapshots, + // which is equivalent to indexMetadata.isPartialSearchableSnapshot() + return 0; + } final long forecastedShardSize = indexMetadata.getForecastedShardSizeInBytes().orElse(-1L); long totalSizeInBytes = 0; int shardCount = 0; @@ -394,6 +399,11 @@ static long getIndexDiskUsageInBytes(ClusterInfo clusterInfo, IndexMetadata inde } private static long getShardDiskUsageInBytes(ShardRouting shardRouting, IndexMetadata indexMetadata, ClusterInfo clusterInfo) { + if (indexMetadata.ignoreDiskWatermarks()) { + // disk watermarks are ignored for partial searchable snapshots, + // which is equivalent to indexMetadata.isPartialSearchableSnapshot() + return 0; + } return Math.max(indexMetadata.getForecastedShardSizeInBytes().orElse(0L), clusterInfo.getShardSize(shardRouting, 0L)); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java index 60a6ec2e49899..effd5ec110c44 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java +++
b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java @@ -84,7 +84,7 @@ public DesiredBalance compute( final var knownNodeIds = routingNodes.getAllNodeIds(); final var changes = routingAllocation.changes(); final var ignoredShards = getIgnoredShardsWithDiscardedAllocationStatus(desiredBalanceInput.ignoredShards()); - final var clusterInfoSimulator = new ClusterInfoSimulator(routingAllocation.clusterInfo()); + final var clusterInfoSimulator = new ClusterInfoSimulator(routingAllocation); if (routingNodes.size() == 0) { return new DesiredBalance(desiredBalanceInput.index(), Map.of()); @@ -274,7 +274,7 @@ public DesiredBalance compute( routingAllocation.setSimulatedClusterInfo(clusterInfoSimulator.getClusterInfo()); logger.trace("running delegate allocator"); delegateAllocator.allocate(routingAllocation); - assert routingNodes.unassigned().size() == 0; // any unassigned shards should now be ignored + assert routingNodes.unassigned().isEmpty(); // any unassigned shards should now be ignored hasChanges = false; for (final var routingNode : routingNodes) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index dc3cbfa8b5ae8..95b0d23b564a2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -98,24 +98,24 @@ public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool thre unassignedShards = LongGaugeMetric.create( meterRegistry, - "es.allocator.desired_balance.shards.unassigned", - "Unassigned shards count", + "es.allocator.desired_balance.shards.unassigned.current", + "Current number of unassigned shards", "{shard}" ); totalAllocations = LongGaugeMetric.create( meterRegistry, - "es.allocator.desired_balance.shards.count", - "Total shards count", + "es.allocator.desired_balance.shards.current", + "Total number of shards", "{shard}" ); undesiredAllocations = LongGaugeMetric.create( meterRegistry, - "es.allocator.desired_balance.allocations.undesired", - "Count of shards allocated on undesired nodes", + "es.allocator.desired_balance.allocations.undesired.current", + "Total number of shards allocated on undesired nodes", "{shard}" ); undesiredAllocationsRatio = meterRegistry.registerDoubleGauge( - "es.allocator.desired_balance.allocations.undesired_ratio", + "es.allocator.desired_balance.allocations.undesired.ratio", "Ratio of undesired allocations to shard count", "1", () -> { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 2fa1994f9f74b..22bed76fa2b2e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -125,7 +125,7 @@ public static long sizeOfUnaccountedShards( ) { // Account for reserved space wherever it is available final ClusterInfo.ReservedSpace reservedSpace = clusterInfo.getReservedSpace(node.nodeId(), dataPath); - long totalSize = reservedSpace.getTotal(); + long totalSize = reservedSpace.total(); // NB this counts all shards on the 
node when the ClusterInfoService retrieved the node stats, which may include shards that are // no longer initializing because their recovery failed or was cancelled. diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java index b562ba8e9482d..ac88374e74b34 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.routing.allocation.decider; import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; @@ -101,6 +102,18 @@ private static Decision canMove(ShardRouting shardRouting, RoutingAllocation all continue; } + if (shardSnapshotStatus.state() == SnapshotsInProgress.ShardState.PAUSED_FOR_NODE_REMOVAL) { + // this shard snapshot is paused pending the removal of its assigned node + final var nodeShutdown = allocation.metadata().nodeShutdowns().get(shardRouting.currentNodeId()); + if (nodeShutdown != null && nodeShutdown.getType() != SingleNodeShutdownMetadata.Type.RESTART) { + // NB we check metadata().nodeShutdowns() too because if the node was marked for removal and then that mark was + // removed then the shard can still be PAUSED_FOR_NODE_REMOVAL while there are other shards on the node which + // haven't finished pausing yet. In that case the shard is about to go back into INIT state again, so we should keep + // it where it is. + continue; + } + } + return allocation.decision( Decision.THROTTLE, NAME, diff --git a/server/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java b/server/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java index a12071f9c27e3..20ab42eba0386 100644 --- a/server/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java +++ b/server/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java @@ -14,6 +14,7 @@ import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.telemetry.metric.LongCounter; +import java.util.Collections; import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.core.Strings.format; @@ -32,6 +33,8 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker { private final String name; private final LongCounter trippedCountMeter; + public static final String CIRCUIT_BREAKER_TYPE_ATTRIBUTE = "type"; + /** * Create a circuit breaker that will break if the number of estimated * bytes grows above the limit. 
All estimations will be multiplied by @@ -68,7 +71,7 @@ public ChildMemoryCircuitBreaker( public void circuitBreak(String fieldName, long bytesNeeded) { final long memoryBytesLimit = this.limitAndOverhead.limit; this.trippedCount.incrementAndGet(); - this.trippedCountMeter.increment(); + this.trippedCountMeter.incrementBy(1L, Collections.singletonMap(CIRCUIT_BREAKER_TYPE_ATTRIBUTE, this.name)); final String message = "[" + this.name + "] Data too large, data for [" diff --git a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java index 337e4cd28c2b3..905373f9400f6 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java @@ -26,9 +26,7 @@ */ public final class ReleasableBytesReference implements RefCounted, Releasable, BytesReference { - public static final Releasable NO_OP = () -> {}; - - private static final ReleasableBytesReference EMPTY = new ReleasableBytesReference(BytesArray.EMPTY, NO_OP); + private static final ReleasableBytesReference EMPTY = new ReleasableBytesReference(BytesArray.EMPTY, RefCounted.ALWAYS_REFERENCED); private final BytesReference delegate; private final RefCounted refCounted; @@ -50,7 +48,7 @@ public ReleasableBytesReference(BytesReference delegate, RefCounted refCounted) public static ReleasableBytesReference wrap(BytesReference reference) { assert reference instanceof ReleasableBytesReference == false : "use #retain() instead of #wrap() on a " + reference.getClass(); - return reference.length() == 0 ? empty() : new ReleasableBytesReference(reference, NO_OP); + return reference.length() == 0 ? 
empty() : new ReleasableBytesReference(reference, ALWAYS_REFERENCED); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenMap.java b/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenMap.java index 9f852f01397da..33d8fbf99f31f 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenMap.java +++ b/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenMap.java @@ -10,9 +10,9 @@ import com.carrotsearch.hppc.ObjectCollection; import com.carrotsearch.hppc.ObjectObjectHashMap; -import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.carrotsearch.hppc.procedures.ObjectObjectProcedure; +import com.carrotsearch.hppc.procedures.ObjectProcedure; import org.elasticsearch.common.util.Maps; @@ -84,18 +84,9 @@ public boolean containsKey(Object key) { } @Override + @SuppressWarnings("unchecked") public boolean containsValue(Object value) { - for (ObjectCursor cursor : map.values()) { - if (Objects.equals(cursor.value, value)) { - return true; - } - } - return false; - } - - @Override - public VType put(KType key, VType value) { - throw new UnsupportedOperationException("modification is not supported"); + return map.values().contains((VType) value); } @Override @@ -103,16 +94,6 @@ public VType remove(Object key) { throw new UnsupportedOperationException("modification is not supported"); } - @Override - public void putAll(Map m) { - throw new UnsupportedOperationException("modification is not supported"); - } - - @Override - public void clear() { - throw new UnsupportedOperationException("modification is not supported"); - } - @Override public int size() { return map.size(); @@ -146,35 +127,7 @@ public int hashCode() { return super.hashCode(); } - private static final class ConversionIterator implements Iterator> { - - private final Iterator> original; - - ConversionIterator(Iterator> original) { - this.original = original; - } - - @Override - public boolean hasNext() { - return original.hasNext(); - } - - @Override - public Map.Entry next() { - final ObjectObjectCursor obj = original.next(); - if (obj == null) { - return null; - } - return new Maps.ImmutableEntry<>(obj.key, obj.value); - } - - @Override - public void remove() { - throw new UnsupportedOperationException("removal is unsupported"); - } - } - - private static final class EntrySet extends AbstractSet> { + private static class EntrySet extends AbstractSet> { private final ObjectObjectHashMap map; private EntrySet(ObjectObjectHashMap map) { @@ -187,13 +140,23 @@ public int size() { } @Override - public void clear() { - throw new UnsupportedOperationException("removal is unsupported"); + public boolean isEmpty() { + return map.isEmpty(); } @Override public Iterator> iterator() { - return new ConversionIterator<>(map.iterator()); + return Iterators.map(map.iterator(), c -> new Maps.ImmutableEntry<>(c.key, c.value)); + } + + @Override + public Spliterator> spliterator() { + return Spliterators.spliterator(iterator(), size(), Spliterator.IMMUTABLE); + } + + @Override + public void forEach(Consumer> action) { + map.forEach((Consumer>) c -> action.accept(new Maps.ImmutableEntry<>(c.key, c.value))); } @SuppressWarnings("unchecked") @@ -204,70 +167,87 @@ public boolean contains(Object o) { } Map.Entry e = (Map.Entry) o; Object key = e.getKey(); - if (map.containsKey((KType) key) == false) { + Object v = map.get((KType) key); + if (v == null && map.containsKey((KType) key) == false) { return false; } 
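The reworked `contains` above also switches from a `containsKey`-then-`get` pair to a single `get`, falling back to `containsKey` only when the result is `null`, so an absent key can still be distinguished from a key mapped to `null`. The same pattern in plain JDK terms (the `containsEntry` helper is hypothetical):

```java
import java.util.Map;
import java.util.Objects;

class SingleLookup {
    // One map lookup in the common case; containsKey runs only for null values.
    static <K, V> boolean containsEntry(Map<K, V> map, Object key, Object expected) {
        V v = map.get(key);
        if (v == null && map.containsKey(key) == false) {
            return false; // key truly absent, not mapped to null
        }
        return Objects.equals(v, expected);
    }
}
```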
- Object val = map.get((KType) key); - return Objects.equals(val, e.getValue()); + return Objects.equals(v, e.getValue()); } @Override - public boolean remove(Object o) { - throw new UnsupportedOperationException("removal is not supported"); + public String toString() { + return map.toString(); + } + } + + private static class MapObjectCollection extends AbstractCollection { + private final ObjectCollection collection; + + private MapObjectCollection(ObjectCollection collection) { + this.collection = collection; } @Override - public Spliterator> spliterator() { - return Spliterators.spliterator(iterator(), size(), Spliterator.SIZED); + public int size() { + return collection.size(); } @Override - public void forEach(Consumer> action) { - map.forEach((Consumer>) ooCursor -> { - Maps.ImmutableEntry entry = new Maps.ImmutableEntry<>(ooCursor.key, ooCursor.value); - action.accept(entry); - }); + public boolean isEmpty() { + return collection.isEmpty(); } - } - private static final class KeySet extends AbstractSet { + @Override + public Iterator iterator() { + return Iterators.map(collection.iterator(), c -> c.value); + } + + @Override + public Spliterator spliterator() { + return Spliterators.spliterator(iterator(), size(), Spliterator.IMMUTABLE); + } - private final ObjectObjectHashMap.KeysContainer keys; + @Override + public void forEach(Consumer action) { + collection.forEach((ObjectProcedure) action::accept); + } - private KeySet(ObjectObjectHashMap.KeysContainer keys) { - this.keys = keys; + @Override + @SuppressWarnings("unchecked") + public boolean contains(Object o) { + return collection.contains((Type) o); } @Override - public Iterator iterator() { - final Iterator> iterator = keys.iterator(); - return new Iterator<>() { - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public KType next() { - return iterator.next().value; - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - }; + public boolean equals(Object obj) { + return collection.equals(obj); } @Override - public int size() { - return keys.size(); + public int hashCode() { + return collection.hashCode(); + } + + @Override + public String toString() { + return collection.toString(); + } + + @Override + public Object[] toArray() { + return collection.toArray(); } @Override @SuppressWarnings("unchecked") - public boolean contains(Object o) { - return keys.contains((KType) o); + public T[] toArray(T[] a) { + return a.length == 0 ? 
(T[]) collection.toArray(a.getClass().getComponentType()) : super.toArray(a); + } + } + + private static class KeySet extends MapObjectCollection implements Set { + private KeySet(ObjectObjectHashMap.KeysContainer keys) { + super(keys); } }; @@ -278,17 +258,7 @@ public Set keySet() { @Override public Collection values() { - return new AbstractCollection() { - @Override - public Iterator iterator() { - return ImmutableOpenMap.iterator(map.values()); - } - - @Override - public int size() { - return map.size(); - } - }; + return new MapObjectCollection<>(map.values()); } @Override @@ -296,26 +266,6 @@ public void forEach(BiConsumer action) { map.forEach((ObjectObjectProcedure) action::accept); } - static Iterator iterator(ObjectCollection collection) { - final Iterator> iterator = collection.iterator(); - return new Iterator<>() { - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public T next() { - return iterator.next().value; - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - }; - } - @Override public String toString() { return map.toString(); @@ -407,9 +357,7 @@ public ImmutableOpenMap build() { */ public Builder putAllFromMap(Map map) { maybeCloneMap(); - for (Map.Entry entry : map.entrySet()) { - this.mutableMap.put(entry.getKey(), entry.getValue()); - } + map.forEach(mutableMap::put); return this; } diff --git a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java index d7c63edac2c94..4b5cef4bbbd45 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java +++ b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java @@ -10,11 +10,13 @@ import org.elasticsearch.core.Nullable; +import java.util.Arrays; import java.util.Collections; import java.util.Iterator; import java.util.NoSuchElementException; import java.util.Objects; import java.util.function.BiPredicate; +import java.util.function.Consumer; import java.util.function.Function; import java.util.function.IntFunction; import java.util.function.ToIntFunction; @@ -90,35 +92,19 @@ public T next() { } return value; } - } - - public static Iterator forArray(T[] array) { - return new ArrayIterator<>(array); - } - - private static final class ArrayIterator implements Iterator { - - private final T[] array; - private int index; - - private ArrayIterator(T[] array) { - this.array = Objects.requireNonNull(array, "Unable to iterate over a null array"); - } @Override - public boolean hasNext() { - return index < array.length; - } - - @Override - public T next() { - if (index >= array.length) { - throw new NoSuchElementException(); + public void forEachRemaining(Consumer action) { + while (index < iterators.length) { + iterators[index++].forEachRemaining(action); } - return array[index++]; } } + public static Iterator forArray(T[] array) { + return Arrays.asList(array).iterator(); + } + public static Iterator forRange(int lowerBoundInclusive, int upperBoundExclusive, IntFunction fn) { assert lowerBoundInclusive <= upperBoundExclusive : lowerBoundInclusive + " vs " + upperBoundExclusive; if (upperBoundExclusive <= lowerBoundInclusive) { @@ -183,6 +169,11 @@ public boolean hasNext() { public U next() { return fn.apply(input.next()); } + + @Override + public void forEachRemaining(Consumer action) { + input.forEachRemaining(t -> action.accept(fn.apply(t))); + } } public static Iterator flatMap(Iterator input, Function> fn) { diff --git 
a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java index bda33e28fa315..5ebcca93889ff 100644 --- a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java +++ b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java @@ -164,11 +164,9 @@ public CompressedXContent(byte[] data) throws IOException { * @return compressed x-content normalized to not contain any whitespaces */ public static CompressedXContent fromJSON(String json) throws IOException { - return new CompressedXContent( - (ToXContentObject) (builder, params) -> builder.copyCurrentStructure( - JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, json) - ) - ); + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, json)) { + return new CompressedXContent((ToXContentObject) (builder, params) -> builder.copyCurrentStructure(parser)); + } } public CompressedXContent(String str) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/common/compress/Compressor.java b/server/src/main/java/org/elasticsearch/common/compress/Compressor.java index a6c4a9521d9b2..239f168306a94 100644 --- a/server/src/main/java/org/elasticsearch/common/compress/Compressor.java +++ b/server/src/main/java/org/elasticsearch/common/compress/Compressor.java @@ -9,7 +9,10 @@ package org.elasticsearch.common.compress; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.InputStreamStreamInput; +import org.elasticsearch.common.io.stream.StreamInput; +import java.io.BufferedInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -18,6 +21,14 @@ public interface Compressor { boolean isCompressed(BytesReference bytes); + /** + * Same as {@link #threadLocalInputStream(InputStream)} but wraps the returned stream as a {@link StreamInput}. + */ + default StreamInput threadLocalStreamInput(InputStream in) throws IOException { + // wrap stream in buffer since InputStreamStreamInput doesn't do any buffering itself but does a lot of small reads + return new InputStreamStreamInput(new BufferedInputStream(threadLocalInputStream(in), DeflateCompressor.BUFFER_SIZE)); + } + /** * Creates a new input stream that decompresses the contents read from the provided input stream. * Closing the returned {@link InputStream} will close the provided stream input. 
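The comment in the new `threadLocalStreamInput` above explains the wrapping order: `InputStreamStreamInput` issues many small reads, so the decompressing stream is buffered first. A JDK-only illustration of why that buffering matters, using raw deflate (`nowrap`) just as `DeflateCompressor` does:

```java
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.Inflater;
import java.util.zip.InflaterInputStream;

class BufferedDecompressDemo {
    public static void main(String[] args) throws IOException {
        byte[] compressed = deflate(new byte[1 << 20]);
        // Without the BufferedInputStream, every single-byte read() below would
        // invoke the inflater once; the buffer amortizes that to one call per chunk.
        try (InputStream in = new BufferedInputStream(
            new InflaterInputStream(new ByteArrayInputStream(compressed), new Inflater(true), 4096), 4096)) {
            long total = 0;
            while (in.read() != -1) {
                total++; // many tiny reads, all served from the buffer
            }
            System.out.println(total + " bytes decompressed");
        }
    }

    private static byte[] deflate(byte[] data) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        // 'true' selects raw deflate (no zlib header), matching the Inflater above.
        try (DeflaterOutputStream out = new DeflaterOutputStream(bos, new Deflater(Deflater.DEFAULT_COMPRESSION, true))) {
            out.write(data);
        }
        return bos.toByteArray();
    }
}
```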
diff --git a/server/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java b/server/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java index 00465855cb652..f14c906b9d64d 100644 --- a/server/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java +++ b/server/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java @@ -14,7 +14,6 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Streams; -import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.IOException; import java.io.InputStream; @@ -150,18 +149,24 @@ public static InputStream inputStream(InputStream in, boolean threadLocal) throw inflater = new Inflater(true); releasable = inflater::end; } - return new BufferedInputStream(new InflaterInputStream(in, inflater, BUFFER_SIZE) { + return new InflaterInputStream(in, inflater, BUFFER_SIZE) { + + private Releasable release = releasable; + @Override public void close() throws IOException { + if (release == null) { + return; + } try { super.close(); } finally { - // We are ensured to only call this once since we wrap this stream in a BufferedInputStream that will only close - // its delegate once - releasable.close(); + // We need to ensure that we only call this once + release.close(); + release = null; } } - }, BUFFER_SIZE); + }; } @Override diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeometryFormatterFactory.java b/server/src/main/java/org/elasticsearch/common/geo/GeometryFormatterFactory.java index 1f4ca454b9c8c..1201bab887861 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeometryFormatterFactory.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeometryFormatterFactory.java @@ -9,8 +9,10 @@ package org.elasticsearch.common.geo; import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.geometry.utils.WellKnownBinary; import org.elasticsearch.geometry.utils.WellKnownText; +import java.nio.ByteOrder; import java.util.ArrayList; import java.util.List; import java.util.function.Function; @@ -22,6 +24,7 @@ public class GeometryFormatterFactory { public static final String GEOJSON = "geojson"; public static final String WKT = "wkt"; + public static final String WKB = "wkb"; /** * Returns a formatter by name @@ -38,6 +41,11 @@ public static Function, List> getFormatter(String name, Func geometries.forEach((shape) -> objects.add(WellKnownText.toWKT(toGeometry.apply(shape)))); return objects; }; + case WKB -> geometries -> { + final List objects = new ArrayList<>(geometries.size()); + geometries.forEach((shape) -> objects.add(WellKnownBinary.toWKB(toGeometry.apply(shape), ByteOrder.LITTLE_ENDIAN))); + return objects; + }; default -> throw new IllegalArgumentException("Unrecognized geometry format [" + name + "]."); }; } diff --git a/server/src/main/java/org/elasticsearch/common/geo/SpatialPoint.java b/server/src/main/java/org/elasticsearch/common/geo/SpatialPoint.java index a12aa336e8bdc..1897d508699b8 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/SpatialPoint.java +++ b/server/src/main/java/org/elasticsearch/common/geo/SpatialPoint.java @@ -12,8 +12,30 @@ * To maximize the use of common code between GeoPoint and projected CRS points, * we introduced SpatialPoint as a common interface.
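With the `BufferedInputStream` wrapper gone, the `DeflateCompressor` hunk above has to make `close()` idempotent itself, which it does by nulling the releasable on first close. A minimal JDK-only sketch of that close-once idiom (the `CloseOnceStream` class and its `Runnable` hook are hypothetical stand-ins for the `Releasable` that ends the `Inflater`):

```java
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;

class CloseOnceStream extends FilterInputStream {
    private Runnable onClose; // stands in for the Releasable in the hunk above

    CloseOnceStream(InputStream in, Runnable onClose) {
        super(in);
        this.onClose = onClose;
    }

    @Override
    public void close() throws IOException {
        if (onClose == null) {
            return; // second and later close() calls are no-ops
        }
        Runnable release = onClose;
        onClose = null; // clear before running so re-entrant close() is safe
        try {
            super.close();
        } finally {
            release.run(); // guaranteed to run exactly once
        }
    }
}
```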
*/ -public interface SpatialPoint { +public interface SpatialPoint extends Comparable { double getX(); double getY(); + + default String toWKT() { + // Code designed to mimic WellKnownText.toWKT, with much less stack depth and object creation + return "POINT (" + getX() + " " + getY() + ")"; + } + + @Override + default int compareTo(SpatialPoint other) { + if (this.getClass().equals(other.getClass())) { + double xd = this.getX() - other.getX(); + double yd = this.getY() - other.getY(); + return (xd == 0) ? comparison(yd) : comparison(xd); + } else { + // TODO: Rather separate based on CRS, but since we don't have that yet, we use class name + // The sort order here is unimportant and does not (yet) introduce BWC issues, so we are free to change it later with CRS + return this.getClass().getSimpleName().compareTo(other.getClass().getSimpleName()); + } + } + + private int comparison(double delta) { + return delta == 0 ? 0 : delta < 0 ? -1 : 1; + } } diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java b/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java index 4b258bc78a7d9..03a584d5c508b 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/Errors.java @@ -19,6 +19,7 @@ import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.inject.ConfigurationException; import org.elasticsearch.common.inject.CreationException; +import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Key; import org.elasticsearch.common.inject.MembersInjector; import org.elasticsearch.common.inject.Provider; @@ -251,8 +252,9 @@ public Errors misplacedBindingAnnotation(Member member, Annotation bindingAnnota ); } - private static final String CONSTRUCTOR_RULES = "Classes must have either one (and only one) constructor " - + "annotated with @Inject or a zero-argument constructor that is not private."; + private static final String CONSTRUCTOR_RULES = "Classes must have either one (and only one) constructor annotated with @" + + Inject.class.getCanonicalName() + + " or a zero-argument constructor that is not private."; public Errors missingConstructor(Class implementation) { return addMessage("Could not find a suitable constructor in %s. 
" + CONSTRUCTOR_RULES, implementation); diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 96240dd053edb..c4f0dc58f5ffd 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -694,7 +694,7 @@ public Map readMapValues(final Writeable.Reader valueReader, fin */ @Nullable @SuppressWarnings("unchecked") - public Map readMap() throws IOException { + public Map readGenericMap() throws IOException { return (Map) readGenericValue(); } diff --git a/server/src/main/java/org/elasticsearch/common/settings/SecureString.java b/server/src/main/java/org/elasticsearch/common/settings/SecureString.java index 25fba6eebc6f9..84520c6a1dc48 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SecureString.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SecureString.java @@ -8,14 +8,15 @@ package org.elasticsearch.common.settings; -import java.io.Closeable; +import org.elasticsearch.core.Releasable; + import java.util.Arrays; import java.util.Objects; /** * A String implementations which allows clearing the underlying char array. */ -public final class SecureString implements CharSequence, Closeable { +public final class SecureString implements CharSequence, Releasable { private char[] chars; diff --git a/server/src/main/java/org/elasticsearch/common/util/BitArray.java b/server/src/main/java/org/elasticsearch/common/util/BitArray.java index 696e81b3beec9..96c00538f07d4 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BitArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BitArray.java @@ -129,6 +129,10 @@ public boolean get(long index) { return (bits.get(wordNum) & bitmask) != 0; } + public long size() { + return bits.size() * (long) Long.BYTES * Byte.SIZE; + } + private static long wordNum(long index) { return index >> 6; } diff --git a/server/src/main/java/org/elasticsearch/common/util/CopyOnFirstWriteMap.java b/server/src/main/java/org/elasticsearch/common/util/CopyOnFirstWriteMap.java index 08d86c143fc1b..51491df02c37c 100644 --- a/server/src/main/java/org/elasticsearch/common/util/CopyOnFirstWriteMap.java +++ b/server/src/main/java/org/elasticsearch/common/util/CopyOnFirstWriteMap.java @@ -12,6 +12,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.Objects; import java.util.Set; /** @@ -70,7 +71,11 @@ public V get(Object key) { @Override public V put(K key, V value) { - return getForUpdate().put(key, value); + if (Objects.equals(get(key), value)) { + return value; + } else { + return getForUpdate().put(key, value); + } } @Override diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index 7bfba1ebdb176..7caf570806c0e 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -23,6 +23,7 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.tracing.TraceContext; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -69,7 +70,7 @@ * * */ -public final class ThreadContext implements Writeable { 
+public final class ThreadContext implements Writeable, TraceContext { public static final String PREFIX = "request.headers"; public static final Setting DEFAULT_HEADERS_SETTING = Setting.groupSetting(PREFIX + ".", Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/LoggingDeprecationHandler.java b/server/src/main/java/org/elasticsearch/common/xcontent/LoggingDeprecationHandler.java index 2132ae0c13379..bbd0c48077db2 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/LoggingDeprecationHandler.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/LoggingDeprecationHandler.java @@ -8,12 +8,12 @@ package org.elasticsearch.common.xcontent; -import org.elasticsearch.common.TriConsumer; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.xcontent.DeprecationHandler; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentLocation; +import org.elasticsearch.xcontent.XContentParserConfiguration; import java.util.function.Supplier; @@ -38,17 +38,10 @@ public class LoggingDeprecationHandler implements DeprecationHandler { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ParseField.class); public static final LoggingDeprecationHandler INSTANCE = new LoggingDeprecationHandler(); - - private TriConsumer deprecationLoggerFunction = (message, params, field_name) -> deprecationLogger.warn( - DeprecationCategory.API, - "deprecated_field_" + field_name, - message, - params + public static final XContentParserConfiguration XCONTENT_PARSER_CONFIG = XContentParserConfiguration.EMPTY.withDeprecationHandler( + INSTANCE ); - private TriConsumer compatibleLoggerFunction = (message, params, field_name) -> deprecationLogger - .compatibleCritical("deprecated_field_" + field_name, message, params); - private LoggingDeprecationHandler() { // one instance only } @@ -77,8 +70,8 @@ public void logRenamedField( boolean isCompatibleDeprecation ) { String prefix = parserLocation(parserName, location); - TriConsumer loggingFunction = getLoggingFunction(isCompatibleDeprecation); - loggingFunction.apply( + log( + isCompatibleDeprecation, "{}Deprecated field [{}] used, expected [{}] instead", new Object[] { prefix, oldName, currentName }, oldName @@ -94,8 +87,12 @@ public void logReplacedField( boolean isCompatibleDeprecation ) { String prefix = parserLocation(parserName, location); - TriConsumer loggingFunction = getLoggingFunction(isCompatibleDeprecation); - loggingFunction.apply("{}Deprecated field [{}] used, replaced by [{}]", new Object[] { prefix, oldName, replacedName }, oldName); + log( + isCompatibleDeprecation, + "{}Deprecated field [{}] used, replaced by [{}]", + new Object[] { prefix, oldName, replacedName }, + oldName + ); } @Override @@ -106,8 +103,8 @@ public void logRemovedField( boolean isCompatibleDeprecation ) { String prefix = parserLocation(parserName, location); - TriConsumer loggingFunction = getLoggingFunction(isCompatibleDeprecation); - loggingFunction.apply( + log( + isCompatibleDeprecation, "{}Deprecated field [{}] used, this field is unused and will be removed entirely", new Object[] { prefix, removedName }, removedName @@ -119,11 +116,11 @@ private static String parserLocation(String parserName, Supplier getLoggingFunction(boolean isCompatibleDeprecation) { + private static void log(boolean isCompatibleDeprecation, String message, Object[] params, String fieldName) { if 
(isCompatibleDeprecation) { - return compatibleLoggerFunction; + deprecationLogger.compatibleCritical("deprecated_field_" + fieldName, message, params); } else { - return deprecationLoggerFunction; + deprecationLogger.warn(DeprecationCategory.API, "deprecated_field_" + fieldName, message, params); } } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java index 3bfe5078a3487..5c1870463149e 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java @@ -74,10 +74,26 @@ public static XContentParser createParser(XContentParserConfiguration config, By final XContentType contentType = XContentFactory.xContentType(compressedInput); return XContentFactory.xContent(contentType).createParser(config, compressedInput); } else { - return XContentFactory.xContent(xContentType(bytes)).createParser(config, bytes.streamInput()); + return createParserNotCompressed(config, bytes, xContentType(bytes)); } } + /** + * Same as {@link #createParser(XContentParserConfiguration, BytesReference, XContentType)} but only supports uncompressed + * {@code bytes}. + */ + public static XContentParser createParserNotCompressed( + XContentParserConfiguration config, + BytesReference bytes, + XContentType xContentType + ) throws IOException { + XContent xContent = xContentType.xContent(); + if (bytes.hasArray()) { + return xContent.createParser(config, bytes.array(), bytes.arrayOffset(), bytes.length()); + } + return xContent.createParser(config, bytes.streamInput()); + } + /** * Creates a parser for the bytes provided * @deprecated use {@link #createParser(XContentParserConfiguration, BytesReference, XContentType)} @@ -104,17 +120,10 @@ public static XContentParser createParser(XContentParserConfiguration config, By Objects.requireNonNull(xContentType); Compressor compressor = CompressorFactory.compressor(bytes); if (compressor != null) { - InputStream compressedInput = compressor.threadLocalInputStream(bytes.streamInput()); - if (compressedInput.markSupported() == false) { - compressedInput = new BufferedInputStream(compressedInput); - } - return XContentFactory.xContent(xContentType).createParser(config, compressedInput); + return XContentFactory.xContent(xContentType).createParser(config, compressor.threadLocalInputStream(bytes.streamInput())); } else { - // TODO now that we have config we make a method on bytes to do this building wihout needing this check everywhere - if (bytes.hasArray()) { - return xContentType.xContent().createParser(config, bytes.array(), bytes.arrayOffset(), bytes.length()); - } - return xContentType.xContent().createParser(config, bytes.streamInput()); + // TODO now that we have config we make a method on bytes to do this building without needing this check everywhere + return createParserNotCompressed(config, bytes, xContentType); } } @@ -310,7 +319,7 @@ public static String convertToJson(BytesReference bytes, boolean reformatJson) t @Deprecated public static String convertToJson(BytesReference bytes, boolean reformatJson, boolean prettyPrint) throws IOException { - return convertToJson(bytes, reformatJson, prettyPrint, XContentFactory.xContentType(bytes.toBytesRef().bytes)); + return convertToJson(bytes, reformatJson, prettyPrint, xContentType(bytes)); } public static String convertToJson(BytesReference bytes, boolean reformatJson, XContentType xContentType) throws IOException 
{ @@ -337,20 +346,8 @@ public static String convertToJson(BytesReference bytes, boolean reformatJson, b return bytes.utf8ToString(); } - if (bytes.hasArray()) { - try ( - XContentParser parser = XContentFactory.xContent(xContentType) - .createParser(XContentParserConfiguration.EMPTY, bytes.array(), bytes.arrayOffset(), bytes.length()) - ) { - return toJsonString(prettyPrint, parser); - } - } else { - try ( - InputStream stream = bytes.streamInput(); - XContentParser parser = XContentFactory.xContent(xContentType).createParser(XContentParserConfiguration.EMPTY, stream) - ) { - return toJsonString(prettyPrint, parser); - } + try (var parser = createParserNotCompressed(XContentParserConfiguration.EMPTY, bytes, xContentType)) { + return toJsonString(prettyPrint, parser); } } @@ -746,7 +743,7 @@ public static void writeTo(StreamOutput out, XContentType xContentType) throws I public static XContentParser mapToXContentParser(XContentParserConfiguration config, Map source) { try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { builder.map(source); - return XContentFactory.xContent(builder.contentType()).createParser(config, Strings.toString(builder)); + return createParserNotCompressed(config, BytesReference.bytes(builder), builder.contentType()); } catch (IOException e) { throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e); } diff --git a/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java index c7145ed444d38..b9d267b922c91 100644 --- a/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision; @@ -23,6 +22,8 @@ import java.util.ArrayList; import java.util.List; +import static org.elasticsearch.cluster.routing.ExpectedShardSizeEstimator.getExpectedShardSize; + /** * An abstract class that implements basic functionality for allocating * shards to nodes based on shard copies that already exist in the cluster. @@ -58,7 +59,7 @@ public void allocateUnassigned( unassignedAllocationHandler.initialize( allocateUnassignedDecision.getTargetNode().getId(), allocateUnassignedDecision.getAllocationId(), - getExpectedShardSize(shardRouting, allocation), + getExpectedShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, allocation), allocation.changes() ); } else { @@ -66,18 +67,6 @@ public void allocateUnassigned( } } - protected static long getExpectedShardSize(ShardRouting shardRouting, RoutingAllocation allocation) { - if (shardRouting.primary()) { - if (shardRouting.recoverySource().getType() == RecoverySource.Type.SNAPSHOT) { - return allocation.snapshotShardSizeInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); - } else { - return ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE; - } - } else { - return allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); - } - } - /** * Make a decision on the allocation of an unassigned shard. 
This method is used by * {@link #allocateUnassigned(ShardRouting, RoutingAllocation, ExistingShardsAllocator.UnassignedAllocationHandler)} to make decisions diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index f30a924eaa54e..43e03d30bd120 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -37,6 +37,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentMap; +import java.util.function.Predicate; import java.util.stream.Collectors; import static org.elasticsearch.common.util.set.Sets.difference; @@ -116,11 +117,11 @@ public void beforeAllocation(final RoutingAllocation allocation) { } @Override - public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) { + public void afterPrimariesBeforeReplicas(RoutingAllocation allocation, Predicate isRelevantShardPredicate) { assert replicaShardAllocator != null; if (allocation.routingNodes().hasInactiveReplicas()) { // cancel existing recoveries if we have a better match - replicaShardAllocator.processExistingRecoveries(allocation); + replicaShardAllocator.processExistingRecoveries(allocation, isRelevantShardPredicate); } } diff --git a/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java index fcf50ba3a8a44..b86cfa6fdb7af 100644 --- a/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java @@ -61,6 +61,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Assertions; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedFunction; @@ -438,6 +439,10 @@ OnDiskState loadBestOnDiskState(boolean checkClean) throws IOException { PrintStream printStream = new PrintStream(outputStream, true, StandardCharsets.UTF_8); CheckIndex checkIndex = new CheckIndex(directory) ) { + // Setting thread count to 1 prevents Lucene from starting disposable threads to execute the check and runs + // the check on this thread which is potentially faster for a small index like the cluster state and saves + // resources during test execution + checkIndex.setThreadCount(1); checkIndex.setInfoStream(printStream); checkIndex.setChecksumsOnly(true); isClean = checkIndex.checkIndex().clean; @@ -668,8 +673,7 @@ public OnDiskStateMetadata loadOnDiskStateMetadataFromUserData(Map T readXContent(BytesReference bytes, CheckedFunction reader) throws IOException { - final XContentParser parser = XContentFactory.xContent(XContentType.SMILE).createParser(parserConfig, bytes.streamInput()); - try { + try (XContentParser parser = XContentHelper.createParserNotCompressed(parserConfig, bytes, XContentType.SMILE)) { return reader.apply(parser); } catch (Exception e) { throw new CorruptStateException(e); diff --git a/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java index 6922942e91d2b..f12fc2e7291e5 100644 --- a/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java +++ 
b/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java @@ -36,6 +36,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Predicate; import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; @@ -45,8 +46,7 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { * match. Today, a better match is one that can perform a no-op recovery while the previous recovery * has to copy segment files. */ - public void processExistingRecoveries(RoutingAllocation allocation) { - Metadata metadata = allocation.metadata(); + public void processExistingRecoveries(RoutingAllocation allocation, Predicate isRelevantShardPredicate) { RoutingNodes routingNodes = allocation.routingNodes(); List shardCancellationActions = new ArrayList<>(); for (RoutingNode routingNode : routingNodes) { @@ -60,6 +60,9 @@ public void processExistingRecoveries(RoutingAllocation allocation) { if (shard.relocatingNodeId() != null) { continue; } + if (isRelevantShardPredicate.test(shard) == false) { + continue; + } // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... if (shard.unassignedInfo() != null && shard.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) { diff --git a/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java b/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java index 878930c2962d0..288837fb3c808 100644 --- a/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java +++ b/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java @@ -204,7 +204,10 @@ private HealthPeriodicLogger( this.logWriter = logWriter == null ? 
logger::info : logWriter; // create metric for overall level metrics - this.redMetrics.put("overall", LongGaugeMetric.create(this.meterRegistry, "es.health.overall.red", "Overall: Red", "{cluster}")); + this.redMetrics.put( + "overall", + LongGaugeMetric.create(this.meterRegistry, "es.health.overall.red.status", "Overall: Red", "{cluster}") + ); } private void registerListeners() { diff --git a/server/src/main/java/org/elasticsearch/health/RestGetHealthAction.java b/server/src/main/java/org/elasticsearch/health/RestGetHealthAction.java index 8dcea1bb0e7e2..5dd7930c77d68 100644 --- a/server/src/main/java/org/elasticsearch/health/RestGetHealthAction.java +++ b/server/src/main/java/org/elasticsearch/health/RestGetHealthAction.java @@ -14,7 +14,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -48,7 +48,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).execute( GetHealthAction.INSTANCE, getHealthRequest, - new RestChunkedToXContentListener<>(channel) + new RestRefCountedChunkedToXContentListener<>(channel) ); } diff --git a/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java index 24df7875f7e3d..f4dbf8115da33 100644 --- a/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java +++ b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java @@ -26,7 +26,6 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.telemetry.tracing.SpanId; import org.elasticsearch.telemetry.tracing.Tracer; import java.util.ArrayList; @@ -91,8 +90,6 @@ public void sendResponse(RestResponse restResponse) { // We're sending a response so we know we won't be needing the request content again and release it httpRequest.release(); - final SpanId spanId = SpanId.forRestRequest(request); - final ArrayList toClose = new ArrayList<>(4); if (HttpUtils.shouldCloseConnection(httpRequest)) { toClose.add(() -> CloseableChannel.closeChannel(httpChannel)); @@ -174,9 +171,9 @@ public void sendResponse(RestResponse restResponse) { addCookies(httpResponse); - tracer.setAttribute(spanId, "http.status_code", restResponse.status().getStatus()); + tracer.setAttribute(request, "http.status_code", restResponse.status().getStatus()); restResponse.getHeaders() - .forEach((key, values) -> tracer.setAttribute(spanId, "http.response.headers." + key, String.join("; ", values))); + .forEach((key, values) -> tracer.setAttribute(request, "http.response.headers." 
+ key, String.join("; ", values))); ActionListener listener = ActionListener.releasing(Releasables.wrap(toClose)); if (httpLogger != null) { diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersion.java b/server/src/main/java/org/elasticsearch/index/IndexVersion.java index 765cc256d84b1..f4edb8b1d4039 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersion.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersion.java @@ -56,13 +56,11 @@ public record IndexVersion(int id, Version luceneVersion) implements VersionId null); - if (versionExtension == null) { - return IndexVersions.LATEST_DEFINED; - } - var version = versionExtension.getCurrentIndexVersion(IndexVersions.LATEST_DEFINED); + var version = ExtensionLoader.loadSingleton(ServiceLoader.load(VersionExtension.class)) + .map(e -> e.getCurrentIndexVersion(IndexVersions.LATEST_DEFINED)) + .orElse(IndexVersions.LATEST_DEFINED); assert version.onOrAfter(IndexVersions.LATEST_DEFINED); assert version.luceneVersion.equals(Version.LATEST) diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index f4edb4f79d760..4419abba73c1b 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -92,7 +92,10 @@ private static IndexVersion def(int id, Version luceneVersion) { public static final IndexVersion ES_VERSION_8_12 = def(8_500_004, Version.LUCENE_9_8_0); public static final IndexVersion NORMALIZED_VECTOR_COSINE = def(8_500_005, Version.LUCENE_9_8_0); public static final IndexVersion UPGRADE_LUCENE_9_9 = def(8_500_006, Version.LUCENE_9_9_0); - public static final IndexVersion ES_VERSION_8_13 = def(8_500_007, Version.LUCENE_9_9_0); + public static final IndexVersion NORI_DUPLICATES = def(8_500_007, Version.LUCENE_9_9_0); + public static final IndexVersion UPGRADE_LUCENE_9_9_1 = def(8_500_008, Version.LUCENE_9_9_1); + public static final IndexVersion ES_VERSION_8_13 = def(8_500_009, Version.LUCENE_9_9_1); + public static final IndexVersion NEW_INDEXVERSION_FORMAT = def(8_501_00_0, Version.LUCENE_9_9_1); /* * STOP! READ THIS FIRST! No, really, @@ -105,18 +108,46 @@ private static IndexVersion def(int id, Version luceneVersion) { * A new index version should be added EVERY TIME a change is made to index metadata or data storage. * Each index version should only be used in a single merged commit (apart from the BwC versions copied from o.e.Version, ≤V_8_11_0). * - * To add a new index version, add a new constant at the bottom of the list, above this comment, which is one greater than the - * current highest version id. Use a descriptive constant name. Don't add other lines, comments, etc. + * ADDING AN INDEX VERSION + * To add a new index version, add a new constant at the bottom of the list, above this comment. Don't add other lines, + * comments, etc. The version id has the following layout: + * + * M_NNN_SS_P + * + * M - The major version of Elasticsearch + * NNN - The server version part + * SS - The serverless version part. It should always be 00 here, it is used by serverless only. 
+ * P - The patch version part + * + * To determine the id of the next IndexVersion constant, do the following: + * - Use the same major version, unless bumping majors + * - Bump the server version part by 1, unless creating a patch version + * - Leave the serverless part as 00 + * - Bump the patch part if creating a patch version + * + * If a patch version is created, it should be placed sorted among the other existing constants. * * REVERTING AN INDEX VERSION * * If you revert a commit with an index version change, you MUST ensure there is a NEW index version representing the reverted * change. DO NOT let the index version go backwards, it must ALWAYS be incremented. * - * DETERMINING TRANSPORT VERSIONS FROM GIT HISTORY + * DETERMINING INDEX VERSIONS FROM GIT HISTORY + * + * If your git checkout has the expected minor-version-numbered branches and the expected release-version tags then you can find the + * index versions known by a particular release ... + * + * git show v8.12.0:server/src/main/java/org/elasticsearch/index/IndexVersions.java | grep '= def' + * + * ... or by a particular branch ... + * + * git show 8.12:server/src/main/java/org/elasticsearch/index/IndexVersions.java | grep '= def' + * + * ... and you can see which versions were added in between two versions too ... + * + * git diff v8.12.0..main -- server/src/main/java/org/elasticsearch/index/IndexVersions.java * - * TODO after the release of v8.11.0, copy the instructions about using git to track the history of versions from TransportVersion.java - * (the example commands won't make sense until at least 8.11.0 is released) + * In branches 8.7-8.11 see server/src/main/java/org/elasticsearch/index/IndexVersion.java for the equivalent definitions. */ public static final IndexVersion MINIMUM_COMPATIBLE = V_7_0_0; diff --git a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java index e19ee050c93a7..d3e281ca115e1 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java @@ -44,6 +44,7 @@ import org.apache.lucene.analysis.sv.SwedishAnalyzer; import org.apache.lucene.analysis.th.ThaiAnalyzer; import org.apache.lucene.analysis.tr.TurkishAnalyzer; +import org.apache.lucene.analysis.util.CSVUtil; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationCategory; @@ -64,6 +65,7 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; @@ -257,6 +259,52 @@ public static List getWordList( } } + public static List getWordList( + Environment env, + Settings settings, + String settingPath, + String settingList, + boolean removeComments, + boolean checkDuplicate + ) { + final List ruleList = getWordList(env, settings, settingPath, settingList, removeComments); + if (ruleList != null && ruleList.isEmpty() == false && checkDuplicate) { + checkDuplicateRules(ruleList); + } + return ruleList; + } + + /** + * This method checks for any duplicate rules in the provided ruleList. Each rule in the list is parsed with CSVUtil.parse + * to separate the rule into individual components, represented as a String array. Only the first component from each rule + * is considered in the duplication check. 
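+ * For example, the rules {@code foo,bar} and {@code foo,baz} share the first component {@code foo}, so the second rule
+ * is flagged as a duplicate.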
+ * + * The method will ignore any line that starts with a '#' character, treating it as a comment. + * + * The check is performed by adding the first component of each rule into a HashSet (dup), which does not allow duplicates. + * If the addition to the HashSet returns false, it means that item was already present in the set, indicating a duplicate. + * In such a case, an IllegalArgumentException is thrown specifying the duplicate term and the line number in the original list. + * + * @param ruleList The list of rules to check for duplicates. + * @throws IllegalArgumentException If a duplicate rule is found. + */ + private static void checkDuplicateRules(List ruleList) { + Set dup = new HashSet<>(); + int lineNum = 0; + for (String line : ruleList) { + // ignore comments + if (line.startsWith("#") == false) { + String[] values = CSVUtil.parse(line); + if (dup.add(values[0]) == false) { + throw new IllegalArgumentException( + "Found duplicate term [" + values[0] + "] in user dictionary " + "at line [" + lineNum + "]" + ); + } + } + ++lineNum; + } + } + private static List loadWordList(Path path, boolean removeComments) throws IOException { final List result = new ArrayList<>(); try (BufferedReader br = Files.newBufferedReader(path, StandardCharsets.UTF_8)) { diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java index d2ca31fe6a197..852547ecb1073 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java +++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java @@ -19,6 +19,7 @@ import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.codec.bloomfilter.ES87BloomFilterPostingsFormat; +import org.elasticsearch.index.codec.postings.ES812PostingsFormat; import org.elasticsearch.index.codec.tsdb.ES87TSDBDocValuesFormat; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -44,6 +45,8 @@ public final class PerFieldMapperCodec extends Lucene99Codec { private final ES87BloomFilterPostingsFormat bloomFilterPostingsFormat; private final ES87TSDBDocValuesFormat tsdbDocValuesFormat; + private final ES812PostingsFormat es812PostingsFormat; + static { assert Codec.forName(Lucene.LATEST_CODEC).getClass().isAssignableFrom(PerFieldMapperCodec.class) : "PerFieldMapperCodec must subclass the latest lucene codec: " + Lucene.LATEST_CODEC; @@ -54,6 +57,7 @@ public PerFieldMapperCodec(Mode compressionMode, MapperService mapperService, Bi this.mapperService = mapperService; this.bloomFilterPostingsFormat = new ES87BloomFilterPostingsFormat(bigArrays, this::internalGetPostingsFormatForField); this.tsdbDocValuesFormat = new ES87TSDBDocValuesFormat(); + this.es812PostingsFormat = new ES812PostingsFormat(); } @Override @@ -69,7 +73,8 @@ private PostingsFormat internalGetPostingsFormatForField(String field) { if (format != null) { return format; } - return super.getPostingsFormatForField(field); + // return our own posting format using PFOR + return es812PostingsFormat; } boolean useBloomFilter(String field) { diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsFormat.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsFormat.java new file mode 100644 index 0000000000000..5270326876e08 --- /dev/null +++ 
b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsFormat.java @@ -0,0 +1,506 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. + */ +package org.elasticsearch.index.codec.postings; + +import org.apache.lucene.codecs.BlockTermState; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.codecs.FieldsConsumer; +import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.MultiLevelSkipListWriter; +import org.apache.lucene.codecs.PostingsFormat; +import org.apache.lucene.codecs.PostingsReaderBase; +import org.apache.lucene.codecs.PostingsWriterBase; +import org.apache.lucene.codecs.lucene90.blocktree.Lucene90BlockTreeTermsReader; +import org.apache.lucene.codecs.lucene90.blocktree.Lucene90BlockTreeTermsWriter; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.SegmentReadState; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.index.TermState; +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.util.packed.PackedInts; +import org.elasticsearch.core.IOUtils; + +import java.io.IOException; + +/** + * Based on Lucene 9.0 postings format, which encodes postings in packed integer blocks for fast decode. + * It is introduced to preserve PFOR space efficiency when Lucene switched back to FOR in 9.9 + * + *
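+ * Roughly: FOR packs every value of a block using the bit width required by the block's largest value, while PFOR
+ * additionally stores a few large outliers separately as exceptions so the remaining values can be packed with fewer
+ * bits, trading a little decode speed for space.
+ *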
+ * Basic idea:
+ *
+ * • Packed Blocks and VInt Blocks:
+ *   In packed blocks, integers are encoded with the same bit width ({@link PackedInts packed format}): the block size
+ *   (i.e. number of integers inside a block) is fixed (currently 128). Additionally, blocks that are all the same value
+ *   are encoded in an optimized way.
+ *   In VInt blocks, integers are encoded as {@link DataOutput#writeVInt VInt}: the block size is variable.
+ * • Block structure:
+ *   When the postings are long enough, Lucene90PostingsFormat will try to encode most integer data as a packed block.
+ *   Take a term with 259 documents as an example: the first 256 document ids are encoded as two packed blocks, while the
+ *   remaining 3 are encoded as one VInt block.
+ *   Different kinds of data are always encoded separately into different packed blocks, but may possibly be interleaved
+ *   into the same VInt block.
+ *   This strategy is applied to pairs: <document number, frequency>, <position, payload length>,
+ *   <position, offset start, offset length>, and <position, payload length, offset start, offset length>.
+ * • Skipdata settings:
+ *   The structure of the skip table is quite similar to previous versions of Lucene. The skip interval is the same as
+ *   the block size, and each skip entry points to the beginning of each block. However, for the first block, skip data
+ *   is omitted.
+ * • Positions, Payloads, and Offsets:
+ *   A position is an integer indicating where the term occurs within one document. A payload is a blob of metadata
+ *   associated with the current position. An offset is a pair of integers indicating the tokenized start/end offsets for
+ *   the given term in the current position: it is essentially a specialized payload.
+ *   When payloads and offsets are not omitted, numPositions==numPayloads==numOffsets (assuming a null payload
+ *   contributes one count). As mentioned in the block structure, it is possible to encode these three either combined or
+ *   separately.
+ *   In all cases, payloads and offsets are stored together. When encoded as a packed block, position data is separated
+ *   out as .pos, while payloads and offsets are encoded in .pay (payload metadata will also be stored directly in .pay).
+ *   When encoded as VInt blocks, all these three are stored interleaved into the .pos (so is payload metadata).
+ *   With this strategy, the majority of payload and offset data will be outside the .pos file. So for queries that
+ *   require only position data, running on a full index with payloads and offsets, this reduces disk pre-fetches.
+ *
+ * Files and detailed format (in the grammars below, X^N denotes X repeated N times):
+ *
+ * Term Dictionary
+ *
+ *   The .tim file contains the list of terms in each field along with per-term statistics (such as docfreq) and pointers
+ *   to the frequencies, positions, payload and skip data in the .doc, .pos, and .pay files. See
+ *   {@link Lucene90BlockTreeTermsWriter} for more details on the format.
+ *   NOTE: The term dictionary can plug into different postings implementations: the postings writer/reader are actually
+ *   responsible for encoding and decoding the PostingsHeader and TermMetadata sections described here:
+ *
+ *   • PostingsHeader --> Header, PackedBlockSize
+ *   • TermMetadata --> (DocFPDelta|SingletonDocID), PosFPDelta?, PosVIntBlockFPDelta?, PayFPDelta?, SkipFPDelta?
+ *   • Header --> {@link CodecUtil#writeIndexHeader IndexHeader}
+ *   • PackedBlockSize, SingletonDocID --> {@link DataOutput#writeVInt VInt}
+ *   • DocFPDelta, PosFPDelta, PayFPDelta, PosVIntBlockFPDelta, SkipFPDelta --> {@link DataOutput#writeVLong VLong}
+ *   • Footer --> {@link CodecUtil#writeFooter CodecFooter}
+ *
+ *   Notes:
+ *
+ *   • Header is a {@link CodecUtil#writeIndexHeader IndexHeader} storing the version information for the postings.
+ *   • PackedBlockSize is the fixed block size for packed blocks. In a packed block, bit width is determined by the
+ *     largest integer. A smaller block size results in smaller variance among the widths of integers, hence smaller
+ *     indexes; a larger block size results in more efficient bulk i/o, hence better acceleration. This value should
+ *     always be a multiple of 64, currently fixed as 128 as a tradeoff. It is also the skip interval used to accelerate
+ *     {@link org.apache.lucene.index.PostingsEnum#advance(int)}.
+ *   • DocFPDelta determines the position of this term's TermFreqs within the .doc file. In particular, it is the
+ *     difference of file offset between this term's data and the previous term's data (or zero, for the first term in
+ *     the block). On disk it is stored as the difference from the previous value in sequence.
+ *   • PosFPDelta determines the position of this term's TermPositions within the .pos file, while PayFPDelta determines
+ *     the position of this term's <TermPayloads, TermOffsets?> within the .pay file. Similar to DocFPDelta, it is the
+ *     difference between two file positions (or neglected, for fields that omit payloads and offsets).
+ *   • PosVIntBlockFPDelta determines the position of this term's last TermPosition in the last pos packed block within
+ *     the .pos file. It is a synonym for PayVIntBlockFPDelta or OffsetVIntBlockFPDelta. This is actually used to indicate
+ *     whether it is necessary to load the following payloads and offsets from .pos instead of .pay. Every time a new
+ *     block of positions is to be loaded, the PostingsReader will use this value to check whether the current block is in
+ *     packed format or VInt. When in packed format, payloads and offsets are fetched from .pay, otherwise from .pos.
+ *     (This value is neglected when the total number of positions, i.e. totalTermFreq, is less than or equal to
+ *     PackedBlockSize.)
+ *   • SkipFPDelta determines the position of this term's SkipData within the .doc file. In particular, it is the length
+ *     of the TermFreq data. SkipDelta is only stored if DocFreq is not smaller than SkipMinimum (i.e. 128 in
+ *     Lucene90PostingsFormat).
+ *   • SingletonDocID is an optimization for when a term only appears in one document. In this case, instead of writing a
+ *     file pointer to the .doc file (DocFPDelta), and then a VIntBlock at that location, the single document ID is
+ *     written to the term dictionary.
+ *
+ * Term Index
+ *
+ *   The .tip file contains an index into the term dictionary, so that it can be accessed randomly. See
+ *   {@link Lucene90BlockTreeTermsWriter} for more details on the format.
+ *
+ * Frequencies and Skip Data
+ *
+ *   The .doc file contains the lists of documents which contain each term, along with the frequency of the term in that
+ *   document (except when frequencies are omitted: {@link IndexOptions#DOCS}). It also saves skip data to the beginning
+ *   of each packed or VInt block, when the length of the document list is larger than the packed block size.
+ *
+ *   • docFile(.doc) --> Header, <TermFreqs, SkipData?>^TermCount, Footer
+ *   • Header --> {@link CodecUtil#writeIndexHeader IndexHeader}
+ *   • TermFreqs --> <PackedBlock>^PackedDocBlockNum, VIntBlock?
+ *   • PackedBlock --> PackedDocDeltaBlock, PackedFreqBlock?
+ *   • VIntBlock --> <DocDelta[, Freq?]>^(DocFreq - PackedBlockSize*PackedDocBlockNum)
+ *   • SkipData --> <<SkipLevelLength, SkipLevel>^(NumSkipLevels-1), SkipLevel>, SkipDatum?
+ *   • SkipLevel --> <SkipDatum>^(TrimmedDocFreq/(PackedBlockSize^(Level+1)))
+ *   • SkipDatum --> DocSkip, DocFPSkip, <PosFPSkip, PosBlockOffset, PayLength?, PayFPSkip?>?, ImpactLength,
+ *     <CompetitiveFreqDelta, CompetitiveNormDelta?>^ImpactCount, SkipChildLevelPointer?
+ *   • PackedDocDeltaBlock, PackedFreqBlock --> {@link PackedInts PackedInts}
+ *   • DocDelta, Freq, DocSkip, DocFPSkip, PosFPSkip, PosBlockOffset, PayByteUpto, PayFPSkip, ImpactLength,
+ *     CompetitiveFreqDelta --> {@link DataOutput#writeVInt VInt}
+ *   • CompetitiveNormDelta --> {@link DataOutput#writeZLong ZLong}
+ *   • SkipChildLevelPointer --> {@link DataOutput#writeVLong VLong}
+ *   • Footer --> {@link CodecUtil#writeFooter CodecFooter}
+ *
+ *   Notes:
+ *
+ *   • PackedDocDeltaBlock is theoretically generated from two steps:
+ *     1. Calculate the difference between each document number and the previous one, giving a d-gaps list (for the first
+ *        document, use the absolute value);
+ *     2. For those d-gaps from the first one to the (PackedDocBlockNum*PackedBlockSize)th, separately encode as packed
+ *        blocks.
+ *     If frequencies are not omitted, PackedFreqBlock will be generated without the d-gap step.
+ *   • VIntBlock stores the remaining d-gaps (along with frequencies when possible) with a format that encodes DocDelta
+ *     and Freq:
+ *     DocDelta: if frequencies are indexed, this determines both the document number and the frequency. In particular,
+ *     DocDelta/2 is the difference between this document number and the previous document number (or zero when this is
+ *     the first document in a TermFreqs). When DocDelta is odd, the frequency is one. When DocDelta is even, the
+ *     frequency is read as another VInt. If frequencies are omitted, DocDelta contains the gap (not multiplied by 2)
+ *     between document numbers and no frequency information is stored.
+ *     For example, the TermFreqs for a term which occurs once in document seven and three times in document eleven, with
+ *     frequencies indexed, would be the following sequence of VInts:
+ *     15, 8, 3
+ *     If frequencies were omitted ({@link IndexOptions#DOCS}) it would be this sequence of VInts instead:
+ *     7, 4
+ *   • PackedDocBlockNum is the number of packed blocks for the current term's docids or frequencies. In particular,
+ *     PackedDocBlockNum = floor(DocFreq/PackedBlockSize).
+ *   • TrimmedDocFreq = DocFreq % PackedBlockSize == 0 ? DocFreq - 1 : DocFreq. We use this trick since the definition of
+ *     a skip entry is a little different from the base interface. In {@link MultiLevelSkipListWriter}, skip data is
+ *     assumed to be saved for the (skipInterval)th, (2*skipInterval)th ... posting in the list. However, in
+ *     Lucene90PostingsFormat, the skip data is saved for the (skipInterval+1)th, (2*skipInterval+1)th ... posting
+ *     (skipInterval==PackedBlockSize in this case). When DocFreq is a multiple of PackedBlockSize,
+ *     MultiLevelSkipListWriter will expect one more skip data than Lucene90SkipWriter.
+ *   • SkipDatum is the metadata of one skip entry. For the first block (no matter packed or VInt), it is omitted.
+ *   • DocSkip records the document number of every (PackedBlockSize)th document number in the postings (i.e. the last
+ *     document number in each packed block). On disk it is stored as the difference from the previous value in the
+ *     sequence.
+ *   • DocFPSkip records the file offsets of each block (excluding the first one), i.e. of the postings at the
+ *     (PackedBlockSize+1)th, (2*PackedBlockSize+1)th ... positions, in the .doc file. The file offsets are relative to
+ *     the start of the current term's TermFreqs. On disk it is also stored as the difference from the previous SkipDatum
+ *     in the sequence.
+ *   • Since positions and payloads are also block encoded, the skip should skip to the related block first, then fetch
+ *     the values according to the in-block offset. PosFPSkip and PayFPSkip record the file offsets of the related block
+ *     in .pos and .pay, respectively, while PosBlockOffset indicates which value to fetch inside the related block
+ *     (PayBlockOffset is unnecessary since it is always equal to PosBlockOffset). Same as DocFPSkip, the file offsets are
+ *     relative to the start of the current term's TermFreqs, and stored as a difference sequence.
+ *   • PayByteUpto indicates the start offset of the current payload. It is equivalent to the sum of the payload lengths
+ *     in the current block up to PosBlockOffset.
+ *   • ImpactLength is the total length of the CompetitiveFreqDelta and CompetitiveNormDelta pairs. CompetitiveFreqDelta
+ *     and CompetitiveNormDelta are used to safely skip score calculation for uncompetitive documents; see
+ *     {@link org.apache.lucene.codecs.CompetitiveImpactAccumulator} for more details.
+ *
+ * Positions
+ *
+ *   The .pos file contains the lists of positions that each term occurs at within documents. It also sometimes stores
+ *   part of the payloads and offsets for speedup.
+ *
+ *   • PosFile(.pos) --> Header, <TermPositions>^TermCount, Footer
+ *   • Header --> {@link CodecUtil#writeIndexHeader IndexHeader}
+ *   • TermPositions --> <PackedPosDeltaBlock>^PackedPosBlockNum, VIntBlock?
+ *   • VIntBlock --> <PositionDelta[, PayloadLength?], PayloadData?, OffsetDelta?, OffsetLength?>^PosVIntCount
+ *   • PackedPosDeltaBlock --> {@link PackedInts PackedInts}
+ *   • PositionDelta, OffsetDelta, OffsetLength --> {@link DataOutput#writeVInt VInt}
+ *   • PayloadData --> {@link DataOutput#writeByte byte}^PayLength
+ *   • Footer --> {@link CodecUtil#writeFooter CodecFooter}
+ *
+ *   Notes:
+ *
+ *   • TermPositions are ordered by term (terms are implicit, from the term dictionary), and position values for each
+ *     term-document pair are incremental, and ordered by document number.
+ *   • PackedPosBlockNum is the number of packed blocks for the current term's positions, payloads or offsets. In
+ *     particular, PackedPosBlockNum = floor(totalTermFreq/PackedBlockSize).
+ *   • PosVIntCount is the number of positions encoded in VInt format. In particular,
+ *     PosVIntCount = totalTermFreq - PackedPosBlockNum*PackedBlockSize.
+ *   • The procedure by which PackedPosDeltaBlock is generated is the same as for PackedDocDeltaBlock in the chapter
+ *     Frequencies and Skip Data.
+ *   • PositionDelta is, if payloads are disabled for the term's field, the difference between the position of the
+ *     current occurrence in the document and the previous occurrence (or zero, if this is the first occurrence in this
+ *     document). If payloads are enabled for the term's field, then PositionDelta/2 is the difference between the current
+ *     and the previous position. If payloads are enabled and PositionDelta is odd, then PayloadLength is stored,
+ *     indicating the length of the payload at the current term position.
+ *   • For example, the TermPositions for a term which occurs as the fourth term in one document, and as the fifth and
+ *     ninth term in a subsequent document, would be the following sequence of VInts (payloads disabled):
+ *     4, 5, 4
+ *   • PayloadData is metadata associated with the current term position. If PayloadLength is stored at the current
+ *     position, then it indicates the length of this payload. If PayloadLength is not stored, then this payload has the
+ *     same length as the payload at the previous position.
+ *   • OffsetDelta/2 is the difference between this position's startOffset and that of the previous occurrence (or zero,
+ *     if this is the first occurrence in this document). If OffsetDelta is odd, then the length (endOffset-startOffset)
+ *     differs from the previous occurrence and an OffsetLength follows. Offset data is only written for
+ *     {@link IndexOptions#DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS}.
+ *
+ * Payloads and Offsets
+ *
+ *   The .pay file will store payloads and offsets associated with certain term-document positions. Some payloads and
+ *   offsets will be separated out into the .pos file, for performance reasons.
+ *
+ *   • PayFile(.pay) --> Header, <TermPayloads?, TermOffsets?>^TermCount, Footer
+ *   • Header --> {@link CodecUtil#writeIndexHeader IndexHeader}
+ *   • TermPayloads --> <PackedPayLengthBlock, SumPayLength, PayData>^PackedPayBlockNum
+ *   • TermOffsets --> <PackedOffsetStartDeltaBlock, PackedOffsetLengthBlock>^PackedPayBlockNum
+ *   • PackedPayLengthBlock, PackedOffsetStartDeltaBlock, PackedOffsetLengthBlock --> {@link PackedInts PackedInts}
+ *   • SumPayLength --> {@link DataOutput#writeVInt VInt}
+ *   • PayData --> {@link DataOutput#writeByte byte}^SumPayLength
+ *   • Footer --> {@link CodecUtil#writeFooter CodecFooter}
+ *
+ *   Notes:
+ *
+ *   • The order of TermPayloads/TermOffsets will be the same as TermPositions; note that part of the payloads/offsets
+ *     are stored in .pos.
+ *   • The procedure by which PackedPayLengthBlock and PackedOffsetLengthBlock are generated is the same as for
+ *     PackedFreqBlock in the chapter Frequencies and Skip Data, while PackedOffsetStartDeltaBlock follows the same
+ *     procedure as PackedDocDeltaBlock.
+ *   • PackedPayBlockNum is always equal to PackedPosBlockNum, for the same term. It is also a synonym for
+ *     PackedOffsetBlockNum.
+ *   • SumPayLength is the total length of payloads written within one block; it should be the sum of the PayLengths in
+ *     one packed block.
+ *   • PayLength in PackedPayLengthBlock is the length of each payload associated with the current position.
+ */
+public final class ES812PostingsFormat extends PostingsFormat {
+
+    /**
+     * Filename extension for document number, frequencies, and skip data. See chapter: Frequencies and Skip Data
+     */
+    public static final String DOC_EXTENSION = "doc";
+
+    /** Filename extension for positions. See chapter: Positions */
+    public static final String POS_EXTENSION = "pos";
+
+    /**
+     * Filename extension for payloads and offsets. See chapter: Payloads and Offsets
+     */
+    public static final String PAY_EXTENSION = "pay";
+
+    /** Size of blocks. */
+    public static final int BLOCK_SIZE = ForUtil.BLOCK_SIZE;
+
+    /**
+     * Expert: The maximum number of skip levels. Smaller values result in slightly smaller indexes,
+     * but slower skipping in big posting lists.
+     */
+    static final int MAX_SKIP_LEVELS = 10;
+
+    static final String CODEC_NAME = "ES812Postings";
+    static final String TERMS_CODEC = "ES812PostingsWriterTerms";
+    static final String DOC_CODEC = "ES812PostingsWriterDoc";
+    static final String POS_CODEC = "ES812PostingsWriterPos";
+    static final String PAY_CODEC = "ES812PostingsWriterPay";
+
+    // Increment version to change it
+    static final int VERSION_START = 0;
+    static final int VERSION_CURRENT = VERSION_START;
+
+    /** Creates read-only {@code ES812PostingsFormat}. */
+    public ES812PostingsFormat() {
+        super(CODEC_NAME);
+    }
+
+    @Override
+    public String toString() {
+        return getName();
+    }
+
+    @Override
+    public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
+        PostingsWriterBase postingsWriter = new ES812PostingsWriter(state);
+        boolean success = false;
+        try {
+            FieldsConsumer ret = new Lucene90BlockTreeTermsWriter(
+                state,
+                postingsWriter,
+                Lucene90BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE,
+                Lucene90BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE
+            );
+            success = true;
+            return ret;
+        } finally {
+            if (success == false) {
+                IOUtils.closeWhileHandlingException(postingsWriter);
+            }
+        }
+    }
+
+    @Override
+    public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
+        PostingsReaderBase postingsReader = new ES812PostingsReader(state);
+        boolean success = false;
+        try {
+            FieldsProducer ret = new Lucene90BlockTreeTermsReader(postingsReader, state);
+            success = true;
+            return ret;
+        } finally {
+            if (success == false) {
+                IOUtils.closeWhileHandlingException(postingsReader);
+            }
+        }
+    }
+
+    /**
+     * Holds all state required for {@link ES812PostingsReader} to produce a {@link
+     * org.apache.lucene.index.PostingsEnum} without re-seeking the terms dict.
+     */
+    public static final class IntBlockTermState extends BlockTermState {
+        /** file pointer to the start of the doc ids enumeration, in {@link #DOC_EXTENSION} file */
+        public long docStartFP;
+
+        /** file pointer to the start of the positions enumeration, in {@link #POS_EXTENSION} file */
+        public long posStartFP;
+
+        /** file pointer to the start of the payloads enumeration, in {@link #PAY_EXTENSION} file */
+        public long payStartFP;
+
+        /**
+         * file offset for the start of the skip list, relative to docStartFP, if there are more than
+         * {@link ForUtil#BLOCK_SIZE} docs; otherwise -1
+         */
+        public long skipOffset;
+
+        /**
+         * file offset for the last position in the last block, if there are more than {@link
+         * ForUtil#BLOCK_SIZE} positions; otherwise -1
+         *
+         *
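+         * (With the current ForUtil block size, this means totalTermFreq > 128.)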
One might think to use total term frequency to track how many positions are left to read + * as we decode the blocks, and decode the last block differently when num_left_positions < + * BLOCK_SIZE. Unfortunately this won't work since the tracking will be messed up when we skip + * blocks as the skipper will only tell us new position offset (start of block) and number of + * positions to skip for that block, without telling us how many positions it has skipped. + */ + public long lastPosBlockOffset; + + /** + * docid when there is a single pulsed posting, otherwise -1. freq is always implicitly + * totalTermFreq in this case. + */ + public int singletonDocID; + + /** Sole constructor. */ + public IntBlockTermState() { + skipOffset = -1; + lastPosBlockOffset = -1; + singletonDocID = -1; + } + + @Override + public IntBlockTermState clone() { + IntBlockTermState other = new IntBlockTermState(); + other.copyFrom(this); + return other; + } + + @Override + public void copyFrom(TermState _other) { + super.copyFrom(_other); + IntBlockTermState other = (IntBlockTermState) _other; + docStartFP = other.docStartFP; + posStartFP = other.posStartFP; + payStartFP = other.payStartFP; + lastPosBlockOffset = other.lastPosBlockOffset; + skipOffset = other.skipOffset; + singletonDocID = other.singletonDocID; + } + + @Override + public String toString() { + return super.toString() + + " docStartFP=" + + docStartFP + + " posStartFP=" + + posStartFP + + " payStartFP=" + + payStartFP + + " lastPosBlockOffset=" + + lastPosBlockOffset + + " singletonDocID=" + + singletonDocID; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java new file mode 100644 index 0000000000000..8b3d5d02a04c0 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java @@ -0,0 +1,1990 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. 
+ */ +package org.elasticsearch.index.codec.postings; + +import org.apache.lucene.codecs.BlockTermState; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.codecs.PostingsReaderBase; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.Impacts; +import org.apache.lucene.index.ImpactsEnum; +import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.SegmentReadState; +import org.apache.lucene.index.SlowImpactsEnum; +import org.apache.lucene.store.DataInput; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.BitUtil; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.codec.postings.ES812PostingsFormat.IntBlockTermState; + +import java.io.IOException; +import java.util.Arrays; + +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.DOC_CODEC; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.MAX_SKIP_LEVELS; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.PAY_CODEC; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.POS_CODEC; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.TERMS_CODEC; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.VERSION_CURRENT; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.VERSION_START; +import static org.elasticsearch.index.codec.postings.ForUtil.BLOCK_SIZE; + +/** + * Concrete class that reads docId(maybe frq,pos,offset,payloads) list with postings format. + * + */ +final class ES812PostingsReader extends PostingsReaderBase { + + private final IndexInput docIn; + private final IndexInput posIn; + private final IndexInput payIn; + + private final int version; + + /** Sole constructor. */ + ES812PostingsReader(SegmentReadState state) throws IOException { + boolean success = false; + IndexInput docIn = null; + IndexInput posIn = null; + IndexInput payIn = null; + + // NOTE: these data files are too costly to verify checksum against all the bytes on open, + // but for now we at least verify proper structure of the checksum footer: which looks + // for FOOTER_MAGIC + algorithmID. This is cheap and can detect some forms of corruption + // such as file truncation. 
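 + // (CodecUtil.retrieveChecksum below seeks straight to the footer, validates that structure, and returns the stored
 + // checksum without reading the rest of the file.)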
+ + String docName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, ES812PostingsFormat.DOC_EXTENSION); + try { + docIn = state.directory.openInput(docName, state.context); + version = CodecUtil.checkIndexHeader( + docIn, + DOC_CODEC, + VERSION_START, + VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix + ); + CodecUtil.retrieveChecksum(docIn); + + if (state.fieldInfos.hasProx()) { + String proxName = IndexFileNames.segmentFileName( + state.segmentInfo.name, + state.segmentSuffix, + ES812PostingsFormat.POS_EXTENSION + ); + posIn = state.directory.openInput(proxName, state.context); + CodecUtil.checkIndexHeader(posIn, POS_CODEC, version, version, state.segmentInfo.getId(), state.segmentSuffix); + CodecUtil.retrieveChecksum(posIn); + + if (state.fieldInfos.hasPayloads() || state.fieldInfos.hasOffsets()) { + String payName = IndexFileNames.segmentFileName( + state.segmentInfo.name, + state.segmentSuffix, + ES812PostingsFormat.PAY_EXTENSION + ); + payIn = state.directory.openInput(payName, state.context); + CodecUtil.checkIndexHeader(payIn, PAY_CODEC, version, version, state.segmentInfo.getId(), state.segmentSuffix); + CodecUtil.retrieveChecksum(payIn); + } + } + + this.docIn = docIn; + this.posIn = posIn; + this.payIn = payIn; + success = true; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(docIn, posIn, payIn); + } + } + } + + @Override + public void init(IndexInput termsIn, SegmentReadState state) throws IOException { + // Make sure we are talking to the matching postings writer + CodecUtil.checkIndexHeader(termsIn, TERMS_CODEC, VERSION_START, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + final int indexBlockSize = termsIn.readVInt(); + if (indexBlockSize != BLOCK_SIZE) { + throw new IllegalStateException("index-time BLOCK_SIZE (" + indexBlockSize + ") != read-time BLOCK_SIZE (" + BLOCK_SIZE + ")"); + } + } + + /** Read values that have been written using variable-length encoding instead of bit-packing. 
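 + * For example, with frequencies indexed, the VInt sequence 15, 8, 3 decodes to doc deltas 7 and 4 with frequencies 1 and 3:
 + * 15 is odd, so its delta is 15 >>> 1 = 7 with an implicit frequency of 1; 8 is even, so its delta is 4 and the frequency 3
 + * is read as a separate VInt.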
*/ + static void readVIntBlock(IndexInput docIn, long[] docBuffer, long[] freqBuffer, int num, boolean indexHasFreq) throws IOException { + if (indexHasFreq) { + for (int i = 0; i < num; i++) { + final int code = docIn.readVInt(); + docBuffer[i] = code >>> 1; + if ((code & 1) != 0) { + freqBuffer[i] = 1; + } else { + freqBuffer[i] = docIn.readVInt(); + } + } + } else { + for (int i = 0; i < num; i++) { + docBuffer[i] = docIn.readVInt(); + } + } + } + + static void prefixSum(long[] buffer, int count, long base) { + buffer[0] += base; + for (int i = 1; i < count; ++i) { + buffer[i] += buffer[i - 1]; + } + } + + static int findFirstGreater(long[] buffer, int target, int from) { + for (int i = from; i < BLOCK_SIZE; ++i) { + if (buffer[i] >= target) { + return i; + } + } + return BLOCK_SIZE; + } + + @Override + public BlockTermState newTermState() { + return new IntBlockTermState(); + } + + @Override + public void close() throws IOException { + IOUtils.close(docIn, posIn, payIn); + } + + @Override + public void decodeTerm(DataInput in, FieldInfo fieldInfo, BlockTermState _termState, boolean absolute) throws IOException { + final IntBlockTermState termState = (IntBlockTermState) _termState; + final boolean fieldHasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0; + final boolean fieldHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; + final boolean fieldHasPayloads = fieldInfo.hasPayloads(); + + if (absolute) { + termState.docStartFP = 0; + termState.posStartFP = 0; + termState.payStartFP = 0; + } + + final long l = in.readVLong(); + if ((l & 0x01) == 0) { + termState.docStartFP += l >>> 1; + if (termState.docFreq == 1) { + termState.singletonDocID = in.readVInt(); + } else { + termState.singletonDocID = -1; + } + } else { + assert absolute == false; + assert termState.singletonDocID != -1; + termState.singletonDocID += (int) BitUtil.zigZagDecode(l >>> 1); + } + + if (fieldHasPositions) { + termState.posStartFP += in.readVLong(); + if (fieldHasOffsets || fieldHasPayloads) { + termState.payStartFP += in.readVLong(); + } + if (termState.totalTermFreq > BLOCK_SIZE) { + termState.lastPosBlockOffset = in.readVLong(); + } else { + termState.lastPosBlockOffset = -1; + } + } + + if (termState.docFreq > BLOCK_SIZE) { + termState.skipOffset = in.readVLong(); + } else { + termState.skipOffset = -1; + } + } + + @Override + public PostingsEnum postings(FieldInfo fieldInfo, BlockTermState termState, PostingsEnum reuse, int flags) throws IOException { + + boolean indexHasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0; + + if (indexHasPositions == false || PostingsEnum.featureRequested(flags, PostingsEnum.POSITIONS) == false) { + BlockDocsEnum docsEnum; + if (reuse instanceof BlockDocsEnum) { + docsEnum = (BlockDocsEnum) reuse; + if (docsEnum.canReuse(docIn, fieldInfo) == false) { + docsEnum = new BlockDocsEnum(fieldInfo); + } + } else { + docsEnum = new BlockDocsEnum(fieldInfo); + } + return docsEnum.reset((IntBlockTermState) termState, flags); + } else { + EverythingEnum everythingEnum; + if (reuse instanceof EverythingEnum) { + everythingEnum = (EverythingEnum) reuse; + if (everythingEnum.canReuse(docIn, fieldInfo) == false) { + everythingEnum = new EverythingEnum(fieldInfo); + } + } else { + everythingEnum = new EverythingEnum(fieldInfo); + } + return everythingEnum.reset((IntBlockTermState) termState, flags); + } + } + + @Override + public 
ImpactsEnum impacts(FieldInfo fieldInfo, BlockTermState state, int flags) throws IOException { + if (state.docFreq <= BLOCK_SIZE) { + // no skip data + return new SlowImpactsEnum(postings(fieldInfo, state, null, flags)); + } + + final boolean indexHasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0; + final boolean indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; + final boolean indexHasPayloads = fieldInfo.hasPayloads(); + + if (indexHasPositions == false || PostingsEnum.featureRequested(flags, PostingsEnum.POSITIONS) == false) { + return new BlockImpactsDocsEnum(fieldInfo, (IntBlockTermState) state); + } + + if (indexHasPositions + && PostingsEnum.featureRequested(flags, PostingsEnum.POSITIONS) + && (indexHasOffsets == false || PostingsEnum.featureRequested(flags, PostingsEnum.OFFSETS) == false) + && (indexHasPayloads == false || PostingsEnum.featureRequested(flags, PostingsEnum.PAYLOADS) == false)) { + return new BlockImpactsPostingsEnum(fieldInfo, (IntBlockTermState) state); + } + + return new BlockImpactsEverythingEnum(fieldInfo, (IntBlockTermState) state, flags); + } + + final class BlockDocsEnum extends PostingsEnum { + + final PForUtil pforUtil = new PForUtil(new ForUtil()); + + private final long[] docBuffer = new long[BLOCK_SIZE + 1]; + private final long[] freqBuffer = new long[BLOCK_SIZE]; + + private int docBufferUpto; + + private ES812SkipReader skipper; + private boolean skipped; + + final IndexInput startDocIn; + + IndexInput docIn; + final boolean indexHasFreq; + final boolean indexHasPos; + final boolean indexHasOffsets; + final boolean indexHasPayloads; + + private int docFreq; // number of docs in this posting list + private long totalTermFreq; // sum of freqBuffer in this posting list (or docFreq when omitted) + private int blockUpto; // number of docs in or before the current block + private int doc; // doc we last read + private long accum; // accumulator for doc deltas + + // Where this term's postings start in the .doc file: + private long docTermStartFP; + + // Where this term's skip data starts (after + // docTermStartFP) in the .doc file (or -1 if there is + // no skip data for this term): + private long skipOffset; + + // docID for next skip point, we won't use skipper if + // target docID is not larger than this + private int nextSkipDoc; + + private boolean needsFreq; // true if the caller actually needs frequencies + // as we read freqBuffer lazily, isFreqsRead shows if freqBuffer are read for the current block + // always true when we don't have freqBuffer (indexHasFreq=false) or don't need freqBuffer + // (needsFreq=false) + private boolean isFreqsRead; + private int singletonDocID; // docid when there is a single pulsed posting, otherwise -1 + + BlockDocsEnum(FieldInfo fieldInfo) throws IOException { + this.startDocIn = ES812PostingsReader.this.docIn; + this.docIn = null; + indexHasFreq = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0; + indexHasPos = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0; + indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; + indexHasPayloads = fieldInfo.hasPayloads(); + // We set the last element of docBuffer to NO_MORE_DOCS, it helps save conditionals in + // advance() + docBuffer[BLOCK_SIZE] = NO_MORE_DOCS; + } + + public boolean canReuse(IndexInput docIn, FieldInfo fieldInfo) { + 
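+ // Reuse is only safe while this enum still reads from the same .doc file and the per-field index options match: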
return docIn == startDocIn + && indexHasFreq == (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0) + && indexHasPos == (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) + && indexHasPayloads == fieldInfo.hasPayloads(); + } + + public PostingsEnum reset(IntBlockTermState termState, int flags) throws IOException { + docFreq = termState.docFreq; + totalTermFreq = indexHasFreq ? termState.totalTermFreq : docFreq; + docTermStartFP = termState.docStartFP; + skipOffset = termState.skipOffset; + singletonDocID = termState.singletonDocID; + if (docFreq > 1) { + if (docIn == null) { + // lazy init + docIn = startDocIn.clone(); + } + docIn.seek(docTermStartFP); + } + + doc = -1; + this.needsFreq = PostingsEnum.featureRequested(flags, PostingsEnum.FREQS); + this.isFreqsRead = true; + if (indexHasFreq == false || needsFreq == false) { + for (int i = 0; i < ForUtil.BLOCK_SIZE; ++i) { + freqBuffer[i] = 1; + } + } + accum = 0; + blockUpto = 0; + nextSkipDoc = BLOCK_SIZE - 1; // we won't skip if target is found in first block + docBufferUpto = BLOCK_SIZE; + skipped = false; + return this; + } + + @Override + public int freq() throws IOException { + if (isFreqsRead == false) { + pforUtil.decode(docIn, freqBuffer); // read freqBuffer for this block + isFreqsRead = true; + } + return (int) freqBuffer[docBufferUpto - 1]; + } + + @Override + public int nextPosition() throws IOException { + return -1; + } + + @Override + public int startOffset() throws IOException { + return -1; + } + + @Override + public int endOffset() throws IOException { + return -1; + } + + @Override + public BytesRef getPayload() throws IOException { + return null; + } + + @Override + public int docID() { + return doc; + } + + private void refillDocs() throws IOException { + // Check if we skipped reading the previous block of freqBuffer, and if yes, position docIn + // after it + if (isFreqsRead == false) { + pforUtil.skip(docIn); + isFreqsRead = true; + } + + final int left = docFreq - blockUpto; + assert left >= 0; + + if (left >= BLOCK_SIZE) { + pforUtil.decodeAndPrefixSum(docIn, accum, docBuffer); + + if (indexHasFreq) { + if (needsFreq) { + isFreqsRead = false; + } else { + pforUtil.skip(docIn); // skip over freqBuffer if we don't need them at all + } + } + blockUpto += BLOCK_SIZE; + } else if (docFreq == 1) { + docBuffer[0] = singletonDocID; + freqBuffer[0] = totalTermFreq; + docBuffer[1] = NO_MORE_DOCS; + blockUpto++; + } else { + // Read vInts: + readVIntBlock(docIn, docBuffer, freqBuffer, left, indexHasFreq); + prefixSum(docBuffer, left, accum); + docBuffer[left] = NO_MORE_DOCS; + blockUpto += left; + } + accum = docBuffer[BLOCK_SIZE - 1]; + docBufferUpto = 0; + assert docBuffer[BLOCK_SIZE] == NO_MORE_DOCS; + } + + @Override + public int nextDoc() throws IOException { + if (docBufferUpto == BLOCK_SIZE) { + refillDocs(); // we don't need to load freqBuffer for now (will be loaded later if + // necessary) + } + + doc = (int) docBuffer[docBufferUpto]; + docBufferUpto++; + return doc; + } + + @Override + public int advance(int target) throws IOException { + // current skip docID < docIDs generated from current buffer <= next skip docID + // we don't need to skip if target is buffered already + if (docFreq > BLOCK_SIZE && target > nextSkipDoc) { + + if (skipper == null) { + // Lazy init: first time this enum has ever been used for skipping + skipper = new ES812SkipReader(docIn.clone(), MAX_SKIP_LEVELS, indexHasPos, indexHasOffsets, indexHasPayloads); + } + + if (skipped == 
false) { + assert skipOffset != -1; + // This is the first time this enum has skipped + // since reset() was called; load the skip data: + skipper.init(docTermStartFP + skipOffset, docTermStartFP, 0, 0, docFreq); + skipped = true; + } + + // always plus one to fix the result, since skip position in Lucene90SkipReader + // is a little different from MultiLevelSkipListReader + final int newDocUpto = skipper.skipTo(target) + 1; + + if (newDocUpto >= blockUpto) { + // Skipper moved + assert newDocUpto % BLOCK_SIZE == 0 : "got " + newDocUpto; + blockUpto = newDocUpto; + + // Force to read next block + docBufferUpto = BLOCK_SIZE; + accum = skipper.getDoc(); // actually, this is just lastSkipEntry + docIn.seek(skipper.getDocPointer()); // now point to the block we want to search + // even if freqBuffer were not read from the previous block, we will mark them as read, + // as we don't need to skip the previous block freqBuffer in refillDocs, + // as we have already positioned docIn where it needs to be. + isFreqsRead = true; + } + // next time we call advance, this is used to + // foresee whether skipper is necessary. + nextSkipDoc = skipper.getNextSkipDoc(); + } + if (docBufferUpto == BLOCK_SIZE) { + refillDocs(); + } + + // Now scan... this is an inlined/pared down version + // of nextDoc(): + long doc; + while (true) { + doc = docBuffer[docBufferUpto]; + + if (doc >= target) { + break; + } + ++docBufferUpto; + } + + docBufferUpto++; + return this.doc = (int) doc; + } + + @Override + public long cost() { + return docFreq; + } + } + + // Also handles payloads + offsets + final class EverythingEnum extends PostingsEnum { + + final PForUtil pforUtil = new PForUtil(new ForUtil()); + + private final long[] docBuffer = new long[BLOCK_SIZE + 1]; + private final long[] freqBuffer = new long[BLOCK_SIZE + 1]; + private final long[] posDeltaBuffer = new long[BLOCK_SIZE]; + + private final long[] payloadLengthBuffer; + private final long[] offsetStartDeltaBuffer; + private final long[] offsetLengthBuffer; + + private byte[] payloadBytes; + private int payloadByteUpto; + private int payloadLength; + + private int lastStartOffset; + private int startOffset; + private int endOffset; + + private int docBufferUpto; + private int posBufferUpto; + + private ES812SkipReader skipper; + private boolean skipped; + + final IndexInput startDocIn; + + IndexInput docIn; + final IndexInput posIn; + final IndexInput payIn; + final BytesRef payload; + + final boolean indexHasOffsets; + final boolean indexHasPayloads; + + private int docFreq; // number of docs in this posting list + private long totalTermFreq; // number of positions in this posting list + private int blockUpto; // number of docs in or before the current block + private int doc; // doc we last read + private long accum; // accumulator for doc deltas + private int freq; // freq we last read + private int position; // current position + + // how many positions "behind" we are; nextPosition must + // skip these to "catch up": + private int posPendingCount; + + // Lazy pos seek: if != -1 then we must seek to this FP + // before reading positions: + private long posPendingFP; + + // Lazy pay seek: if != -1 then we must seek to this FP + // before reading payloads/offsets: + private long payPendingFP; + + // Where this term's postings start in the .doc file: + private long docTermStartFP; + + // Where this term's postings start in the .pos file: + private long posTermStartFP; + + // Where this term's payloads/offsets start in the .pay + // file: + private long
payTermStartFP; + + // File pointer where the last (vInt encoded) pos delta + // block is. We need this to know whether to bulk + // decode vs vInt decode the block: + private long lastPosBlockFP; + + // Where this term's skip data starts (after + // docTermStartFP) in the .doc file (or -1 if there is + // no skip data for this term): + private long skipOffset; + + private int nextSkipDoc; + + private boolean needsOffsets; // true if we actually need offsets + private boolean needsPayloads; // true if we actually need payloads + private int singletonDocID; // docid when there is a single pulsed posting, otherwise -1 + + EverythingEnum(FieldInfo fieldInfo) throws IOException { + indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; + indexHasPayloads = fieldInfo.hasPayloads(); + + this.startDocIn = ES812PostingsReader.this.docIn; + this.docIn = null; + this.posIn = ES812PostingsReader.this.posIn.clone(); + if (indexHasOffsets || indexHasPayloads) { + this.payIn = ES812PostingsReader.this.payIn.clone(); + } else { + this.payIn = null; + } + if (indexHasOffsets) { + offsetStartDeltaBuffer = new long[BLOCK_SIZE]; + offsetLengthBuffer = new long[BLOCK_SIZE]; + } else { + offsetStartDeltaBuffer = null; + offsetLengthBuffer = null; + startOffset = -1; + endOffset = -1; + } + + if (indexHasPayloads) { + payloadLengthBuffer = new long[BLOCK_SIZE]; + payloadBytes = new byte[128]; + payload = new BytesRef(); + } else { + payloadLengthBuffer = null; + payloadBytes = null; + payload = null; + } + + // We set the last element of docBuffer to NO_MORE_DOCS, it helps save conditionals in + // advance() + docBuffer[BLOCK_SIZE] = NO_MORE_DOCS; + } + + public boolean canReuse(IndexInput docIn, FieldInfo fieldInfo) { + return docIn == startDocIn + && indexHasOffsets == (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0) + && indexHasPayloads == fieldInfo.hasPayloads(); + } + + public EverythingEnum reset(IntBlockTermState termState, int flags) throws IOException { + docFreq = termState.docFreq; + docTermStartFP = termState.docStartFP; + posTermStartFP = termState.posStartFP; + payTermStartFP = termState.payStartFP; + skipOffset = termState.skipOffset; + totalTermFreq = termState.totalTermFreq; + singletonDocID = termState.singletonDocID; + if (docFreq > 1) { + if (docIn == null) { + // lazy init + docIn = startDocIn.clone(); + } + docIn.seek(docTermStartFP); + } + posPendingFP = posTermStartFP; + payPendingFP = payTermStartFP; + posPendingCount = 0; + if (termState.totalTermFreq < BLOCK_SIZE) { + lastPosBlockFP = posTermStartFP; + } else if (termState.totalTermFreq == BLOCK_SIZE) { + lastPosBlockFP = -1; + } else { + lastPosBlockFP = posTermStartFP + termState.lastPosBlockOffset; + } + + this.needsOffsets = PostingsEnum.featureRequested(flags, PostingsEnum.OFFSETS); + this.needsPayloads = PostingsEnum.featureRequested(flags, PostingsEnum.PAYLOADS); + + doc = -1; + accum = 0; + blockUpto = 0; + if (docFreq > BLOCK_SIZE) { + nextSkipDoc = BLOCK_SIZE - 1; // we won't skip if target is found in first block + } else { + nextSkipDoc = NO_MORE_DOCS; // not enough docs for skipping + } + docBufferUpto = BLOCK_SIZE; + skipped = false; + return this; + } + + @Override + public int freq() throws IOException { + return freq; + } + + @Override + public int docID() { + return doc; + } + + private void refillDocs() throws IOException { + final int left = docFreq - blockUpto; + assert left >= 0; + + if (left >= 
BLOCK_SIZE) { + pforUtil.decodeAndPrefixSum(docIn, accum, docBuffer); + pforUtil.decode(docIn, freqBuffer); + blockUpto += BLOCK_SIZE; + } else if (docFreq == 1) { + docBuffer[0] = singletonDocID; + freqBuffer[0] = totalTermFreq; + docBuffer[1] = NO_MORE_DOCS; + blockUpto++; + } else { + readVIntBlock(docIn, docBuffer, freqBuffer, left, true); + prefixSum(docBuffer, left, accum); + docBuffer[left] = NO_MORE_DOCS; + blockUpto += left; + } + accum = docBuffer[BLOCK_SIZE - 1]; + docBufferUpto = 0; + assert docBuffer[BLOCK_SIZE] == NO_MORE_DOCS; + } + + private void refillPositions() throws IOException { + if (posIn.getFilePointer() == lastPosBlockFP) { + final int count = (int) (totalTermFreq % BLOCK_SIZE); + int payloadLength = 0; + int offsetLength = 0; + payloadByteUpto = 0; + for (int i = 0; i < count; i++) { + int code = posIn.readVInt(); + if (indexHasPayloads) { + if ((code & 1) != 0) { + payloadLength = posIn.readVInt(); + } + payloadLengthBuffer[i] = payloadLength; + posDeltaBuffer[i] = code >>> 1; + if (payloadLength != 0) { + if (payloadByteUpto + payloadLength > payloadBytes.length) { + payloadBytes = ArrayUtil.grow(payloadBytes, payloadByteUpto + payloadLength); + } + posIn.readBytes(payloadBytes, payloadByteUpto, payloadLength); + payloadByteUpto += payloadLength; + } + } else { + posDeltaBuffer[i] = code; + } + + if (indexHasOffsets) { + int deltaCode = posIn.readVInt(); + if ((deltaCode & 1) != 0) { + offsetLength = posIn.readVInt(); + } + offsetStartDeltaBuffer[i] = deltaCode >>> 1; + offsetLengthBuffer[i] = offsetLength; + } + } + payloadByteUpto = 0; + } else { + pforUtil.decode(posIn, posDeltaBuffer); + + if (indexHasPayloads) { + if (needsPayloads) { + pforUtil.decode(payIn, payloadLengthBuffer); + int numBytes = payIn.readVInt(); + + if (numBytes > payloadBytes.length) { + payloadBytes = ArrayUtil.growNoCopy(payloadBytes, numBytes); + } + payIn.readBytes(payloadBytes, 0, numBytes); + } else { + // this works, because when writing a vint block we always force the first length to be + // written + pforUtil.skip(payIn); // skip over lengths + int numBytes = payIn.readVInt(); // read length of payloadBytes + payIn.seek(payIn.getFilePointer() + numBytes); // skip over payloadBytes + } + payloadByteUpto = 0; + } + + if (indexHasOffsets) { + if (needsOffsets) { + pforUtil.decode(payIn, offsetStartDeltaBuffer); + pforUtil.decode(payIn, offsetLengthBuffer); + } else { + // this works, because when writing a vint block we always force the first length to be + // written + pforUtil.skip(payIn); // skip over starts + pforUtil.skip(payIn); // skip over lengths + } + } + } + } + + @Override + public int nextDoc() throws IOException { + if (docBufferUpto == BLOCK_SIZE) { + refillDocs(); + } + + doc = (int) docBuffer[docBufferUpto]; + freq = (int) freqBuffer[docBufferUpto]; + posPendingCount += freq; + docBufferUpto++; + + position = 0; + lastStartOffset = 0; + return doc; + } + + @Override + public int advance(int target) throws IOException { + if (target > nextSkipDoc) { + if (skipper == null) { + // Lazy init: first time this enum has ever been used for skipping + skipper = new ES812SkipReader(docIn.clone(), MAX_SKIP_LEVELS, true, indexHasOffsets, indexHasPayloads); + } + + if (skipped == false) { + assert skipOffset != -1; + // This is the first time this enum has skipped + // since reset() was called; load the skip data: + skipper.init(docTermStartFP + skipOffset, docTermStartFP, posTermStartFP, payTermStartFP, docFreq); + skipped = true; + } + + final int newDocUpto = 
skipper.skipTo(target) + 1; + + if (newDocUpto > blockUpto - BLOCK_SIZE + docBufferUpto) { + // Skipper moved + assert newDocUpto % BLOCK_SIZE == 0 : "got " + newDocUpto; + blockUpto = newDocUpto; + + // Force to read next block + docBufferUpto = BLOCK_SIZE; + accum = skipper.getDoc(); + docIn.seek(skipper.getDocPointer()); + posPendingFP = skipper.getPosPointer(); + payPendingFP = skipper.getPayPointer(); + posPendingCount = skipper.getPosBufferUpto(); + lastStartOffset = 0; // new document + payloadByteUpto = skipper.getPayloadByteUpto(); + } + nextSkipDoc = skipper.getNextSkipDoc(); + } + if (docBufferUpto == BLOCK_SIZE) { + refillDocs(); + } + + // Now scan: + long doc; + while (true) { + doc = docBuffer[docBufferUpto]; + freq = (int) freqBuffer[docBufferUpto]; + posPendingCount += freq; + docBufferUpto++; + + if (doc >= target) { + break; + } + } + + position = 0; + lastStartOffset = 0; + return this.doc = (int) doc; + } + + // TODO: in theory we could avoid loading frq block + // when not needed, ie, use skip data to load how far to + // seek the pos pointer ... instead of having to load frq + // blocks only to sum up how many positions to skip + private void skipPositions() throws IOException { + // Skip positions now: + int toSkip = posPendingCount - freq; + // if (DEBUG) { + // System.out.println(" FPR.skipPositions: toSkip=" + toSkip); + // } + + final int leftInBlock = BLOCK_SIZE - posBufferUpto; + if (toSkip < leftInBlock) { + int end = posBufferUpto + toSkip; + while (posBufferUpto < end) { + if (indexHasPayloads) { + payloadByteUpto += (int) payloadLengthBuffer[posBufferUpto]; + } + posBufferUpto++; + } + } else { + toSkip -= leftInBlock; + while (toSkip >= BLOCK_SIZE) { + assert posIn.getFilePointer() != lastPosBlockFP; + pforUtil.skip(posIn); + + if (indexHasPayloads) { + // Skip payloadLength block: + pforUtil.skip(payIn); + + // Skip payloadBytes block: + int numBytes = payIn.readVInt(); + payIn.seek(payIn.getFilePointer() + numBytes); + } + + if (indexHasOffsets) { + pforUtil.skip(payIn); + pforUtil.skip(payIn); + } + toSkip -= BLOCK_SIZE; + } + refillPositions(); + payloadByteUpto = 0; + posBufferUpto = 0; + while (posBufferUpto < toSkip) { + if (indexHasPayloads) { + payloadByteUpto += (int) payloadLengthBuffer[posBufferUpto]; + } + posBufferUpto++; + } + } + + position = 0; + lastStartOffset = 0; + } + + @Override + public int nextPosition() throws IOException { + assert posPendingCount > 0; + + if (posPendingFP != -1) { + posIn.seek(posPendingFP); + posPendingFP = -1; + + if (payPendingFP != -1 && payIn != null) { + payIn.seek(payPendingFP); + payPendingFP = -1; + } + + // Force buffer refill: + posBufferUpto = BLOCK_SIZE; + } + + if (posPendingCount > freq) { + skipPositions(); + posPendingCount = freq; + } + + if (posBufferUpto == BLOCK_SIZE) { + refillPositions(); + posBufferUpto = 0; + } + position += (int) posDeltaBuffer[posBufferUpto]; + + if (indexHasPayloads) { + payloadLength = (int) payloadLengthBuffer[posBufferUpto]; + payload.bytes = payloadBytes; + payload.offset = payloadByteUpto; + payload.length = payloadLength; + payloadByteUpto += payloadLength; + } + + if (indexHasOffsets) { + startOffset = lastStartOffset + (int) offsetStartDeltaBuffer[posBufferUpto]; + endOffset = startOffset + (int) offsetLengthBuffer[posBufferUpto]; + lastStartOffset = startOffset; + } + + posBufferUpto++; + posPendingCount--; + return position; + } + + @Override + public int startOffset() { + return startOffset; + } + + @Override + public int endOffset() { + return endOffset; 
+ } + + @Override + public BytesRef getPayload() { + if (payloadLength == 0) { + return null; + } else { + return payload; + } + } + + @Override + public long cost() { + return docFreq; + } + } + + final class BlockImpactsDocsEnum extends ImpactsEnum { + + final PForUtil pforUtil = new PForUtil(new ForUtil()); + + private final long[] docBuffer = new long[BLOCK_SIZE + 1]; + private final long[] freqBuffer = new long[BLOCK_SIZE]; + + private int docBufferUpto; + + private final ES812ScoreSkipReader skipper; + + final IndexInput docIn; + + final boolean indexHasFreqs; + + private int docFreq; // number of docs in this posting list + private int blockUpto; // number of documents in or before the current block + private int doc; // doc we last read + private long accum; // accumulator for doc deltas + + private int nextSkipDoc = -1; + + private long seekTo = -1; + + // as we read freqBuffer lazily, isFreqsRead shows if freqBuffer are read for the current block + // always true when we don't have freqBuffer (indexHasFreq=false) or don't need freqBuffer + // (needsFreq=false) + private boolean isFreqsRead; + + BlockImpactsDocsEnum(FieldInfo fieldInfo, IntBlockTermState termState) throws IOException { + indexHasFreqs = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0; + final boolean indexHasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0; + final boolean indexHasOffsets = fieldInfo.getIndexOptions() + .compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; + final boolean indexHasPayloads = fieldInfo.hasPayloads(); + + this.docIn = ES812PostingsReader.this.docIn.clone(); + + docFreq = termState.docFreq; + docIn.seek(termState.docStartFP); + + doc = -1; + accum = 0; + blockUpto = 0; + docBufferUpto = BLOCK_SIZE; + + skipper = new ES812ScoreSkipReader(docIn.clone(), MAX_SKIP_LEVELS, indexHasPositions, indexHasOffsets, indexHasPayloads); + skipper.init( + termState.docStartFP + termState.skipOffset, + termState.docStartFP, + termState.posStartFP, + termState.payStartFP, + docFreq + ); + + // We set the last element of docBuffer to NO_MORE_DOCS, it helps save conditionals in + // advance() + docBuffer[BLOCK_SIZE] = NO_MORE_DOCS; + this.isFreqsRead = true; + if (indexHasFreqs == false) { + Arrays.fill(freqBuffer, 1L); + } + } + + @Override + public int freq() throws IOException { + if (isFreqsRead == false) { + pforUtil.decode(docIn, freqBuffer); // read freqBuffer for this block + isFreqsRead = true; + } + return (int) freqBuffer[docBufferUpto - 1]; + } + + @Override + public int docID() { + return doc; + } + + private void refillDocs() throws IOException { + // Check if we skipped reading the previous block of freqBuffer, and if yes, position docIn + // after it + if (isFreqsRead == false) { + pforUtil.skip(docIn); + isFreqsRead = true; + } + + final int left = docFreq - blockUpto; + assert left >= 0; + + if (left >= BLOCK_SIZE) { + pforUtil.decodeAndPrefixSum(docIn, accum, docBuffer); + if (indexHasFreqs) { + isFreqsRead = false; + } + blockUpto += BLOCK_SIZE; + } else { + readVIntBlock(docIn, docBuffer, freqBuffer, left, indexHasFreqs); + prefixSum(docBuffer, left, accum); + docBuffer[left] = NO_MORE_DOCS; + blockUpto += left; + } + accum = docBuffer[BLOCK_SIZE - 1]; + docBufferUpto = 0; + assert docBuffer[BLOCK_SIZE] == NO_MORE_DOCS; + } + + @Override + public void advanceShallow(int target) throws IOException { + if (target > nextSkipDoc) { + // always plus one to fix the result, since skip position in 
Lucene90SkipReader + // is a little different from MultiLevelSkipListReader + final int newDocUpto = skipper.skipTo(target) + 1; + + if (newDocUpto >= blockUpto) { + // Skipper moved + assert newDocUpto % BLOCK_SIZE == 0 : "got " + newDocUpto; + blockUpto = newDocUpto; + + // Force to read next block + docBufferUpto = BLOCK_SIZE; + accum = skipper.getDoc(); + seekTo = skipper.getDocPointer(); // delay the seek + } + // next time we call advance, this is used to + // foresee whether skipper is necessary. + nextSkipDoc = skipper.getNextSkipDoc(); + } + assert nextSkipDoc >= target; + } + + @Override + public Impacts getImpacts() throws IOException { + // nextDoc() doesn't advance skip lists, so it's important to do it here to make sure we're + // not returning impacts over a bigger range of doc IDs than necessary. + advanceShallow(doc); + return skipper.getImpacts(); + } + + @Override + public int nextDoc() throws IOException { + if (docBufferUpto == BLOCK_SIZE) { + if (seekTo >= 0) { + docIn.seek(seekTo); + isFreqsRead = true; // reset isFreqsRead + seekTo = -1; + } + refillDocs(); + } + return this.doc = (int) docBuffer[docBufferUpto++]; + } + + @Override + public int advance(int target) throws IOException { + if (target > nextSkipDoc) { + advanceShallow(target); + } + if (docBufferUpto == BLOCK_SIZE) { + if (seekTo >= 0) { + docIn.seek(seekTo); + isFreqsRead = true; // reset isFreqsRead + seekTo = -1; + } + refillDocs(); + } + + int next = findFirstGreater(docBuffer, target, docBufferUpto); + this.doc = (int) docBuffer[next]; + docBufferUpto = next + 1; + return doc; + } + + @Override + public int nextPosition() throws IOException { + return -1; + } + + @Override + public int startOffset() { + return -1; + } + + @Override + public int endOffset() { + return -1; + } + + @Override + public BytesRef getPayload() { + return null; + } + + @Override + public long cost() { + return docFreq; + } + } + + final class BlockImpactsPostingsEnum extends ImpactsEnum { + + final PForUtil pforUtil = new PForUtil(new ForUtil()); + + private final long[] docBuffer = new long[BLOCK_SIZE]; + private final long[] freqBuffer = new long[BLOCK_SIZE]; + private final long[] posDeltaBuffer = new long[BLOCK_SIZE]; + + private int docBufferUpto; + private int posBufferUpto; + + private final ES812ScoreSkipReader skipper; + + final IndexInput docIn; + final IndexInput posIn; + + final boolean indexHasOffsets; + final boolean indexHasPayloads; + + private int docFreq; // number of docs in this posting list + private long totalTermFreq; // number of positions in this posting list + private int docUpto; // how many docs we've read + private int doc; // doc we last read + private long accum; // accumulator for doc deltas + private int freq; // freq we last read + private int position; // current position + + // how many positions "behind" we are; nextPosition must + // skip these to "catch up": + private int posPendingCount; + + // Lazy pos seek: if != -1 then we must seek to this FP + // before reading positions: + private long posPendingFP; + + // Where this term's postings start in the .doc file: + private long docTermStartFP; + + // Where this term's postings start in the .pos file: + private long posTermStartFP; + + // Where this term's payloads/offsets start in the .pay + // file: + private long payTermStartFP; + + // File pointer where the last (vInt encoded) pos delta + // block is. 
We need this to know whether to bulk + // decode vs vInt decode the block: + private long lastPosBlockFP; + + private int nextSkipDoc = -1; + + private long seekTo = -1; + + BlockImpactsPostingsEnum(FieldInfo fieldInfo, IntBlockTermState termState) throws IOException { + indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; + indexHasPayloads = fieldInfo.hasPayloads(); + + this.docIn = ES812PostingsReader.this.docIn.clone(); + + this.posIn = ES812PostingsReader.this.posIn.clone(); + + docFreq = termState.docFreq; + docTermStartFP = termState.docStartFP; + posTermStartFP = termState.posStartFP; + payTermStartFP = termState.payStartFP; + totalTermFreq = termState.totalTermFreq; + docIn.seek(docTermStartFP); + posPendingFP = posTermStartFP; + posPendingCount = 0; + if (termState.totalTermFreq < BLOCK_SIZE) { + lastPosBlockFP = posTermStartFP; + } else if (termState.totalTermFreq == BLOCK_SIZE) { + lastPosBlockFP = -1; + } else { + lastPosBlockFP = posTermStartFP + termState.lastPosBlockOffset; + } + + doc = -1; + accum = 0; + docUpto = 0; + docBufferUpto = BLOCK_SIZE; + + skipper = new ES812ScoreSkipReader(docIn.clone(), MAX_SKIP_LEVELS, true, indexHasOffsets, indexHasPayloads); + skipper.init(docTermStartFP + termState.skipOffset, docTermStartFP, posTermStartFP, payTermStartFP, docFreq); + } + + @Override + public int freq() throws IOException { + return freq; + } + + @Override + public int docID() { + return doc; + } + + private void refillDocs() throws IOException { + final int left = docFreq - docUpto; + assert left >= 0; + + if (left >= BLOCK_SIZE) { + pforUtil.decodeAndPrefixSum(docIn, accum, docBuffer); + pforUtil.decode(docIn, freqBuffer); + } else { + readVIntBlock(docIn, docBuffer, freqBuffer, left, true); + prefixSum(docBuffer, left, accum); + docBuffer[left] = NO_MORE_DOCS; + } + accum = docBuffer[BLOCK_SIZE - 1]; + docBufferUpto = 0; + } + + private void refillPositions() throws IOException { + if (posIn.getFilePointer() == lastPosBlockFP) { + final int count = (int) (totalTermFreq % BLOCK_SIZE); + int payloadLength = 0; + for (int i = 0; i < count; i++) { + int code = posIn.readVInt(); + if (indexHasPayloads) { + if ((code & 1) != 0) { + payloadLength = posIn.readVInt(); + } + posDeltaBuffer[i] = code >>> 1; + if (payloadLength != 0) { + posIn.seek(posIn.getFilePointer() + payloadLength); + } + } else { + posDeltaBuffer[i] = code; + } + if (indexHasOffsets) { + if ((posIn.readVInt() & 1) != 0) { + // offset length changed + posIn.readVInt(); + } + } + } + } else { + pforUtil.decode(posIn, posDeltaBuffer); + } + } + + @Override + public void advanceShallow(int target) throws IOException { + if (target > nextSkipDoc) { + // always plus one to fix the result, since skip position in Lucene90SkipReader + // is a little different from MultiLevelSkipListReader + final int newDocUpto = skipper.skipTo(target) + 1; + + if (newDocUpto > docUpto) { + // Skipper moved + assert newDocUpto % BLOCK_SIZE == 0 : "got " + newDocUpto; + docUpto = newDocUpto; + + // Force to read next block + docBufferUpto = BLOCK_SIZE; + accum = skipper.getDoc(); + posPendingFP = skipper.getPosPointer(); + posPendingCount = skipper.getPosBufferUpto(); + seekTo = skipper.getDocPointer(); // delay the seek + } + // next time we call advance, this is used to + // foresee whether skipper is necessary. 
+ nextSkipDoc = skipper.getNextSkipDoc(); + } + assert nextSkipDoc >= target; + } + + @Override + public Impacts getImpacts() throws IOException { + advanceShallow(doc); + return skipper.getImpacts(); + } + + @Override + public int nextDoc() throws IOException { + return advance(doc + 1); + } + + @Override + public int advance(int target) throws IOException { + if (target > nextSkipDoc) { + advanceShallow(target); + } + if (docBufferUpto == BLOCK_SIZE) { + if (seekTo >= 0) { + docIn.seek(seekTo); + seekTo = -1; + } + refillDocs(); + } + + int next = findFirstGreater(docBuffer, target, docBufferUpto); + if (next == BLOCK_SIZE) { + return doc = NO_MORE_DOCS; + } + this.doc = (int) docBuffer[next]; + this.freq = (int) freqBuffer[next]; + for (int i = docBufferUpto; i <= next; ++i) { + posPendingCount += (int) freqBuffer[i]; + } + docUpto += next - docBufferUpto + 1; + docBufferUpto = next + 1; + position = 0; + return doc; + } + + // TODO: in theory we could avoid loading frq block + // when not needed, ie, use skip data to load how far to + // seek the pos pointer ... instead of having to load frq + // blocks only to sum up how many positions to skip + private void skipPositions() throws IOException { + // Skip positions now: + int toSkip = posPendingCount - freq; + + final int leftInBlock = BLOCK_SIZE - posBufferUpto; + if (toSkip < leftInBlock) { + posBufferUpto += toSkip; + } else { + toSkip -= leftInBlock; + while (toSkip >= BLOCK_SIZE) { + assert posIn.getFilePointer() != lastPosBlockFP; + pforUtil.skip(posIn); + toSkip -= BLOCK_SIZE; + } + refillPositions(); + posBufferUpto = toSkip; + } + + position = 0; + } + + @Override + public int nextPosition() throws IOException { + assert posPendingCount > 0; + + if (posPendingFP != -1) { + posIn.seek(posPendingFP); + posPendingFP = -1; + + // Force buffer refill: + posBufferUpto = BLOCK_SIZE; + } + + if (posPendingCount > freq) { + skipPositions(); + posPendingCount = freq; + } + + if (posBufferUpto == BLOCK_SIZE) { + refillPositions(); + posBufferUpto = 0; + } + position += (int) posDeltaBuffer[posBufferUpto++]; + + posPendingCount--; + return position; + } + + @Override + public int startOffset() { + return -1; + } + + @Override + public int endOffset() { + return -1; + } + + @Override + public BytesRef getPayload() { + return null; + } + + @Override + public long cost() { + return docFreq; + } + } + + final class BlockImpactsEverythingEnum extends ImpactsEnum { + + final PForUtil pforUtil = new PForUtil(new ForUtil()); + + private final long[] docBuffer = new long[BLOCK_SIZE]; + private final long[] freqBuffer = new long[BLOCK_SIZE]; + private final long[] posDeltaBuffer = new long[BLOCK_SIZE]; + + private final long[] payloadLengthBuffer; + private final long[] offsetStartDeltaBuffer; + private final long[] offsetLengthBuffer; + + private byte[] payloadBytes; + private int payloadByteUpto; + private int payloadLength; + + private int lastStartOffset; + private int startOffset = -1; + private int endOffset = -1; + + private int docBufferUpto; + private int posBufferUpto; + + private final ES812ScoreSkipReader skipper; + + final IndexInput docIn; + final IndexInput posIn; + final IndexInput payIn; + final BytesRef payload; + + final boolean indexHasFreq; + final boolean indexHasPos; + final boolean indexHasOffsets; + final boolean indexHasPayloads; + + private int docFreq; // number of docs in this posting list + private long totalTermFreq; // number of positions in this posting list + private int docUpto; // how many docs we've read + 
private int posDocUpTo; // for how many docs we've read positions, offsets, and payloads + private int doc; // doc we last read + private long accum; // accumulator for doc deltas + private int position; // current position + + // how many positions "behind" we are; nextPosition must + // skip these to "catch up": + private int posPendingCount; + + // Lazy pos seek: if != -1 then we must seek to this FP + // before reading positions: + private long posPendingFP; + + // Lazy pay seek: if != -1 then we must seek to this FP + // before reading payloads/offsets: + private long payPendingFP; + + // Where this term's postings start in the .doc file: + private long docTermStartFP; + + // Where this term's postings start in the .pos file: + private long posTermStartFP; + + // Where this term's payloads/offsets start in the .pay + // file: + private long payTermStartFP; + + // File pointer where the last (vInt encoded) pos delta + // block is. We need this to know whether to bulk + // decode vs vInt decode the block: + private long lastPosBlockFP; + + private int nextSkipDoc = -1; + + private final boolean needsPositions; + private final boolean needsOffsets; // true if we actually need offsets + private final boolean needsPayloads; // true if we actually need payloads + + private boolean isFreqsRead; // whether the freqs for the current doc block have been read into freqBuffer + + private long seekTo = -1; + + BlockImpactsEverythingEnum(FieldInfo fieldInfo, IntBlockTermState termState, int flags) throws IOException { + indexHasFreq = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0; + indexHasPos = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0; + indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; + indexHasPayloads = fieldInfo.hasPayloads(); + + needsPositions = PostingsEnum.featureRequested(flags, PostingsEnum.POSITIONS); + needsOffsets = PostingsEnum.featureRequested(flags, PostingsEnum.OFFSETS); + needsPayloads = PostingsEnum.featureRequested(flags, PostingsEnum.PAYLOADS); + + this.docIn = ES812PostingsReader.this.docIn.clone(); + + if (indexHasPos && needsPositions) { + this.posIn = ES812PostingsReader.this.posIn.clone(); + } else { + this.posIn = null; + } + + if ((indexHasOffsets && needsOffsets) || (indexHasPayloads && needsPayloads)) { + this.payIn = ES812PostingsReader.this.payIn.clone(); + } else { + this.payIn = null; + } + + if (indexHasOffsets) { + offsetStartDeltaBuffer = new long[BLOCK_SIZE]; + offsetLengthBuffer = new long[BLOCK_SIZE]; + } else { + offsetStartDeltaBuffer = null; + offsetLengthBuffer = null; + startOffset = -1; + endOffset = -1; + } + + if (indexHasPayloads) { + payloadLengthBuffer = new long[BLOCK_SIZE]; + payloadBytes = new byte[128]; + payload = new BytesRef(); + } else { + payloadLengthBuffer = null; + payloadBytes = null; + payload = null; + } + + docFreq = termState.docFreq; + docTermStartFP = termState.docStartFP; + posTermStartFP = termState.posStartFP; + payTermStartFP = termState.payStartFP; + totalTermFreq = termState.totalTermFreq; + docIn.seek(docTermStartFP); + posPendingFP = posTermStartFP; + payPendingFP = payTermStartFP; + posPendingCount = 0; + if (termState.totalTermFreq < BLOCK_SIZE) { + lastPosBlockFP = posTermStartFP; + } else if (termState.totalTermFreq == BLOCK_SIZE) { + lastPosBlockFP = -1; + } else { + lastPosBlockFP = posTermStartFP + termState.lastPosBlockOffset; + } + + doc = -1; + accum = 0; + docUpto = 0; +
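+ // posDocUpTo trails docUpto and is only caught up (in refillDocs/nextPosition) when positions are actually requested: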
posDocUpTo = 0; + isFreqsRead = true; + docBufferUpto = BLOCK_SIZE; + + skipper = new ES812ScoreSkipReader(docIn.clone(), MAX_SKIP_LEVELS, indexHasPos, indexHasOffsets, indexHasPayloads); + skipper.init(docTermStartFP + termState.skipOffset, docTermStartFP, posTermStartFP, payTermStartFP, docFreq); + + if (indexHasFreq == false) { + for (int i = 0; i < ForUtil.BLOCK_SIZE; ++i) { + freqBuffer[i] = 1; + } + } + } + + @Override + public int freq() throws IOException { + if (indexHasFreq && (isFreqsRead == false)) { + pforUtil.decode(docIn, freqBuffer); // read freqBuffer for this block + isFreqsRead = true; + } + return (int) freqBuffer[docBufferUpto - 1]; + } + + @Override + public int docID() { + return doc; + } + + private void refillDocs() throws IOException { + if (indexHasFreq) { + if (isFreqsRead == false) { // previous freq block was not read + // check if we need to load the previous freq block to catch up on positions or we can + // skip it + if (indexHasPos && needsPositions && (posDocUpTo < docUpto)) { + pforUtil.decode(docIn, freqBuffer); // load the previous freq block + } else { + pforUtil.skip(docIn); // skip it + } + isFreqsRead = true; + } + if (indexHasPos && needsPositions) { + while (posDocUpTo < docUpto) { // catch up on positions, bring posPendingCount up to the current doc + posPendingCount += (int) freqBuffer[docBufferUpto - (docUpto - posDocUpTo)]; + posDocUpTo++; + } + } + } + + final int left = docFreq - docUpto; + assert left >= 0; + + if (left >= BLOCK_SIZE) { + pforUtil.decodeAndPrefixSum(docIn, accum, docBuffer); + if (indexHasFreq) { + isFreqsRead = false; // freq block will be loaded lazily when necessary, we don't load it here + } + } else { + readVIntBlock(docIn, docBuffer, freqBuffer, left, indexHasFreq); + prefixSum(docBuffer, left, accum); + docBuffer[left] = NO_MORE_DOCS; + } + accum = docBuffer[BLOCK_SIZE - 1]; + docBufferUpto = 0; + } + + private void refillPositions() throws IOException { + if (posIn.getFilePointer() == lastPosBlockFP) { + final int count = (int) (totalTermFreq % BLOCK_SIZE); + int payloadLength = 0; + int offsetLength = 0; + payloadByteUpto = 0; + for (int i = 0; i < count; i++) { + int code = posIn.readVInt(); + if (indexHasPayloads) { + if ((code & 1) != 0) { + payloadLength = posIn.readVInt(); + } + payloadLengthBuffer[i] = payloadLength; + posDeltaBuffer[i] = code >>> 1; + if (payloadLength != 0) { + if (payloadByteUpto + payloadLength > payloadBytes.length) { + payloadBytes = ArrayUtil.grow(payloadBytes, payloadByteUpto + payloadLength); + } + posIn.readBytes(payloadBytes, payloadByteUpto, payloadLength); + payloadByteUpto += payloadLength; + } + } else { + posDeltaBuffer[i] = code; + } + + if (indexHasOffsets) { + int deltaCode = posIn.readVInt(); + if ((deltaCode & 1) != 0) { + offsetLength = posIn.readVInt(); + } + offsetStartDeltaBuffer[i] = deltaCode >>> 1; + offsetLengthBuffer[i] = offsetLength; + } + } + payloadByteUpto = 0; + } else { + pforUtil.decode(posIn, posDeltaBuffer); + + if (indexHasPayloads && payIn != null) { + if (needsPayloads) { + pforUtil.decode(payIn, payloadLengthBuffer); + int numBytes = payIn.readVInt(); + + if (numBytes > payloadBytes.length) { + payloadBytes = ArrayUtil.growNoCopy(payloadBytes, numBytes); + } + payIn.readBytes(payloadBytes, 0, numBytes); + } else { + // this works, because when writing a vint block we always force the first length to be + // written + pforUtil.skip(payIn); // skip over lengths + int numBytes = payIn.readVInt(); // read length of payloadBytes +
payIn.seek(payIn.getFilePointer() + numBytes); // skip over payloadBytes + } + payloadByteUpto = 0; + } + + if (indexHasOffsets && payIn != null) { + if (needsOffsets) { + pforUtil.decode(payIn, offsetStartDeltaBuffer); + pforUtil.decode(payIn, offsetLengthBuffer); + } else { + // this works, because when writing a vint block we always force the first length to be + // written + pforUtil.skip(payIn); // skip over starts + pforUtil.skip(payIn); // skip over lengths + } + } + } + } + + @Override + public void advanceShallow(int target) throws IOException { + if (target > nextSkipDoc) { + // always plus one to fix the result, since skip position in Lucene90SkipReader + // is a little different from MultiLevelSkipListReader + final int newDocUpto = skipper.skipTo(target) + 1; + + if (newDocUpto > docUpto) { + // Skipper moved + assert newDocUpto % BLOCK_SIZE == 0 : "got " + newDocUpto; + docUpto = newDocUpto; + posDocUpTo = docUpto; + + // Force to read next block + docBufferUpto = BLOCK_SIZE; + accum = skipper.getDoc(); + posPendingFP = skipper.getPosPointer(); + payPendingFP = skipper.getPayPointer(); + posPendingCount = skipper.getPosBufferUpto(); + lastStartOffset = 0; // new document + payloadByteUpto = skipper.getPayloadByteUpto(); // actually, this is just lastSkipEntry + seekTo = skipper.getDocPointer(); // delay the seek + } + // next time we call advance, this is used to + // foresee whether skipper is necessary. + nextSkipDoc = skipper.getNextSkipDoc(); + } + assert nextSkipDoc >= target; + } + + @Override + public Impacts getImpacts() throws IOException { + advanceShallow(doc); + return skipper.getImpacts(); + } + + @Override + public int nextDoc() throws IOException { + return advance(doc + 1); + } + + @Override + public int advance(int target) throws IOException { + if (target > nextSkipDoc) { + advanceShallow(target); + } + if (docBufferUpto == BLOCK_SIZE) { + if (seekTo >= 0) { + docIn.seek(seekTo); + seekTo = -1; + isFreqsRead = true; // reset isFreqsRead + } + refillDocs(); + } + + // Now scan: + long doc; + while (true) { + doc = docBuffer[docBufferUpto]; + docBufferUpto++; + docUpto++; + + if (doc >= target) { + break; + } + + if (docBufferUpto == BLOCK_SIZE) { + return this.doc = NO_MORE_DOCS; + } + } + position = 0; + lastStartOffset = 0; + + return this.doc = (int) doc; + } + + // TODO: in theory we could avoid loading frq block + // when not needed, ie, use skip data to load how far to + // seek the pos pointer ... 
instead of having to load frq + // blocks only to sum up how many positions to skip + private void skipPositions() throws IOException { + // Skip positions now: + int toSkip = posPendingCount - (int) freqBuffer[docBufferUpto - 1]; + // if (DEBUG) { + // System.out.println(" FPR.skipPositions: toSkip=" + toSkip); + // } + + final int leftInBlock = BLOCK_SIZE - posBufferUpto; + if (toSkip < leftInBlock) { + int end = posBufferUpto + toSkip; + while (posBufferUpto < end) { + if (indexHasPayloads) { + payloadByteUpto += (int) payloadLengthBuffer[posBufferUpto]; + } + posBufferUpto++; + } + } else { + toSkip -= leftInBlock; + while (toSkip >= BLOCK_SIZE) { + assert posIn.getFilePointer() != lastPosBlockFP; + pforUtil.skip(posIn); + + if (indexHasPayloads && payIn != null) { + // Skip payloadLength block: + pforUtil.skip(payIn); + + // Skip payloadBytes block: + int numBytes = payIn.readVInt(); + payIn.seek(payIn.getFilePointer() + numBytes); + } + + if (indexHasOffsets && payIn != null) { + pforUtil.skip(payIn); + pforUtil.skip(payIn); + } + toSkip -= BLOCK_SIZE; + } + refillPositions(); + payloadByteUpto = 0; + posBufferUpto = 0; + while (posBufferUpto < toSkip) { + if (indexHasPayloads) { + payloadByteUpto += (int) payloadLengthBuffer[posBufferUpto]; + } + posBufferUpto++; + } + } + + position = 0; + lastStartOffset = 0; + } + + @Override + public int nextPosition() throws IOException { + if (indexHasPos == false || needsPositions == false) { + return -1; + } + + if (isFreqsRead == false) { + pforUtil.decode(docIn, freqBuffer); // read freqBuffer for this docs block + isFreqsRead = true; + } + while (posDocUpTo < docUpto) { // bring posPendingCount upto the current doc + posPendingCount += (int) freqBuffer[docBufferUpto - (docUpto - posDocUpTo)]; + posDocUpTo++; + } + + assert posPendingCount > 0; + + if (posPendingFP != -1) { + posIn.seek(posPendingFP); + posPendingFP = -1; + + if (payPendingFP != -1 && payIn != null) { + payIn.seek(payPendingFP); + payPendingFP = -1; + } + + // Force buffer refill: + posBufferUpto = BLOCK_SIZE; + } + + if (posPendingCount > freqBuffer[docBufferUpto - 1]) { + skipPositions(); + posPendingCount = (int) freqBuffer[docBufferUpto - 1]; + } + + if (posBufferUpto == BLOCK_SIZE) { + refillPositions(); + posBufferUpto = 0; + } + position += (int) posDeltaBuffer[posBufferUpto]; + + if (indexHasPayloads) { + payloadLength = (int) payloadLengthBuffer[posBufferUpto]; + payload.bytes = payloadBytes; + payload.offset = payloadByteUpto; + payload.length = payloadLength; + payloadByteUpto += payloadLength; + } + + if (indexHasOffsets && needsOffsets) { + startOffset = lastStartOffset + (int) offsetStartDeltaBuffer[posBufferUpto]; + endOffset = startOffset + (int) offsetLengthBuffer[posBufferUpto]; + lastStartOffset = startOffset; + } + + posBufferUpto++; + posPendingCount--; + return position; + } + + @Override + public int startOffset() { + return startOffset; + } + + @Override + public int endOffset() { + return endOffset; + } + + @Override + public BytesRef getPayload() { + if (payloadLength == 0) { + return null; + } else { + return payload; + } + } + + @Override + public long cost() { + return docFreq; + } + } + + @Override + public void checkIntegrity() throws IOException { + if (docIn != null) { + CodecUtil.checksumEntireFile(docIn); + } + if (posIn != null) { + CodecUtil.checksumEntireFile(posIn); + } + if (payIn != null) { + CodecUtil.checksumEntireFile(payIn); + } + } + + @Override + public String toString() { + return getClass().getSimpleName() + "(positions=" + 
(posIn != null) + ",payloads=" + (payIn != null) + ")"; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsWriter.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsWriter.java new file mode 100644 index 0000000000000..9ab7ed42efb09 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsWriter.java @@ -0,0 +1,523 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. + */ +package org.elasticsearch.index.codec.postings; + +import org.apache.lucene.codecs.BlockTermState; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.codecs.CompetitiveImpactAccumulator; +import org.apache.lucene.codecs.PushPostingsWriterBase; +import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.BitUtil; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.codec.postings.ES812PostingsFormat.IntBlockTermState; + +import java.io.IOException; + +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.BLOCK_SIZE; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.DOC_CODEC; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.MAX_SKIP_LEVELS; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.PAY_CODEC; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.POS_CODEC; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.TERMS_CODEC; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.VERSION_CURRENT; + +/** + * Concrete class that writes docId(maybe frq,pos,offset,payloads) list with postings format. + * + *
<p>
Postings list for each term will be stored separately. + * + * @see ES812SkipWriter for details about skipping setting and postings layout. + */ +final class ES812PostingsWriter extends PushPostingsWriterBase { + + IndexOutput docOut; + IndexOutput posOut; + IndexOutput payOut; + + static final IntBlockTermState emptyState = new IntBlockTermState(); + IntBlockTermState lastState; + + // Holds starting file pointers for current term: + private long docStartFP; + private long posStartFP; + private long payStartFP; + + final long[] docDeltaBuffer; + final long[] freqBuffer; + private int docBufferUpto; + + final long[] posDeltaBuffer; + final long[] payloadLengthBuffer; + final long[] offsetStartDeltaBuffer; + final long[] offsetLengthBuffer; + private int posBufferUpto; + + private byte[] payloadBytes; + private int payloadByteUpto; + + private int lastBlockDocID; + private long lastBlockPosFP; + private long lastBlockPayFP; + private int lastBlockPosBufferUpto; + private int lastBlockPayloadByteUpto; + + private int lastDocID; + private int lastPosition; + private int lastStartOffset; + private int docCount; + + private final PForUtil pforUtil; + private final ES812SkipWriter skipWriter; + + private boolean fieldHasNorms; + private NumericDocValues norms; + private final CompetitiveImpactAccumulator competitiveFreqNormAccumulator = new CompetitiveImpactAccumulator(); + + /** Creates a postings writer */ + ES812PostingsWriter(SegmentWriteState state) throws IOException { + + String docFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, ES812PostingsFormat.DOC_EXTENSION); + docOut = state.directory.createOutput(docFileName, state.context); + IndexOutput posOut = null; + IndexOutput payOut = null; + boolean success = false; + try { + CodecUtil.writeIndexHeader(docOut, DOC_CODEC, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + pforUtil = new PForUtil(new ForUtil()); + if (state.fieldInfos.hasProx()) { + posDeltaBuffer = new long[BLOCK_SIZE]; + String posFileName = IndexFileNames.segmentFileName( + state.segmentInfo.name, + state.segmentSuffix, + ES812PostingsFormat.POS_EXTENSION + ); + posOut = state.directory.createOutput(posFileName, state.context); + CodecUtil.writeIndexHeader(posOut, POS_CODEC, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + + if (state.fieldInfos.hasPayloads()) { + payloadBytes = new byte[128]; + payloadLengthBuffer = new long[BLOCK_SIZE]; + } else { + payloadBytes = null; + payloadLengthBuffer = null; + } + + if (state.fieldInfos.hasOffsets()) { + offsetStartDeltaBuffer = new long[BLOCK_SIZE]; + offsetLengthBuffer = new long[BLOCK_SIZE]; + } else { + offsetStartDeltaBuffer = null; + offsetLengthBuffer = null; + } + + if (state.fieldInfos.hasPayloads() || state.fieldInfos.hasOffsets()) { + String payFileName = IndexFileNames.segmentFileName( + state.segmentInfo.name, + state.segmentSuffix, + ES812PostingsFormat.PAY_EXTENSION + ); + payOut = state.directory.createOutput(payFileName, state.context); + CodecUtil.writeIndexHeader(payOut, PAY_CODEC, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + } + } else { + posDeltaBuffer = null; + payloadLengthBuffer = null; + offsetStartDeltaBuffer = null; + offsetLengthBuffer = null; + payloadBytes = null; + } + this.payOut = payOut; + this.posOut = posOut; + success = true; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(docOut, posOut, payOut); + } + } + + docDeltaBuffer = new long[BLOCK_SIZE]; + freqBuffer = new 
long[BLOCK_SIZE]; + + // TODO: should we try skipping every 2/4 blocks...? + skipWriter = new ES812SkipWriter(MAX_SKIP_LEVELS, BLOCK_SIZE, state.segmentInfo.maxDoc(), docOut, posOut, payOut); + } + + @Override + public IntBlockTermState newTermState() { + return new IntBlockTermState(); + } + + @Override + public void init(IndexOutput termsOut, SegmentWriteState state) throws IOException { + CodecUtil.writeIndexHeader(termsOut, TERMS_CODEC, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + termsOut.writeVInt(BLOCK_SIZE); + } + + @Override + public void setField(FieldInfo fieldInfo) { + super.setField(fieldInfo); + skipWriter.setField(writePositions, writeOffsets, writePayloads); + lastState = emptyState; + fieldHasNorms = fieldInfo.hasNorms(); + } + + @Override + public void startTerm(NumericDocValues norms) { + docStartFP = docOut.getFilePointer(); + if (writePositions) { + posStartFP = posOut.getFilePointer(); + if (writePayloads || writeOffsets) { + payStartFP = payOut.getFilePointer(); + } + } + lastDocID = 0; + lastBlockDocID = -1; + skipWriter.resetSkip(); + this.norms = norms; + competitiveFreqNormAccumulator.clear(); + } + + @Override + public void startDoc(int docID, int termDocFreq) throws IOException { + // Have collected a block of docs, and get a new doc. + // Should write skip data as well as postings list for + // current block. + if (lastBlockDocID != -1 && docBufferUpto == 0) { + skipWriter.bufferSkip( + lastBlockDocID, + competitiveFreqNormAccumulator, + docCount, + lastBlockPosFP, + lastBlockPayFP, + lastBlockPosBufferUpto, + lastBlockPayloadByteUpto + ); + competitiveFreqNormAccumulator.clear(); + } + + final int docDelta = docID - lastDocID; + + if (docID < 0 || (docCount > 0 && docDelta <= 0)) { + throw new CorruptIndexException("docs out of order (" + docID + " <= " + lastDocID + " )", docOut); + } + + docDeltaBuffer[docBufferUpto] = docDelta; + if (writeFreqs) { + freqBuffer[docBufferUpto] = termDocFreq; + } + + docBufferUpto++; + docCount++; + + if (docBufferUpto == BLOCK_SIZE) { + pforUtil.encode(docDeltaBuffer, docOut); + if (writeFreqs) { + pforUtil.encode(freqBuffer, docOut); + } + // NOTE: don't set docBufferUpto back to 0 here; + // finishDoc will do so (because it needs to see that + // the block was filled so it can save skip data) + } + + lastDocID = docID; + lastPosition = 0; + lastStartOffset = 0; + + long norm; + if (fieldHasNorms) { + boolean found = norms.advanceExact(docID); + if (found == false) { + // This can happen if indexing hits a problem after adding a doc to the + // postings but before buffering the norm. Such documents are written + // deleted and will go away on the first merge. + norm = 1L; + } else { + norm = norms.longValue(); + assert norm != 0 : docID; + } + } else { + norm = 1L; + } + + competitiveFreqNormAccumulator.add(writeFreqs ? 
termDocFreq : 1, norm); + } + + @Override + public void addPosition(int position, BytesRef payload, int startOffset, int endOffset) throws IOException { + if (position > IndexWriter.MAX_POSITION) { + throw new CorruptIndexException( + "position=" + position + " is too large (> IndexWriter.MAX_POSITION=" + IndexWriter.MAX_POSITION + ")", + docOut + ); + } + if (position < 0) { + throw new CorruptIndexException("position=" + position + " is < 0", docOut); + } + posDeltaBuffer[posBufferUpto] = position - lastPosition; + if (writePayloads) { + if (payload == null || payload.length == 0) { + // no payload + payloadLengthBuffer[posBufferUpto] = 0; + } else { + payloadLengthBuffer[posBufferUpto] = payload.length; + if (payloadByteUpto + payload.length > payloadBytes.length) { + payloadBytes = ArrayUtil.grow(payloadBytes, payloadByteUpto + payload.length); + } + System.arraycopy(payload.bytes, payload.offset, payloadBytes, payloadByteUpto, payload.length); + payloadByteUpto += payload.length; + } + } + + if (writeOffsets) { + assert startOffset >= lastStartOffset; + assert endOffset >= startOffset; + offsetStartDeltaBuffer[posBufferUpto] = startOffset - lastStartOffset; + offsetLengthBuffer[posBufferUpto] = endOffset - startOffset; + lastStartOffset = startOffset; + } + + posBufferUpto++; + lastPosition = position; + if (posBufferUpto == BLOCK_SIZE) { + pforUtil.encode(posDeltaBuffer, posOut); + + if (writePayloads) { + pforUtil.encode(payloadLengthBuffer, payOut); + payOut.writeVInt(payloadByteUpto); + payOut.writeBytes(payloadBytes, 0, payloadByteUpto); + payloadByteUpto = 0; + } + if (writeOffsets) { + pforUtil.encode(offsetStartDeltaBuffer, payOut); + pforUtil.encode(offsetLengthBuffer, payOut); + } + posBufferUpto = 0; + } + } + + @Override + public void finishDoc() throws IOException { + // Since we don't know df for current term, we had to buffer + // those skip data for each block, and when a new doc comes, + // write them to skip file. + if (docBufferUpto == BLOCK_SIZE) { + lastBlockDocID = lastDocID; + if (posOut != null) { + if (payOut != null) { + lastBlockPayFP = payOut.getFilePointer(); + } + lastBlockPosFP = posOut.getFilePointer(); + lastBlockPosBufferUpto = posBufferUpto; + lastBlockPayloadByteUpto = payloadByteUpto; + } + docBufferUpto = 0; + } + } + + /** Called when we are done adding docs to this term */ + @Override + public void finishTerm(BlockTermState _state) throws IOException { + IntBlockTermState state = (IntBlockTermState) _state; + assert state.docFreq > 0; + + // TODO: wasteful we are counting this (counting # docs + // for this term) in two places? + assert state.docFreq == docCount : state.docFreq + " vs " + docCount; + + // docFreq == 1, don't write the single docid/freq to a separate file along with a pointer to + // it. 
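+ // [Editor's illustrative sketch -- not part of the original patch.] A worked example of the
+ // vInt tail written below (docFreq > 1, writeFreqs == true): with hypothetical leftover
+ // deltas {3, 1, 7} and freqs {1, 2, 1}, the loop emits
+ //   writeVInt((3 << 1) | 1)            // freq == 1 is folded into the low bit
+ //   writeVInt(1 << 1); writeVInt(2)    // freq > 1 costs a second vInt
+ //   writeVInt((7 << 1) | 1)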
+ final int singletonDocID; + if (state.docFreq == 1) { + // pulse the singleton docid into the term dictionary, freq is implicitly totalTermFreq + singletonDocID = (int) docDeltaBuffer[0]; + } else { + singletonDocID = -1; + // vInt encode the remaining doc deltas and freqs: + for (int i = 0; i < docBufferUpto; i++) { + final int docDelta = (int) docDeltaBuffer[i]; + final int freq = (int) freqBuffer[i]; + if (writeFreqs == false) { + docOut.writeVInt(docDelta); + } else if (freq == 1) { + docOut.writeVInt((docDelta << 1) | 1); + } else { + docOut.writeVInt(docDelta << 1); + docOut.writeVInt(freq); + } + } + } + + final long lastPosBlockOffset; + + if (writePositions) { + // totalTermFreq is just total number of positions(or payloads, or offsets) + // associated with current term. + assert state.totalTermFreq != -1; + if (state.totalTermFreq > BLOCK_SIZE) { + // record file offset for last pos in last block + lastPosBlockOffset = posOut.getFilePointer() - posStartFP; + } else { + lastPosBlockOffset = -1; + } + if (posBufferUpto > 0) { + // TODO: should we send offsets/payloads to + // .pay...? seems wasteful (have to store extra + // vLong for low (< BLOCK_SIZE) DF terms = vast vast + // majority) + + // vInt encode the remaining positions/payloads/offsets: + int lastPayloadLength = -1; // force first payload length to be written + int lastOffsetLength = -1; // force first offset length to be written + int payloadBytesReadUpto = 0; + for (int i = 0; i < posBufferUpto; i++) { + final int posDelta = (int) posDeltaBuffer[i]; + if (writePayloads) { + final int payloadLength = (int) payloadLengthBuffer[i]; + if (payloadLength != lastPayloadLength) { + lastPayloadLength = payloadLength; + posOut.writeVInt((posDelta << 1) | 1); + posOut.writeVInt(payloadLength); + } else { + posOut.writeVInt(posDelta << 1); + } + + if (payloadLength != 0) { + posOut.writeBytes(payloadBytes, payloadBytesReadUpto, payloadLength); + payloadBytesReadUpto += payloadLength; + } + } else { + posOut.writeVInt(posDelta); + } + + if (writeOffsets) { + int delta = (int) offsetStartDeltaBuffer[i]; + int length = (int) offsetLengthBuffer[i]; + if (length == lastOffsetLength) { + posOut.writeVInt(delta << 1); + } else { + posOut.writeVInt(delta << 1 | 1); + posOut.writeVInt(length); + lastOffsetLength = length; + } + } + } + + if (writePayloads) { + assert payloadBytesReadUpto == payloadByteUpto; + payloadByteUpto = 0; + } + } + } else { + lastPosBlockOffset = -1; + } + + long skipOffset; + if (docCount > BLOCK_SIZE) { + skipOffset = skipWriter.writeSkip(docOut) - docStartFP; + } else { + skipOffset = -1; + } + + state.docStartFP = docStartFP; + state.posStartFP = posStartFP; + state.payStartFP = payStartFP; + state.singletonDocID = singletonDocID; + state.skipOffset = skipOffset; + state.lastPosBlockOffset = lastPosBlockOffset; + docBufferUpto = 0; + posBufferUpto = 0; + lastDocID = 0; + docCount = 0; + } + + @Override + public void encodeTerm(DataOutput out, FieldInfo fieldInfo, BlockTermState _state, boolean absolute) throws IOException { + IntBlockTermState state = (IntBlockTermState) _state; + if (absolute) { + lastState = emptyState; + assert lastState.docStartFP == 0; + } + + if (lastState.singletonDocID != -1 && state.singletonDocID != -1 && state.docStartFP == lastState.docStartFP) { + // With runs of rare values such as ID fields, the increment of pointers in the docs file is + // often 0. 
+ // Furthermore some ID schemes like auto-increment IDs or Flake IDs are monotonic, so we + // encode the delta + // between consecutive doc IDs to save space. + final long delta = (long) state.singletonDocID - lastState.singletonDocID; + out.writeVLong((BitUtil.zigZagEncode(delta) << 1) | 0x01); + } else { + out.writeVLong((state.docStartFP - lastState.docStartFP) << 1); + if (state.singletonDocID != -1) { + out.writeVInt(state.singletonDocID); + } + } + + if (writePositions) { + out.writeVLong(state.posStartFP - lastState.posStartFP); + if (writePayloads || writeOffsets) { + out.writeVLong(state.payStartFP - lastState.payStartFP); + } + } + if (writePositions) { + if (state.lastPosBlockOffset != -1) { + out.writeVLong(state.lastPosBlockOffset); + } + } + if (state.skipOffset != -1) { + out.writeVLong(state.skipOffset); + } + lastState = state; + } + + @Override + public void close() throws IOException { + // TODO: add a finish() at least to PushBase? DV too...? + boolean success = false; + try { + if (docOut != null) { + CodecUtil.writeFooter(docOut); + } + if (posOut != null) { + CodecUtil.writeFooter(posOut); + } + if (payOut != null) { + CodecUtil.writeFooter(payOut); + } + success = true; + } finally { + if (success) { + IOUtils.close(docOut, posOut, payOut); + } else { + IOUtils.closeWhileHandlingException(docOut, posOut, payOut); + } + docOut = posOut = payOut = null; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812ScoreSkipReader.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812ScoreSkipReader.java new file mode 100644 index 0000000000000..f76e1026945e6 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812ScoreSkipReader.java @@ -0,0 +1,157 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. 
+ */
+package org.elasticsearch.index.codec.postings;
+
+import org.apache.lucene.index.Impact;
+import org.apache.lucene.index.Impacts;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.ArrayUtil;
+
+import java.io.IOException;
+import java.util.AbstractList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.RandomAccess;
+
+final class ES812ScoreSkipReader extends ES812SkipReader {
+
+    private final byte[][] impactData;
+    private final int[] impactDataLength;
+    private final ByteArrayDataInput badi = new ByteArrayDataInput();
+    private final Impacts impacts;
+    private int numLevels = 1;
+    private final MutableImpactList[] perLevelImpacts;
+
+    ES812ScoreSkipReader(IndexInput skipStream, int maxSkipLevels, boolean hasPos, boolean hasOffsets, boolean hasPayloads) {
+        super(skipStream, maxSkipLevels, hasPos, hasOffsets, hasPayloads);
+        this.impactData = new byte[maxSkipLevels][];
+        Arrays.fill(impactData, new byte[0]);
+        this.impactDataLength = new int[maxSkipLevels];
+        this.perLevelImpacts = new MutableImpactList[maxSkipLevels];
+        for (int i = 0; i < perLevelImpacts.length; ++i) {
+            perLevelImpacts[i] = new MutableImpactList();
+        }
+        impacts = new Impacts() {
+
+            @Override
+            public int numLevels() {
+                return numLevels;
+            }
+
+            @Override
+            public int getDocIdUpTo(int level) {
+                return skipDoc[level];
+            }
+
+            @Override
+            public List<Impact> getImpacts(int level) {
+                assert level < numLevels;
+                if (impactDataLength[level] > 0) {
+                    badi.reset(impactData[level], 0, impactDataLength[level]);
+                    perLevelImpacts[level] = readImpacts(badi, perLevelImpacts[level]);
+                    impactDataLength[level] = 0;
+                }
+                return perLevelImpacts[level];
+            }
+        };
+    }
+
+    @Override
+    public int skipTo(int target) throws IOException {
+        int result = super.skipTo(target);
+        if (numberOfSkipLevels > 0) {
+            numLevels = numberOfSkipLevels;
+        } else {
+            // The end of the postings list has no skip data anymore, so we fill with dummy data
+            // like SlowImpactsEnum.
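+            // [Editor's note -- illustrative, not part of the original patch.] The dummy
+            // impact (freq = Integer.MAX_VALUE, norm = 1L) filled in below is the most
+            // competitive impact possible, so any score upper bound derived from it is safe:
+            // a scorer may over-estimate, but it will never skip a document it should score.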
+            numLevels = 1;
+            perLevelImpacts[0].length = 1;
+            perLevelImpacts[0].impacts[0].freq = Integer.MAX_VALUE;
+            perLevelImpacts[0].impacts[0].norm = 1L;
+            impactDataLength[0] = 0;
+        }
+        return result;
+    }
+
+    Impacts getImpacts() {
+        return impacts;
+    }
+
+    @Override
+    protected void readImpacts(int level, IndexInput skipStream) throws IOException {
+        int length = skipStream.readVInt();
+        if (impactData[level].length < length) {
+            impactData[level] = new byte[ArrayUtil.oversize(length, Byte.BYTES)];
+        }
+        skipStream.readBytes(impactData[level], 0, length);
+        impactDataLength[level] = length;
+    }
+
+    static MutableImpactList readImpacts(ByteArrayDataInput in, MutableImpactList reuse) {
+        int maxNumImpacts = in.length(); // at most one impact per byte
+        if (reuse.impacts.length < maxNumImpacts) {
+            int oldLength = reuse.impacts.length;
+            reuse.impacts = ArrayUtil.grow(reuse.impacts, maxNumImpacts);
+            for (int i = oldLength; i < reuse.impacts.length; ++i) {
+                reuse.impacts[i] = new Impact(Integer.MAX_VALUE, 1L);
+            }
+        }
+
+        int freq = 0;
+        long norm = 0;
+        int length = 0;
+        while (in.getPosition() < in.length()) {
+            int freqDelta = in.readVInt();
+            if ((freqDelta & 0x01) != 0) {
+                freq += 1 + (freqDelta >>> 1);
+                try {
+                    norm += 1 + in.readZLong();
+                } catch (IOException e) {
+                    throw new RuntimeException(e); // cannot happen on a BADI
+                }
+            } else {
+                freq += 1 + (freqDelta >>> 1);
+                norm++;
+            }
+            Impact impact = reuse.impacts[length];
+            impact.freq = freq;
+            impact.norm = norm;
+            length++;
+        }
+        reuse.length = length;
+        return reuse;
+    }
+
+    static class MutableImpactList extends AbstractList<Impact> implements RandomAccess {
+        int length = 1;
+        Impact[] impacts = new Impact[] { new Impact(Integer.MAX_VALUE, 1L) };
+
+        @Override
+        public Impact get(int index) {
+            return impacts[index];
+        }
+
+        @Override
+        public int size() {
+            return length;
+        }
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java
new file mode 100644
index 0000000000000..11c0c611312fc
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java
@@ -0,0 +1,203 @@
+/*
+ * @notice
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Modifications copyright (C) 2022 Elasticsearch B.V.
+ */
+package org.elasticsearch.index.codec.postings;
+
+import org.apache.lucene.codecs.MultiLevelSkipListReader;
+import org.apache.lucene.store.IndexInput;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+/**
+ * Implements the skip list reader for block postings format that stores positions and payloads.
+ *
+ * <p>Although this skipper uses MultiLevelSkipListReader as an interface, its definition of a
+ * skip position is a little different.
+ *
+ * <p>For example, when skipInterval = blockSize = 3 and df = 2 * skipInterval = 6:
+ *
+ * <pre>
+ * 0 1 2 3 4 5
+ * d d d d d d    (posting list)
+ *     ^     ^    (skip point in MultiLevelSkipWriter)
+ *       ^        (skip point in Lucene90SkipWriter)
+ * </pre>
+ *
+ * <p>In this case, MultiLevelSkipListReader will use the last document as a skip point, while
+ * Lucene90SkipReader should assume that no skip point will come.
+ *
+ * <p>If we used the interface directly in Lucene90SkipReader, it might naively try to read more
+ * skip data after the only skip point had been loaded.
+ *
+ * <p>To illustrate: when we call skipTo(d[5]), since skip point d[3] has a smaller docId and
+ * numSkipped + blockSize == df, MultiLevelSkipListReader will assume the skip list isn't exhausted
+ * yet and will try to load a non-existent skip point.
+ *
+ * <p>
Therefore, we'll trim df before passing it to the interface. see trim(int) + */ +class ES812SkipReader extends MultiLevelSkipListReader { + private long[] docPointer; + private long[] posPointer; + private long[] payPointer; + private int[] posBufferUpto; + private int[] payloadByteUpto; + + private long lastPosPointer; + private long lastPayPointer; + private int lastPayloadByteUpto; + private long lastDocPointer; + private int lastPosBufferUpto; + + ES812SkipReader(IndexInput skipStream, int maxSkipLevels, boolean hasPos, boolean hasOffsets, boolean hasPayloads) { + super(skipStream, maxSkipLevels, ForUtil.BLOCK_SIZE, 8); + docPointer = new long[maxSkipLevels]; + if (hasPos) { + posPointer = new long[maxSkipLevels]; + posBufferUpto = new int[maxSkipLevels]; + if (hasPayloads) { + payloadByteUpto = new int[maxSkipLevels]; + } else { + payloadByteUpto = null; + } + if (hasOffsets || hasPayloads) { + payPointer = new long[maxSkipLevels]; + } else { + payPointer = null; + } + } else { + posPointer = null; + } + } + + /** + * Trim original docFreq to tell skipReader read proper number of skip points. + * + *
+ * <p>Since our definition in Lucene90Skip* is a little different from MultiLevelSkip*, this
+ * trimmed docFreq will prevent skipReader from: 1. naively reading a non-existent skip point
+ * after the last block boundary, and 2. moving into the vInt block.
+ */
+    protected int trim(int df) {
+        return df % ForUtil.BLOCK_SIZE == 0 ? df - 1 : df;
+    }
+
+    public void init(long skipPointer, long docBasePointer, long posBasePointer, long payBasePointer, int df) throws IOException {
+        super.init(skipPointer, trim(df));
+        lastDocPointer = docBasePointer;
+        lastPosPointer = posBasePointer;
+        lastPayPointer = payBasePointer;
+
+        Arrays.fill(docPointer, docBasePointer);
+        if (posPointer != null) {
+            Arrays.fill(posPointer, posBasePointer);
+            if (payPointer != null) {
+                Arrays.fill(payPointer, payBasePointer);
+            }
+        } else {
+            assert posBasePointer == 0;
+        }
+    }
+
+    /**
+     * Returns the doc pointer of the doc to which the last call of {@link
+     * MultiLevelSkipListReader#skipTo(int)} has skipped.
+     */
+    public long getDocPointer() {
+        return lastDocPointer;
+    }
+
+    public long getPosPointer() {
+        return lastPosPointer;
+    }
+
+    public int getPosBufferUpto() {
+        return lastPosBufferUpto;
+    }
+
+    public long getPayPointer() {
+        return lastPayPointer;
+    }
+
+    public int getPayloadByteUpto() {
+        return lastPayloadByteUpto;
+    }
+
+    public int getNextSkipDoc() {
+        return skipDoc[0];
+    }
+
+    @Override
+    protected void seekChild(int level) throws IOException {
+        super.seekChild(level);
+        docPointer[level] = lastDocPointer;
+        if (posPointer != null) {
+            posPointer[level] = lastPosPointer;
+            posBufferUpto[level] = lastPosBufferUpto;
+            if (payloadByteUpto != null) {
+                payloadByteUpto[level] = lastPayloadByteUpto;
+            }
+            if (payPointer != null) {
+                payPointer[level] = lastPayPointer;
+            }
+        }
+    }
+
+    @Override
+    protected void setLastSkipData(int level) {
+        super.setLastSkipData(level);
+        lastDocPointer = docPointer[level];
+
+        if (posPointer != null) {
+            lastPosPointer = posPointer[level];
+            lastPosBufferUpto = posBufferUpto[level];
+            if (payPointer != null) {
+                lastPayPointer = payPointer[level];
+            }
+            if (payloadByteUpto != null) {
+                lastPayloadByteUpto = payloadByteUpto[level];
+            }
+        }
+    }
+
+    @Override
+    protected int readSkipData(int level, IndexInput skipStream) throws IOException {
+        int delta = skipStream.readVInt();
+        docPointer[level] += skipStream.readVLong();
+
+        if (posPointer != null) {
+            posPointer[level] += skipStream.readVLong();
+            posBufferUpto[level] = skipStream.readVInt();
+
+            if (payloadByteUpto != null) {
+                payloadByteUpto[level] = skipStream.readVInt();
+            }
+
+            if (payPointer != null) {
+                payPointer[level] += skipStream.readVLong();
+            }
+        }
+        readImpacts(level, skipStream);
+        return delta;
+    }
+
+    // The default impl skips impacts
+    protected void readImpacts(int level, IndexInput skipStream) throws IOException {
+        skipStream.skipBytes(skipStream.readVInt());
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipWriter.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipWriter.java
new file mode 100644
index 0000000000000..dbfb7c86a1475
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipWriter.java
@@ -0,0 +1,229 @@
+/*
+ * @notice
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. + */ + +package org.elasticsearch.index.codec.postings; + +import org.apache.lucene.codecs.CompetitiveImpactAccumulator; +import org.apache.lucene.codecs.MultiLevelSkipListWriter; +import org.apache.lucene.index.Impact; +import org.apache.lucene.store.ByteBuffersDataOutput; +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.store.IndexOutput; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +/** + * Write skip lists with multiple levels, and support skip within block ints. + * + *
+ * <p>Assume that docFreq = 28 and skipInterval = blockSize = 12:
+ *
+ * <pre>
+ *  |       block#0       | |      block#1        | |vInts|
+ *  d d d d d d d d d d d d d d d d d d d d d d d d d d d d (posting list)
+ *                          ^                       ^       (level 0 skip point)
+ * </pre>
+ *
+ * <p>Note that skipWriter will ignore the first document in block#0, since it is useless as a
+ * skip point. Also, we'll never skip into the vInts block; skip data is only recorded at its
+ * start point (if it exists).
+ *
+ * <p>
+ * <p>For each skip point, we will record: 1. the docID in the former position, i.e. for
+ * position 12 we record docID[11], etc.; 2. its related file pointers (position, payload);
+ * 3. the related numbers/uptos (position, payload); and 4. the start offset.
+ */
+final class ES812SkipWriter extends MultiLevelSkipListWriter {
+    private int[] lastSkipDoc;
+    private long[] lastSkipDocPointer;
+    private long[] lastSkipPosPointer;
+    private long[] lastSkipPayPointer;
+
+    private final IndexOutput docOut;
+    private final IndexOutput posOut;
+    private final IndexOutput payOut;
+
+    private int curDoc;
+    private long curDocPointer;
+    private long curPosPointer;
+    private long curPayPointer;
+    private int curPosBufferUpto;
+    private int curPayloadByteUpto;
+    private CompetitiveImpactAccumulator[] curCompetitiveFreqNorms;
+    private boolean fieldHasPositions;
+    private boolean fieldHasOffsets;
+    private boolean fieldHasPayloads;
+
+    ES812SkipWriter(int maxSkipLevels, int blockSize, int docCount, IndexOutput docOut, IndexOutput posOut, IndexOutput payOut) {
+        super(blockSize, 8, maxSkipLevels, docCount);
+        this.docOut = docOut;
+        this.posOut = posOut;
+        this.payOut = payOut;
+
+        lastSkipDoc = new int[maxSkipLevels];
+        lastSkipDocPointer = new long[maxSkipLevels];
+        if (posOut != null) {
+            lastSkipPosPointer = new long[maxSkipLevels];
+            if (payOut != null) {
+                lastSkipPayPointer = new long[maxSkipLevels];
+            }
+        }
+        curCompetitiveFreqNorms = new CompetitiveImpactAccumulator[maxSkipLevels];
+        for (int i = 0; i < maxSkipLevels; ++i) {
+            curCompetitiveFreqNorms[i] = new CompetitiveImpactAccumulator();
+        }
+    }
+
+    void setField(boolean fieldHasPositions, boolean fieldHasOffsets, boolean fieldHasPayloads) {
+        this.fieldHasPositions = fieldHasPositions;
+        this.fieldHasOffsets = fieldHasOffsets;
+        this.fieldHasPayloads = fieldHasPayloads;
+    }
+
+    // tricky: we only skip data for blocks (terms with more than 128 docs), but re-init'ing the
+    // skipper is pretty slow for rare terms in large segments, as we have to fill O(log #docs in
+    // segment) of junk -- and rare terms are the vast majority of terms (worst case: ID field or
+    // similar). So in resetSkip() we save away the previous pointers, and lazy-init only if we
+    // need to buffer skip data for the term.
+    private boolean initialized;
+    long lastDocFP;
+    long lastPosFP;
+    long lastPayFP;
+
+    @Override
+    public void resetSkip() {
+        lastDocFP = docOut.getFilePointer();
+        if (fieldHasPositions) {
+            lastPosFP = posOut.getFilePointer();
+            if (fieldHasOffsets || fieldHasPayloads) {
+                lastPayFP = payOut.getFilePointer();
+            }
+        }
+        if (initialized) {
+            for (CompetitiveImpactAccumulator acc : curCompetitiveFreqNorms) {
+                acc.clear();
+            }
+        }
+        initialized = false;
+    }
+
+    private void initSkip() {
+        if (initialized == false) {
+            super.resetSkip();
+            Arrays.fill(lastSkipDoc, 0);
+            Arrays.fill(lastSkipDocPointer, lastDocFP);
+            if (fieldHasPositions) {
+                Arrays.fill(lastSkipPosPointer, lastPosFP);
+                if (fieldHasOffsets || fieldHasPayloads) {
+                    Arrays.fill(lastSkipPayPointer, lastPayFP);
+                }
+            }
+            // sets of competitive freq,norm pairs should be empty at this point
+            assert Arrays.stream(curCompetitiveFreqNorms)
+                .map(CompetitiveImpactAccumulator::getCompetitiveFreqNormPairs)
+                .mapToInt(Collection::size)
+                .sum() == 0;
+            initialized = true;
+        }
+    }
+
+    /** Sets the values for the current skip data.
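+     * (Editor's illustrative note -- not part of the original patch: bufferSkip is invoked by
+     * ES812PostingsWriter.startDoc once per completed 128-doc block, and the buffered
+     * competitive (freq, norm) pairs are later serialized by writeImpacts below. For example,
+     * the impacts (1, 1), (3, 2), (7, 10) are written as vInt(0 << 1), vInt(1 << 1), then
+     * vInt((3 << 1) | 1) followed by zLong(7): a set low bit flags a norm increment larger
+     * than one.)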
+     */
+    public void bufferSkip(
+        int doc,
+        CompetitiveImpactAccumulator competitiveFreqNorms,
+        int numDocs,
+        long posFP,
+        long payFP,
+        int posBufferUpto,
+        int payloadByteUpto
+    ) throws IOException {
+        initSkip();
+        this.curDoc = doc;
+        this.curDocPointer = docOut.getFilePointer();
+        this.curPosPointer = posFP;
+        this.curPayPointer = payFP;
+        this.curPosBufferUpto = posBufferUpto;
+        this.curPayloadByteUpto = payloadByteUpto;
+        this.curCompetitiveFreqNorms[0].addAll(competitiveFreqNorms);
+        bufferSkip(numDocs);
+    }
+
+    private final ByteBuffersDataOutput freqNormOut = ByteBuffersDataOutput.newResettableInstance();
+
+    @Override
+    protected void writeSkipData(int level, DataOutput skipBuffer) throws IOException {
+
+        int delta = curDoc - lastSkipDoc[level];
+
+        skipBuffer.writeVInt(delta);
+        lastSkipDoc[level] = curDoc;
+
+        skipBuffer.writeVLong(curDocPointer - lastSkipDocPointer[level]);
+        lastSkipDocPointer[level] = curDocPointer;
+
+        if (fieldHasPositions) {
+
+            skipBuffer.writeVLong(curPosPointer - lastSkipPosPointer[level]);
+            lastSkipPosPointer[level] = curPosPointer;
+            skipBuffer.writeVInt(curPosBufferUpto);
+
+            if (fieldHasPayloads) {
+                skipBuffer.writeVInt(curPayloadByteUpto);
+            }
+
+            if (fieldHasOffsets || fieldHasPayloads) {
+                skipBuffer.writeVLong(curPayPointer - lastSkipPayPointer[level]);
+                lastSkipPayPointer[level] = curPayPointer;
+            }
+        }
+
+        CompetitiveImpactAccumulator competitiveFreqNorms = curCompetitiveFreqNorms[level];
+        assert competitiveFreqNorms.getCompetitiveFreqNormPairs().size() > 0;
+        if (level + 1 < numberOfSkipLevels) {
+            curCompetitiveFreqNorms[level + 1].addAll(competitiveFreqNorms);
+        }
+        writeImpacts(competitiveFreqNorms, freqNormOut);
+        skipBuffer.writeVInt(Math.toIntExact(freqNormOut.size()));
+        freqNormOut.copyTo(skipBuffer);
+        freqNormOut.reset();
+        competitiveFreqNorms.clear();
+    }
+
+    static void writeImpacts(CompetitiveImpactAccumulator acc, DataOutput out) throws IOException {
+        Collection<Impact> impacts = acc.getCompetitiveFreqNormPairs();
+        Impact previous = new Impact(0, 0);
+        for (Impact impact : impacts) {
+            assert impact.freq > previous.freq;
+            assert Long.compareUnsigned(impact.norm, previous.norm) > 0;
+            int freqDelta = impact.freq - previous.freq - 1;
+            long normDelta = impact.norm - previous.norm - 1;
+            if (normDelta == 0) {
+                // most of the time, norm only increases by 1, so we can fold everything into a single byte
+                out.writeVInt(freqDelta << 1);
+            } else {
+                out.writeVInt((freqDelta << 1) | 1);
+                out.writeZLong(normDelta);
+            }
+            previous = impact;
+        }
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ForUtil.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ForUtil.java
new file mode 100644
index 0000000000000..d874caab1b8c0
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ForUtil.java
@@ -0,0 +1,1049 @@
+/*
+ * @notice
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. + */ +package org.elasticsearch.index.codec.postings; + +import org.apache.lucene.store.DataInput; +import org.apache.lucene.store.DataOutput; + +import java.io.IOException; + +// Inspired from https://fulmicoton.com/posts/bitpacking/ +// Encodes multiple integers in a long to get SIMD-like speedups. +// If bitsPerValue <= 8 then we pack 8 ints per long +// else if bitsPerValue <= 16 we pack 4 ints per long +// else we pack 2 ints per long +final class ForUtil { + + static final int BLOCK_SIZE = 128; + private static final int BLOCK_SIZE_LOG2 = 7; + + private static long expandMask32(long mask32) { + return mask32 | (mask32 << 32); + } + + private static long expandMask16(long mask16) { + return expandMask32(mask16 | (mask16 << 16)); + } + + private static long expandMask8(long mask8) { + return expandMask16(mask8 | (mask8 << 8)); + } + + private static long mask32(int bitsPerValue) { + return expandMask32((1L << bitsPerValue) - 1); + } + + private static long mask16(int bitsPerValue) { + return expandMask16((1L << bitsPerValue) - 1); + } + + private static long mask8(int bitsPerValue) { + return expandMask8((1L << bitsPerValue) - 1); + } + + private static void expand8(long[] arr) { + for (int i = 0; i < 16; ++i) { + long l = arr[i]; + arr[i] = (l >>> 56) & 0xFFL; + arr[16 + i] = (l >>> 48) & 0xFFL; + arr[32 + i] = (l >>> 40) & 0xFFL; + arr[48 + i] = (l >>> 32) & 0xFFL; + arr[64 + i] = (l >>> 24) & 0xFFL; + arr[80 + i] = (l >>> 16) & 0xFFL; + arr[96 + i] = (l >>> 8) & 0xFFL; + arr[112 + i] = l & 0xFFL; + } + } + + private static void expand8To32(long[] arr) { + for (int i = 0; i < 16; ++i) { + long l = arr[i]; + arr[i] = (l >>> 24) & 0x000000FF000000FFL; + arr[16 + i] = (l >>> 16) & 0x000000FF000000FFL; + arr[32 + i] = (l >>> 8) & 0x000000FF000000FFL; + arr[48 + i] = l & 0x000000FF000000FFL; + } + } + + private static void collapse8(long[] arr) { + for (int i = 0; i < 16; ++i) { + arr[i] = (arr[i] << 56) | (arr[16 + i] << 48) | (arr[32 + i] << 40) | (arr[48 + i] << 32) | (arr[64 + i] << 24) | (arr[80 + i] + << 16) | (arr[96 + i] << 8) | arr[112 + i]; + } + } + + private static void expand16(long[] arr) { + for (int i = 0; i < 32; ++i) { + long l = arr[i]; + arr[i] = (l >>> 48) & 0xFFFFL; + arr[32 + i] = (l >>> 32) & 0xFFFFL; + arr[64 + i] = (l >>> 16) & 0xFFFFL; + arr[96 + i] = l & 0xFFFFL; + } + } + + private static void expand16To32(long[] arr) { + for (int i = 0; i < 32; ++i) { + long l = arr[i]; + arr[i] = (l >>> 16) & 0x0000FFFF0000FFFFL; + arr[32 + i] = l & 0x0000FFFF0000FFFFL; + } + } + + private static void collapse16(long[] arr) { + for (int i = 0; i < 32; ++i) { + arr[i] = (arr[i] << 48) | (arr[32 + i] << 32) | (arr[64 + i] << 16) | arr[96 + i]; + } + } + + private static void expand32(long[] arr) { + for (int i = 0; i < 64; ++i) { + long l = arr[i]; + arr[i] = l >>> 32; + arr[64 + i] = l & 0xFFFFFFFFL; + } + } + + private static void collapse32(long[] arr) { + for (int i = 0; i < 64; ++i) { + arr[i] = (arr[i] << 32) | arr[64 + i]; + } + } + + private final long[] tmp = new long[BLOCK_SIZE / 2]; + + /** 
Encode 128 integers from {@code longs} into {@code out}. */ + void encode(long[] longs, int bitsPerValue, DataOutput out) throws IOException { + final int nextPrimitive; + final int numLongs; + if (bitsPerValue <= 8) { + nextPrimitive = 8; + numLongs = BLOCK_SIZE / 8; + collapse8(longs); + } else if (bitsPerValue <= 16) { + nextPrimitive = 16; + numLongs = BLOCK_SIZE / 4; + collapse16(longs); + } else { + nextPrimitive = 32; + numLongs = BLOCK_SIZE / 2; + collapse32(longs); + } + + final int numLongsPerShift = bitsPerValue * 2; + int idx = 0; + int shift = nextPrimitive - bitsPerValue; + for (int i = 0; i < numLongsPerShift; ++i) { + tmp[i] = longs[idx++] << shift; + } + for (shift = shift - bitsPerValue; shift >= 0; shift -= bitsPerValue) { + for (int i = 0; i < numLongsPerShift; ++i) { + tmp[i] |= longs[idx++] << shift; + } + } + + final int remainingBitsPerLong = shift + bitsPerValue; + final long maskRemainingBitsPerLong; + if (nextPrimitive == 8) { + maskRemainingBitsPerLong = MASKS8[remainingBitsPerLong]; + } else if (nextPrimitive == 16) { + maskRemainingBitsPerLong = MASKS16[remainingBitsPerLong]; + } else { + maskRemainingBitsPerLong = MASKS32[remainingBitsPerLong]; + } + + int tmpIdx = 0; + int remainingBitsPerValue = bitsPerValue; + while (idx < numLongs) { + if (remainingBitsPerValue >= remainingBitsPerLong) { + remainingBitsPerValue -= remainingBitsPerLong; + tmp[tmpIdx++] |= (longs[idx] >>> remainingBitsPerValue) & maskRemainingBitsPerLong; + if (remainingBitsPerValue == 0) { + idx++; + remainingBitsPerValue = bitsPerValue; + } + } else { + final long mask1, mask2; + if (nextPrimitive == 8) { + mask1 = MASKS8[remainingBitsPerValue]; + mask2 = MASKS8[remainingBitsPerLong - remainingBitsPerValue]; + } else if (nextPrimitive == 16) { + mask1 = MASKS16[remainingBitsPerValue]; + mask2 = MASKS16[remainingBitsPerLong - remainingBitsPerValue]; + } else { + mask1 = MASKS32[remainingBitsPerValue]; + mask2 = MASKS32[remainingBitsPerLong - remainingBitsPerValue]; + } + tmp[tmpIdx] |= (longs[idx++] & mask1) << (remainingBitsPerLong - remainingBitsPerValue); + remainingBitsPerValue = bitsPerValue - remainingBitsPerLong + remainingBitsPerValue; + tmp[tmpIdx++] |= (longs[idx] >>> remainingBitsPerValue) & mask2; + } + } + + for (int i = 0; i < numLongsPerShift; ++i) { + out.writeLong(tmp[i]); + } + } + + /** Number of bytes required to encode 128 integers of {@code bitsPerValue} bits per value. 
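+ * (Editor's illustrative note -- not part of the original patch: e.g. numBytes(5) =
+ * 5 << (7 - 3) = 80, matching 128 values * 5 bits = 640 bits = 80 bytes.)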
*/ + int numBytes(int bitsPerValue) { + return bitsPerValue << (BLOCK_SIZE_LOG2 - 3); + } + + private static void decodeSlow(int bitsPerValue, DataInput in, long[] tmp, long[] longs) throws IOException { + final int numLongs = bitsPerValue << 1; + in.readLongs(tmp, 0, numLongs); + final long mask = MASKS32[bitsPerValue]; + int longsIdx = 0; + int shift = 32 - bitsPerValue; + for (; shift >= 0; shift -= bitsPerValue) { + shiftLongs(tmp, numLongs, longs, longsIdx, shift, mask); + longsIdx += numLongs; + } + final int remainingBitsPerLong = shift + bitsPerValue; + final long mask32RemainingBitsPerLong = MASKS32[remainingBitsPerLong]; + int tmpIdx = 0; + int remainingBits = remainingBitsPerLong; + for (; longsIdx < BLOCK_SIZE / 2; ++longsIdx) { + int b = bitsPerValue - remainingBits; + long l = (tmp[tmpIdx++] & MASKS32[remainingBits]) << b; + while (b >= remainingBitsPerLong) { + b -= remainingBitsPerLong; + l |= (tmp[tmpIdx++] & mask32RemainingBitsPerLong) << b; + } + if (b > 0) { + l |= (tmp[tmpIdx] >>> (remainingBitsPerLong - b)) & MASKS32[b]; + remainingBits = remainingBitsPerLong - b; + } else { + remainingBits = remainingBitsPerLong; + } + longs[longsIdx] = l; + } + } + + /** + * The pattern that this shiftLongs method applies is recognized by the C2 compiler, which + * generates SIMD instructions for it in order to shift multiple longs at once. + */ + private static void shiftLongs(long[] a, int count, long[] b, int bi, int shift, long mask) { + for (int i = 0; i < count; ++i) { + b[bi + i] = (a[i] >>> shift) & mask; + } + } + + private static final long[] MASKS8 = new long[8]; + private static final long[] MASKS16 = new long[16]; + private static final long[] MASKS32 = new long[32]; + + static { + for (int i = 0; i < 8; ++i) { + MASKS8[i] = mask8(i); + } + for (int i = 0; i < 16; ++i) { + MASKS16[i] = mask16(i); + } + for (int i = 0; i < 32; ++i) { + MASKS32[i] = mask32(i); + } + } + + // mark values in array as final longs to avoid the cost of reading array, arrays should only be + // used when the idx is a variable + private static final long MASK8_1 = MASKS8[1]; + private static final long MASK8_2 = MASKS8[2]; + private static final long MASK8_3 = MASKS8[3]; + private static final long MASK8_4 = MASKS8[4]; + private static final long MASK8_5 = MASKS8[5]; + private static final long MASK8_6 = MASKS8[6]; + private static final long MASK8_7 = MASKS8[7]; + private static final long MASK16_1 = MASKS16[1]; + private static final long MASK16_2 = MASKS16[2]; + private static final long MASK16_3 = MASKS16[3]; + private static final long MASK16_4 = MASKS16[4]; + private static final long MASK16_5 = MASKS16[5]; + private static final long MASK16_6 = MASKS16[6]; + private static final long MASK16_7 = MASKS16[7]; + private static final long MASK16_9 = MASKS16[9]; + private static final long MASK16_10 = MASKS16[10]; + private static final long MASK16_11 = MASKS16[11]; + private static final long MASK16_12 = MASKS16[12]; + private static final long MASK16_13 = MASKS16[13]; + private static final long MASK16_14 = MASKS16[14]; + private static final long MASK16_15 = MASKS16[15]; + private static final long MASK32_1 = MASKS32[1]; + private static final long MASK32_2 = MASKS32[2]; + private static final long MASK32_3 = MASKS32[3]; + private static final long MASK32_4 = MASKS32[4]; + private static final long MASK32_5 = MASKS32[5]; + private static final long MASK32_6 = MASKS32[6]; + private static final long MASK32_7 = MASKS32[7]; + private static final long MASK32_8 = MASKS32[8]; + private static 
final long MASK32_9 = MASKS32[9]; + private static final long MASK32_10 = MASKS32[10]; + private static final long MASK32_11 = MASKS32[11]; + private static final long MASK32_12 = MASKS32[12]; + private static final long MASK32_13 = MASKS32[13]; + private static final long MASK32_14 = MASKS32[14]; + private static final long MASK32_15 = MASKS32[15]; + private static final long MASK32_17 = MASKS32[17]; + private static final long MASK32_18 = MASKS32[18]; + private static final long MASK32_19 = MASKS32[19]; + private static final long MASK32_20 = MASKS32[20]; + private static final long MASK32_21 = MASKS32[21]; + private static final long MASK32_22 = MASKS32[22]; + private static final long MASK32_23 = MASKS32[23]; + private static final long MASK32_24 = MASKS32[24]; + + /** Decode 128 integers into {@code longs}. */ + void decode(int bitsPerValue, DataInput in, long[] longs) throws IOException { + switch (bitsPerValue) { + case 1: + decode1(in, tmp, longs); + expand8(longs); + break; + case 2: + decode2(in, tmp, longs); + expand8(longs); + break; + case 3: + decode3(in, tmp, longs); + expand8(longs); + break; + case 4: + decode4(in, tmp, longs); + expand8(longs); + break; + case 5: + decode5(in, tmp, longs); + expand8(longs); + break; + case 6: + decode6(in, tmp, longs); + expand8(longs); + break; + case 7: + decode7(in, tmp, longs); + expand8(longs); + break; + case 8: + decode8(in, tmp, longs); + expand8(longs); + break; + case 9: + decode9(in, tmp, longs); + expand16(longs); + break; + case 10: + decode10(in, tmp, longs); + expand16(longs); + break; + case 11: + decode11(in, tmp, longs); + expand16(longs); + break; + case 12: + decode12(in, tmp, longs); + expand16(longs); + break; + case 13: + decode13(in, tmp, longs); + expand16(longs); + break; + case 14: + decode14(in, tmp, longs); + expand16(longs); + break; + case 15: + decode15(in, tmp, longs); + expand16(longs); + break; + case 16: + decode16(in, tmp, longs); + expand16(longs); + break; + case 17: + decode17(in, tmp, longs); + expand32(longs); + break; + case 18: + decode18(in, tmp, longs); + expand32(longs); + break; + case 19: + decode19(in, tmp, longs); + expand32(longs); + break; + case 20: + decode20(in, tmp, longs); + expand32(longs); + break; + case 21: + decode21(in, tmp, longs); + expand32(longs); + break; + case 22: + decode22(in, tmp, longs); + expand32(longs); + break; + case 23: + decode23(in, tmp, longs); + expand32(longs); + break; + case 24: + decode24(in, tmp, longs); + expand32(longs); + break; + default: + decodeSlow(bitsPerValue, in, tmp, longs); + expand32(longs); + break; + } + } + + /** + * Decodes 128 integers into 64 {@code longs} such that each long contains two values, each + * represented with 32 bits. Values [0..63] are encoded in the high-order bits of {@code longs} + * [0..63], and values [64..127] are encoded in the low-order bits of {@code longs} [0..63]. This + * representation may allow subsequent operations to be performed on two values at a time. 
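+ * (Editor's illustrative note -- not part of the original patch: e.g. after
+ * decodeTo32(1, in, longs), value i sits in the high 32 bits of longs[i] and value
+ * 64 + i in the low 32 bits, so a shared delta can be added to both lanes with a
+ * single 64-bit addition.)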
+ */ + void decodeTo32(int bitsPerValue, DataInput in, long[] longs) throws IOException { + switch (bitsPerValue) { + case 1: + decode1(in, tmp, longs); + expand8To32(longs); + break; + case 2: + decode2(in, tmp, longs); + expand8To32(longs); + break; + case 3: + decode3(in, tmp, longs); + expand8To32(longs); + break; + case 4: + decode4(in, tmp, longs); + expand8To32(longs); + break; + case 5: + decode5(in, tmp, longs); + expand8To32(longs); + break; + case 6: + decode6(in, tmp, longs); + expand8To32(longs); + break; + case 7: + decode7(in, tmp, longs); + expand8To32(longs); + break; + case 8: + decode8(in, tmp, longs); + expand8To32(longs); + break; + case 9: + decode9(in, tmp, longs); + expand16To32(longs); + break; + case 10: + decode10(in, tmp, longs); + expand16To32(longs); + break; + case 11: + decode11(in, tmp, longs); + expand16To32(longs); + break; + case 12: + decode12(in, tmp, longs); + expand16To32(longs); + break; + case 13: + decode13(in, tmp, longs); + expand16To32(longs); + break; + case 14: + decode14(in, tmp, longs); + expand16To32(longs); + break; + case 15: + decode15(in, tmp, longs); + expand16To32(longs); + break; + case 16: + decode16(in, tmp, longs); + expand16To32(longs); + break; + case 17: + decode17(in, tmp, longs); + break; + case 18: + decode18(in, tmp, longs); + break; + case 19: + decode19(in, tmp, longs); + break; + case 20: + decode20(in, tmp, longs); + break; + case 21: + decode21(in, tmp, longs); + break; + case 22: + decode22(in, tmp, longs); + break; + case 23: + decode23(in, tmp, longs); + break; + case 24: + decode24(in, tmp, longs); + break; + default: + decodeSlow(bitsPerValue, in, tmp, longs); + break; + } + } + + private static void decode1(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 2); + shiftLongs(tmp, 2, longs, 0, 7, MASK8_1); + shiftLongs(tmp, 2, longs, 2, 6, MASK8_1); + shiftLongs(tmp, 2, longs, 4, 5, MASK8_1); + shiftLongs(tmp, 2, longs, 6, 4, MASK8_1); + shiftLongs(tmp, 2, longs, 8, 3, MASK8_1); + shiftLongs(tmp, 2, longs, 10, 2, MASK8_1); + shiftLongs(tmp, 2, longs, 12, 1, MASK8_1); + shiftLongs(tmp, 2, longs, 14, 0, MASK8_1); + } + + private static void decode2(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 4); + shiftLongs(tmp, 4, longs, 0, 6, MASK8_2); + shiftLongs(tmp, 4, longs, 4, 4, MASK8_2); + shiftLongs(tmp, 4, longs, 8, 2, MASK8_2); + shiftLongs(tmp, 4, longs, 12, 0, MASK8_2); + } + + private static void decode3(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 6); + shiftLongs(tmp, 6, longs, 0, 5, MASK8_3); + shiftLongs(tmp, 6, longs, 6, 2, MASK8_3); + for (int iter = 0, tmpIdx = 0, longsIdx = 12; iter < 2; ++iter, tmpIdx += 3, longsIdx += 2) { + long l0 = (tmp[tmpIdx + 0] & MASK8_2) << 1; + l0 |= (tmp[tmpIdx + 1] >>> 1) & MASK8_1; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK8_1) << 2; + l1 |= (tmp[tmpIdx + 2] & MASK8_2) << 0; + longs[longsIdx + 1] = l1; + } + } + + private static void decode4(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 8); + shiftLongs(tmp, 8, longs, 0, 4, MASK8_4); + shiftLongs(tmp, 8, longs, 8, 0, MASK8_4); + } + + private static void decode5(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 10); + shiftLongs(tmp, 10, longs, 0, 3, MASK8_5); + for (int iter = 0, tmpIdx = 0, longsIdx = 10; iter < 2; ++iter, tmpIdx += 5, longsIdx += 3) { + long l0 = (tmp[tmpIdx + 0] & MASK8_3) << 2; + l0 |= (tmp[tmpIdx + 1] 
>>> 1) & MASK8_2; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK8_1) << 4; + l1 |= (tmp[tmpIdx + 2] & MASK8_3) << 1; + l1 |= (tmp[tmpIdx + 3] >>> 2) & MASK8_1; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 3] & MASK8_2) << 3; + l2 |= (tmp[tmpIdx + 4] & MASK8_3) << 0; + longs[longsIdx + 2] = l2; + } + } + + private static void decode6(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 12); + shiftLongs(tmp, 12, longs, 0, 2, MASK8_6); + shiftLongs(tmp, 12, tmp, 0, 0, MASK8_2); + for (int iter = 0, tmpIdx = 0, longsIdx = 12; iter < 4; ++iter, tmpIdx += 3, longsIdx += 1) { + long l0 = tmp[tmpIdx + 0] << 4; + l0 |= tmp[tmpIdx + 1] << 2; + l0 |= tmp[tmpIdx + 2] << 0; + longs[longsIdx + 0] = l0; + } + } + + private static void decode7(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 14); + shiftLongs(tmp, 14, longs, 0, 1, MASK8_7); + shiftLongs(tmp, 14, tmp, 0, 0, MASK8_1); + for (int iter = 0, tmpIdx = 0, longsIdx = 14; iter < 2; ++iter, tmpIdx += 7, longsIdx += 1) { + long l0 = tmp[tmpIdx + 0] << 6; + l0 |= tmp[tmpIdx + 1] << 5; + l0 |= tmp[tmpIdx + 2] << 4; + l0 |= tmp[tmpIdx + 3] << 3; + l0 |= tmp[tmpIdx + 4] << 2; + l0 |= tmp[tmpIdx + 5] << 1; + l0 |= tmp[tmpIdx + 6] << 0; + longs[longsIdx + 0] = l0; + } + } + + private static void decode8(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(longs, 0, 16); + } + + private static void decode9(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 18); + shiftLongs(tmp, 18, longs, 0, 7, MASK16_9); + for (int iter = 0, tmpIdx = 0, longsIdx = 18; iter < 2; ++iter, tmpIdx += 9, longsIdx += 7) { + long l0 = (tmp[tmpIdx + 0] & MASK16_7) << 2; + l0 |= (tmp[tmpIdx + 1] >>> 5) & MASK16_2; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK16_5) << 4; + l1 |= (tmp[tmpIdx + 2] >>> 3) & MASK16_4; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 2] & MASK16_3) << 6; + l2 |= (tmp[tmpIdx + 3] >>> 1) & MASK16_6; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 3] & MASK16_1) << 8; + l3 |= (tmp[tmpIdx + 4] & MASK16_7) << 1; + l3 |= (tmp[tmpIdx + 5] >>> 6) & MASK16_1; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 5] & MASK16_6) << 3; + l4 |= (tmp[tmpIdx + 6] >>> 4) & MASK16_3; + longs[longsIdx + 4] = l4; + long l5 = (tmp[tmpIdx + 6] & MASK16_4) << 5; + l5 |= (tmp[tmpIdx + 7] >>> 2) & MASK16_5; + longs[longsIdx + 5] = l5; + long l6 = (tmp[tmpIdx + 7] & MASK16_2) << 7; + l6 |= (tmp[tmpIdx + 8] & MASK16_7) << 0; + longs[longsIdx + 6] = l6; + } + } + + private static void decode10(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 20); + shiftLongs(tmp, 20, longs, 0, 6, MASK16_10); + for (int iter = 0, tmpIdx = 0, longsIdx = 20; iter < 4; ++iter, tmpIdx += 5, longsIdx += 3) { + long l0 = (tmp[tmpIdx + 0] & MASK16_6) << 4; + l0 |= (tmp[tmpIdx + 1] >>> 2) & MASK16_4; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK16_2) << 8; + l1 |= (tmp[tmpIdx + 2] & MASK16_6) << 2; + l1 |= (tmp[tmpIdx + 3] >>> 4) & MASK16_2; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 3] & MASK16_4) << 6; + l2 |= (tmp[tmpIdx + 4] & MASK16_6) << 0; + longs[longsIdx + 2] = l2; + } + } + + private static void decode11(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 22); + shiftLongs(tmp, 22, longs, 0, 5, MASK16_11); + for (int iter = 0, tmpIdx = 0, longsIdx = 22; iter < 2; ++iter, tmpIdx += 11, longsIdx += 5) { + long l0 = (tmp[tmpIdx 
+ 0] & MASK16_5) << 6; + l0 |= (tmp[tmpIdx + 1] & MASK16_5) << 1; + l0 |= (tmp[tmpIdx + 2] >>> 4) & MASK16_1; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 2] & MASK16_4) << 7; + l1 |= (tmp[tmpIdx + 3] & MASK16_5) << 2; + l1 |= (tmp[tmpIdx + 4] >>> 3) & MASK16_2; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 4] & MASK16_3) << 8; + l2 |= (tmp[tmpIdx + 5] & MASK16_5) << 3; + l2 |= (tmp[tmpIdx + 6] >>> 2) & MASK16_3; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 6] & MASK16_2) << 9; + l3 |= (tmp[tmpIdx + 7] & MASK16_5) << 4; + l3 |= (tmp[tmpIdx + 8] >>> 1) & MASK16_4; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 8] & MASK16_1) << 10; + l4 |= (tmp[tmpIdx + 9] & MASK16_5) << 5; + l4 |= (tmp[tmpIdx + 10] & MASK16_5) << 0; + longs[longsIdx + 4] = l4; + } + } + + private static void decode12(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 24); + shiftLongs(tmp, 24, longs, 0, 4, MASK16_12); + shiftLongs(tmp, 24, tmp, 0, 0, MASK16_4); + for (int iter = 0, tmpIdx = 0, longsIdx = 24; iter < 8; ++iter, tmpIdx += 3, longsIdx += 1) { + long l0 = tmp[tmpIdx + 0] << 8; + l0 |= tmp[tmpIdx + 1] << 4; + l0 |= tmp[tmpIdx + 2] << 0; + longs[longsIdx + 0] = l0; + } + } + + private static void decode13(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 26); + shiftLongs(tmp, 26, longs, 0, 3, MASK16_13); + for (int iter = 0, tmpIdx = 0, longsIdx = 26; iter < 2; ++iter, tmpIdx += 13, longsIdx += 3) { + long l0 = (tmp[tmpIdx + 0] & MASK16_3) << 10; + l0 |= (tmp[tmpIdx + 1] & MASK16_3) << 7; + l0 |= (tmp[tmpIdx + 2] & MASK16_3) << 4; + l0 |= (tmp[tmpIdx + 3] & MASK16_3) << 1; + l0 |= (tmp[tmpIdx + 4] >>> 2) & MASK16_1; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 4] & MASK16_2) << 11; + l1 |= (tmp[tmpIdx + 5] & MASK16_3) << 8; + l1 |= (tmp[tmpIdx + 6] & MASK16_3) << 5; + l1 |= (tmp[tmpIdx + 7] & MASK16_3) << 2; + l1 |= (tmp[tmpIdx + 8] >>> 1) & MASK16_2; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 8] & MASK16_1) << 12; + l2 |= (tmp[tmpIdx + 9] & MASK16_3) << 9; + l2 |= (tmp[tmpIdx + 10] & MASK16_3) << 6; + l2 |= (tmp[tmpIdx + 11] & MASK16_3) << 3; + l2 |= (tmp[tmpIdx + 12] & MASK16_3) << 0; + longs[longsIdx + 2] = l2; + } + } + + private static void decode14(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 28); + shiftLongs(tmp, 28, longs, 0, 2, MASK16_14); + shiftLongs(tmp, 28, tmp, 0, 0, MASK16_2); + for (int iter = 0, tmpIdx = 0, longsIdx = 28; iter < 4; ++iter, tmpIdx += 7, longsIdx += 1) { + long l0 = tmp[tmpIdx + 0] << 12; + l0 |= tmp[tmpIdx + 1] << 10; + l0 |= tmp[tmpIdx + 2] << 8; + l0 |= tmp[tmpIdx + 3] << 6; + l0 |= tmp[tmpIdx + 4] << 4; + l0 |= tmp[tmpIdx + 5] << 2; + l0 |= tmp[tmpIdx + 6] << 0; + longs[longsIdx + 0] = l0; + } + } + + private static void decode15(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 30); + shiftLongs(tmp, 30, longs, 0, 1, MASK16_15); + shiftLongs(tmp, 30, tmp, 0, 0, MASK16_1); + for (int iter = 0, tmpIdx = 0, longsIdx = 30; iter < 2; ++iter, tmpIdx += 15, longsIdx += 1) { + long l0 = tmp[tmpIdx + 0] << 14; + l0 |= tmp[tmpIdx + 1] << 13; + l0 |= tmp[tmpIdx + 2] << 12; + l0 |= tmp[tmpIdx + 3] << 11; + l0 |= tmp[tmpIdx + 4] << 10; + l0 |= tmp[tmpIdx + 5] << 9; + l0 |= tmp[tmpIdx + 6] << 8; + l0 |= tmp[tmpIdx + 7] << 7; + l0 |= tmp[tmpIdx + 8] << 6; + l0 |= tmp[tmpIdx + 9] << 5; + l0 |= tmp[tmpIdx + 10] << 4; + l0 |= tmp[tmpIdx + 11] << 3; + l0 |= tmp[tmpIdx + 12] << 2; + l0 |= tmp[tmpIdx + 13] 
<< 1; + l0 |= tmp[tmpIdx + 14] << 0; + longs[longsIdx + 0] = l0; + } + } + + private static void decode16(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(longs, 0, 32); + } + + private static void decode17(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 34); + shiftLongs(tmp, 34, longs, 0, 15, MASK32_17); + for (int iter = 0, tmpIdx = 0, longsIdx = 34; iter < 2; ++iter, tmpIdx += 17, longsIdx += 15) { + long l0 = (tmp[tmpIdx + 0] & MASK32_15) << 2; + l0 |= (tmp[tmpIdx + 1] >>> 13) & MASK32_2; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK32_13) << 4; + l1 |= (tmp[tmpIdx + 2] >>> 11) & MASK32_4; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 2] & MASK32_11) << 6; + l2 |= (tmp[tmpIdx + 3] >>> 9) & MASK32_6; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 3] & MASK32_9) << 8; + l3 |= (tmp[tmpIdx + 4] >>> 7) & MASK32_8; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 4] & MASK32_7) << 10; + l4 |= (tmp[tmpIdx + 5] >>> 5) & MASK32_10; + longs[longsIdx + 4] = l4; + long l5 = (tmp[tmpIdx + 5] & MASK32_5) << 12; + l5 |= (tmp[tmpIdx + 6] >>> 3) & MASK32_12; + longs[longsIdx + 5] = l5; + long l6 = (tmp[tmpIdx + 6] & MASK32_3) << 14; + l6 |= (tmp[tmpIdx + 7] >>> 1) & MASK32_14; + longs[longsIdx + 6] = l6; + long l7 = (tmp[tmpIdx + 7] & MASK32_1) << 16; + l7 |= (tmp[tmpIdx + 8] & MASK32_15) << 1; + l7 |= (tmp[tmpIdx + 9] >>> 14) & MASK32_1; + longs[longsIdx + 7] = l7; + long l8 = (tmp[tmpIdx + 9] & MASK32_14) << 3; + l8 |= (tmp[tmpIdx + 10] >>> 12) & MASK32_3; + longs[longsIdx + 8] = l8; + long l9 = (tmp[tmpIdx + 10] & MASK32_12) << 5; + l9 |= (tmp[tmpIdx + 11] >>> 10) & MASK32_5; + longs[longsIdx + 9] = l9; + long l10 = (tmp[tmpIdx + 11] & MASK32_10) << 7; + l10 |= (tmp[tmpIdx + 12] >>> 8) & MASK32_7; + longs[longsIdx + 10] = l10; + long l11 = (tmp[tmpIdx + 12] & MASK32_8) << 9; + l11 |= (tmp[tmpIdx + 13] >>> 6) & MASK32_9; + longs[longsIdx + 11] = l11; + long l12 = (tmp[tmpIdx + 13] & MASK32_6) << 11; + l12 |= (tmp[tmpIdx + 14] >>> 4) & MASK32_11; + longs[longsIdx + 12] = l12; + long l13 = (tmp[tmpIdx + 14] & MASK32_4) << 13; + l13 |= (tmp[tmpIdx + 15] >>> 2) & MASK32_13; + longs[longsIdx + 13] = l13; + long l14 = (tmp[tmpIdx + 15] & MASK32_2) << 15; + l14 |= (tmp[tmpIdx + 16] & MASK32_15) << 0; + longs[longsIdx + 14] = l14; + } + } + + private static void decode18(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 36); + shiftLongs(tmp, 36, longs, 0, 14, MASK32_18); + for (int iter = 0, tmpIdx = 0, longsIdx = 36; iter < 4; ++iter, tmpIdx += 9, longsIdx += 7) { + long l0 = (tmp[tmpIdx + 0] & MASK32_14) << 4; + l0 |= (tmp[tmpIdx + 1] >>> 10) & MASK32_4; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK32_10) << 8; + l1 |= (tmp[tmpIdx + 2] >>> 6) & MASK32_8; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 2] & MASK32_6) << 12; + l2 |= (tmp[tmpIdx + 3] >>> 2) & MASK32_12; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 3] & MASK32_2) << 16; + l3 |= (tmp[tmpIdx + 4] & MASK32_14) << 2; + l3 |= (tmp[tmpIdx + 5] >>> 12) & MASK32_2; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 5] & MASK32_12) << 6; + l4 |= (tmp[tmpIdx + 6] >>> 8) & MASK32_6; + longs[longsIdx + 4] = l4; + long l5 = (tmp[tmpIdx + 6] & MASK32_8) << 10; + l5 |= (tmp[tmpIdx + 7] >>> 4) & MASK32_10; + longs[longsIdx + 5] = l5; + long l6 = (tmp[tmpIdx + 7] & MASK32_4) << 14; + l6 |= (tmp[tmpIdx + 8] & MASK32_14) << 0; + longs[longsIdx + 6] = l6; + } + } + + private static void 
decode19(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 38); + shiftLongs(tmp, 38, longs, 0, 13, MASK32_19); + for (int iter = 0, tmpIdx = 0, longsIdx = 38; iter < 2; ++iter, tmpIdx += 19, longsIdx += 13) { + long l0 = (tmp[tmpIdx + 0] & MASK32_13) << 6; + l0 |= (tmp[tmpIdx + 1] >>> 7) & MASK32_6; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK32_7) << 12; + l1 |= (tmp[tmpIdx + 2] >>> 1) & MASK32_12; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 2] & MASK32_1) << 18; + l2 |= (tmp[tmpIdx + 3] & MASK32_13) << 5; + l2 |= (tmp[tmpIdx + 4] >>> 8) & MASK32_5; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 4] & MASK32_8) << 11; + l3 |= (tmp[tmpIdx + 5] >>> 2) & MASK32_11; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 5] & MASK32_2) << 17; + l4 |= (tmp[tmpIdx + 6] & MASK32_13) << 4; + l4 |= (tmp[tmpIdx + 7] >>> 9) & MASK32_4; + longs[longsIdx + 4] = l4; + long l5 = (tmp[tmpIdx + 7] & MASK32_9) << 10; + l5 |= (tmp[tmpIdx + 8] >>> 3) & MASK32_10; + longs[longsIdx + 5] = l5; + long l6 = (tmp[tmpIdx + 8] & MASK32_3) << 16; + l6 |= (tmp[tmpIdx + 9] & MASK32_13) << 3; + l6 |= (tmp[tmpIdx + 10] >>> 10) & MASK32_3; + longs[longsIdx + 6] = l6; + long l7 = (tmp[tmpIdx + 10] & MASK32_10) << 9; + l7 |= (tmp[tmpIdx + 11] >>> 4) & MASK32_9; + longs[longsIdx + 7] = l7; + long l8 = (tmp[tmpIdx + 11] & MASK32_4) << 15; + l8 |= (tmp[tmpIdx + 12] & MASK32_13) << 2; + l8 |= (tmp[tmpIdx + 13] >>> 11) & MASK32_2; + longs[longsIdx + 8] = l8; + long l9 = (tmp[tmpIdx + 13] & MASK32_11) << 8; + l9 |= (tmp[tmpIdx + 14] >>> 5) & MASK32_8; + longs[longsIdx + 9] = l9; + long l10 = (tmp[tmpIdx + 14] & MASK32_5) << 14; + l10 |= (tmp[tmpIdx + 15] & MASK32_13) << 1; + l10 |= (tmp[tmpIdx + 16] >>> 12) & MASK32_1; + longs[longsIdx + 10] = l10; + long l11 = (tmp[tmpIdx + 16] & MASK32_12) << 7; + l11 |= (tmp[tmpIdx + 17] >>> 6) & MASK32_7; + longs[longsIdx + 11] = l11; + long l12 = (tmp[tmpIdx + 17] & MASK32_6) << 13; + l12 |= (tmp[tmpIdx + 18] & MASK32_13) << 0; + longs[longsIdx + 12] = l12; + } + } + + private static void decode20(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 40); + shiftLongs(tmp, 40, longs, 0, 12, MASK32_20); + for (int iter = 0, tmpIdx = 0, longsIdx = 40; iter < 8; ++iter, tmpIdx += 5, longsIdx += 3) { + long l0 = (tmp[tmpIdx + 0] & MASK32_12) << 8; + l0 |= (tmp[tmpIdx + 1] >>> 4) & MASK32_8; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK32_4) << 16; + l1 |= (tmp[tmpIdx + 2] & MASK32_12) << 4; + l1 |= (tmp[tmpIdx + 3] >>> 8) & MASK32_4; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 3] & MASK32_8) << 12; + l2 |= (tmp[tmpIdx + 4] & MASK32_12) << 0; + longs[longsIdx + 2] = l2; + } + } + + private static void decode21(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 42); + shiftLongs(tmp, 42, longs, 0, 11, MASK32_21); + for (int iter = 0, tmpIdx = 0, longsIdx = 42; iter < 2; ++iter, tmpIdx += 21, longsIdx += 11) { + long l0 = (tmp[tmpIdx + 0] & MASK32_11) << 10; + l0 |= (tmp[tmpIdx + 1] >>> 1) & MASK32_10; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK32_1) << 20; + l1 |= (tmp[tmpIdx + 2] & MASK32_11) << 9; + l1 |= (tmp[tmpIdx + 3] >>> 2) & MASK32_9; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 3] & MASK32_2) << 19; + l2 |= (tmp[tmpIdx + 4] & MASK32_11) << 8; + l2 |= (tmp[tmpIdx + 5] >>> 3) & MASK32_8; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 5] & MASK32_3) << 18; + l3 |= (tmp[tmpIdx + 6] & MASK32_11) << 7; 
+ l3 |= (tmp[tmpIdx + 7] >>> 4) & MASK32_7; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 7] & MASK32_4) << 17; + l4 |= (tmp[tmpIdx + 8] & MASK32_11) << 6; + l4 |= (tmp[tmpIdx + 9] >>> 5) & MASK32_6; + longs[longsIdx + 4] = l4; + long l5 = (tmp[tmpIdx + 9] & MASK32_5) << 16; + l5 |= (tmp[tmpIdx + 10] & MASK32_11) << 5; + l5 |= (tmp[tmpIdx + 11] >>> 6) & MASK32_5; + longs[longsIdx + 5] = l5; + long l6 = (tmp[tmpIdx + 11] & MASK32_6) << 15; + l6 |= (tmp[tmpIdx + 12] & MASK32_11) << 4; + l6 |= (tmp[tmpIdx + 13] >>> 7) & MASK32_4; + longs[longsIdx + 6] = l6; + long l7 = (tmp[tmpIdx + 13] & MASK32_7) << 14; + l7 |= (tmp[tmpIdx + 14] & MASK32_11) << 3; + l7 |= (tmp[tmpIdx + 15] >>> 8) & MASK32_3; + longs[longsIdx + 7] = l7; + long l8 = (tmp[tmpIdx + 15] & MASK32_8) << 13; + l8 |= (tmp[tmpIdx + 16] & MASK32_11) << 2; + l8 |= (tmp[tmpIdx + 17] >>> 9) & MASK32_2; + longs[longsIdx + 8] = l8; + long l9 = (tmp[tmpIdx + 17] & MASK32_9) << 12; + l9 |= (tmp[tmpIdx + 18] & MASK32_11) << 1; + l9 |= (tmp[tmpIdx + 19] >>> 10) & MASK32_1; + longs[longsIdx + 9] = l9; + long l10 = (tmp[tmpIdx + 19] & MASK32_10) << 11; + l10 |= (tmp[tmpIdx + 20] & MASK32_11) << 0; + longs[longsIdx + 10] = l10; + } + } + + private static void decode22(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 44); + shiftLongs(tmp, 44, longs, 0, 10, MASK32_22); + for (int iter = 0, tmpIdx = 0, longsIdx = 44; iter < 4; ++iter, tmpIdx += 11, longsIdx += 5) { + long l0 = (tmp[tmpIdx + 0] & MASK32_10) << 12; + l0 |= (tmp[tmpIdx + 1] & MASK32_10) << 2; + l0 |= (tmp[tmpIdx + 2] >>> 8) & MASK32_2; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 2] & MASK32_8) << 14; + l1 |= (tmp[tmpIdx + 3] & MASK32_10) << 4; + l1 |= (tmp[tmpIdx + 4] >>> 6) & MASK32_4; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 4] & MASK32_6) << 16; + l2 |= (tmp[tmpIdx + 5] & MASK32_10) << 6; + l2 |= (tmp[tmpIdx + 6] >>> 4) & MASK32_6; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 6] & MASK32_4) << 18; + l3 |= (tmp[tmpIdx + 7] & MASK32_10) << 8; + l3 |= (tmp[tmpIdx + 8] >>> 2) & MASK32_8; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 8] & MASK32_2) << 20; + l4 |= (tmp[tmpIdx + 9] & MASK32_10) << 10; + l4 |= (tmp[tmpIdx + 10] & MASK32_10) << 0; + longs[longsIdx + 4] = l4; + } + } + + private static void decode23(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 46); + shiftLongs(tmp, 46, longs, 0, 9, MASK32_23); + for (int iter = 0, tmpIdx = 0, longsIdx = 46; iter < 2; ++iter, tmpIdx += 23, longsIdx += 9) { + long l0 = (tmp[tmpIdx + 0] & MASK32_9) << 14; + l0 |= (tmp[tmpIdx + 1] & MASK32_9) << 5; + l0 |= (tmp[tmpIdx + 2] >>> 4) & MASK32_5; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 2] & MASK32_4) << 19; + l1 |= (tmp[tmpIdx + 3] & MASK32_9) << 10; + l1 |= (tmp[tmpIdx + 4] & MASK32_9) << 1; + l1 |= (tmp[tmpIdx + 5] >>> 8) & MASK32_1; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 5] & MASK32_8) << 15; + l2 |= (tmp[tmpIdx + 6] & MASK32_9) << 6; + l2 |= (tmp[tmpIdx + 7] >>> 3) & MASK32_6; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 7] & MASK32_3) << 20; + l3 |= (tmp[tmpIdx + 8] & MASK32_9) << 11; + l3 |= (tmp[tmpIdx + 9] & MASK32_9) << 2; + l3 |= (tmp[tmpIdx + 10] >>> 7) & MASK32_2; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 10] & MASK32_7) << 16; + l4 |= (tmp[tmpIdx + 11] & MASK32_9) << 7; + l4 |= (tmp[tmpIdx + 12] >>> 2) & MASK32_7; + longs[longsIdx + 4] = l4; + long l5 = (tmp[tmpIdx + 12] & MASK32_2) << 21; + l5 |= (tmp[tmpIdx + 13] & 
MASK32_9) << 12; + l5 |= (tmp[tmpIdx + 14] & MASK32_9) << 3; + l5 |= (tmp[tmpIdx + 15] >>> 6) & MASK32_3; + longs[longsIdx + 5] = l5; + long l6 = (tmp[tmpIdx + 15] & MASK32_6) << 17; + l6 |= (tmp[tmpIdx + 16] & MASK32_9) << 8; + l6 |= (tmp[tmpIdx + 17] >>> 1) & MASK32_8; + longs[longsIdx + 6] = l6; + long l7 = (tmp[tmpIdx + 17] & MASK32_1) << 22; + l7 |= (tmp[tmpIdx + 18] & MASK32_9) << 13; + l7 |= (tmp[tmpIdx + 19] & MASK32_9) << 4; + l7 |= (tmp[tmpIdx + 20] >>> 5) & MASK32_4; + longs[longsIdx + 7] = l7; + long l8 = (tmp[tmpIdx + 20] & MASK32_5) << 18; + l8 |= (tmp[tmpIdx + 21] & MASK32_9) << 9; + l8 |= (tmp[tmpIdx + 22] & MASK32_9) << 0; + longs[longsIdx + 8] = l8; + } + } + + private static void decode24(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 48); + shiftLongs(tmp, 48, longs, 0, 8, MASK32_24); + shiftLongs(tmp, 48, tmp, 0, 0, MASK32_8); + for (int iter = 0, tmpIdx = 0, longsIdx = 48; iter < 16; ++iter, tmpIdx += 3, longsIdx += 1) { + long l0 = tmp[tmpIdx + 0] << 16; + l0 |= tmp[tmpIdx + 1] << 8; + l0 |= tmp[tmpIdx + 2] << 0; + longs[longsIdx + 0] = l0; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/PForUtil.java b/server/src/main/java/org/elasticsearch/index/codec/postings/PForUtil.java new file mode 100644 index 0000000000000..26a600c73eeb5 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/PForUtil.java @@ -0,0 +1,323 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. + */ +package org.elasticsearch.index.codec.postings; + +import org.apache.lucene.store.DataInput; +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.util.LongHeap; +import org.apache.lucene.util.packed.PackedInts; + +import java.io.IOException; +import java.util.Arrays; + +/** Utility class to encode sequences of 128 small positive integers. */ +final class PForUtil { + + private static final int MAX_EXCEPTIONS = 7; + private static final int HALF_BLOCK_SIZE = ForUtil.BLOCK_SIZE / 2; + + // IDENTITY_PLUS_ONE[i] == i + 1 + private static final long[] IDENTITY_PLUS_ONE = new long[ForUtil.BLOCK_SIZE]; + + static { + for (int i = 0; i < ForUtil.BLOCK_SIZE; ++i) { + IDENTITY_PLUS_ONE[i] = i + 1; + } + } + + static boolean allEqual(long[] l) { + for (int i = 1; i < ForUtil.BLOCK_SIZE; ++i) { + if (l[i] != l[0]) { + return false; + } + } + return true; + } + + private final ForUtil forUtil; + // buffer for reading exception data; each exception uses two bytes (pos + high-order bits of the + // exception) + private final byte[] exceptionBuff = new byte[MAX_EXCEPTIONS * 2]; + + PForUtil(ForUtil forUtil) { + assert ForUtil.BLOCK_SIZE <= 256 : "blocksize must fit in one byte. 
got " + ForUtil.BLOCK_SIZE; + this.forUtil = forUtil; + } + + /** Encode 128 integers from {@code longs} into {@code out}. */ + void encode(long[] longs, DataOutput out) throws IOException { + // Determine the top MAX_EXCEPTIONS + 1 values + final LongHeap top = new LongHeap(MAX_EXCEPTIONS + 1); + for (int i = 0; i <= MAX_EXCEPTIONS; ++i) { + top.push(longs[i]); + } + long topValue = top.top(); + for (int i = MAX_EXCEPTIONS + 1; i < ForUtil.BLOCK_SIZE; ++i) { + if (longs[i] > topValue) { + topValue = top.updateTop(longs[i]); + } + } + + long max = 0L; + for (int i = 1; i <= top.size(); ++i) { + max = Math.max(max, top.get(i)); + } + + final int maxBitsRequired = PackedInts.bitsRequired(max); + // We store the patch on a byte, so we can't decrease the number of bits required by more than 8 + final int patchedBitsRequired = Math.max(PackedInts.bitsRequired(topValue), maxBitsRequired - 8); + int numExceptions = 0; + final long maxUnpatchedValue = (1L << patchedBitsRequired) - 1; + for (int i = 2; i <= top.size(); ++i) { + if (top.get(i) > maxUnpatchedValue) { + numExceptions++; + } + } + final byte[] exceptions = new byte[numExceptions * 2]; + if (numExceptions > 0) { + int exceptionCount = 0; + for (int i = 0; i < ForUtil.BLOCK_SIZE; ++i) { + if (longs[i] > maxUnpatchedValue) { + exceptions[exceptionCount * 2] = (byte) i; + exceptions[exceptionCount * 2 + 1] = (byte) (longs[i] >>> patchedBitsRequired); + longs[i] &= maxUnpatchedValue; + exceptionCount++; + } + } + assert exceptionCount == numExceptions : exceptionCount + " " + numExceptions; + } + + if (allEqual(longs) && maxBitsRequired <= 8) { + for (int i = 0; i < numExceptions; ++i) { + exceptions[2 * i + 1] = (byte) (Byte.toUnsignedLong(exceptions[2 * i + 1]) << patchedBitsRequired); + } + out.writeByte((byte) (numExceptions << 5)); + out.writeVLong(longs[0]); + } else { + final int token = (numExceptions << 5) | patchedBitsRequired; + out.writeByte((byte) token); + forUtil.encode(longs, patchedBitsRequired, out); + } + out.writeBytes(exceptions, exceptions.length); + } + + /** Decode 128 integers into {@code longs}. */ + void decode(DataInput in, long[] longs) throws IOException { + final int token = Byte.toUnsignedInt(in.readByte()); + final int bitsPerValue = token & 0x1f; + final int numExceptions = token >>> 5; + if (bitsPerValue == 0) { + Arrays.fill(longs, 0, ForUtil.BLOCK_SIZE, in.readVLong()); + } else { + forUtil.decode(bitsPerValue, in, longs); + } + for (int i = 0; i < numExceptions; ++i) { + longs[Byte.toUnsignedInt(in.readByte())] |= Byte.toUnsignedLong(in.readByte()) << bitsPerValue; + } + } + + /** Decode deltas, compute the prefix sum and add {@code base} to all decoded longs. 
*/ + void decodeAndPrefixSum(DataInput in, long base, long[] longs) throws IOException { + final int token = Byte.toUnsignedInt(in.readByte()); + final int bitsPerValue = token & 0x1f; + final int numExceptions = token >>> 5; + if (numExceptions == 0) { + // when there are no exceptions to apply, we can be a bit more efficient with our decoding + if (bitsPerValue == 0) { + // a bpv of zero indicates all delta values are the same + long val = in.readVLong(); + if (val == 1) { + // this will often be the common case when working with doc IDs, so we special-case it to + // be slightly more efficient + prefixSumOfOnes(longs, base); + } else { + prefixSumOf(longs, base, val); + } + } else { + // decode the deltas then apply the prefix sum logic + forUtil.decodeTo32(bitsPerValue, in, longs); + prefixSum32(longs, base); + } + } else { + // pack two values per long so we can apply prefixes two-at-a-time + if (bitsPerValue == 0) { + fillSameValue32(longs, in.readVLong()); + } else { + forUtil.decodeTo32(bitsPerValue, in, longs); + } + applyExceptions32(bitsPerValue, numExceptions, in, longs); + prefixSum32(longs, base); + } + } + + /** Skip 128 integers. */ + void skip(DataInput in) throws IOException { + final int token = Byte.toUnsignedInt(in.readByte()); + final int bitsPerValue = token & 0x1f; + final int numExceptions = token >>> 5; + if (bitsPerValue == 0) { + in.readVLong(); + in.skipBytes((numExceptions << 1)); + } else { + in.skipBytes(forUtil.numBytes(bitsPerValue) + (numExceptions << 1)); + } + } + + /** + * Fill {@code longs} with the final values for the case of all deltas being 1. Note this assumes + * there are no exceptions to apply. + */ + private static void prefixSumOfOnes(long[] longs, long base) { + System.arraycopy(IDENTITY_PLUS_ONE, 0, longs, 0, ForUtil.BLOCK_SIZE); + // This loop gets auto-vectorized + for (int i = 0; i < ForUtil.BLOCK_SIZE; ++i) { + longs[i] += base; + } + } + + /** + * Fill {@code longs} with the final values for the case of all deltas being {@code val}. Note + * this assumes there are no exceptions to apply. + */ + private static void prefixSumOf(long[] longs, long base, long val) { + for (int i = 0; i < ForUtil.BLOCK_SIZE; i++) { + longs[i] = (i + 1) * val + base; + } + } + + /** + * Fills the {@code longs} with the provided {@code val}, packed two values per long (using 32 + * bits per value). + */ + private static void fillSameValue32(long[] longs, long val) { + final long token = val << 32 | val; + Arrays.fill(longs, 0, HALF_BLOCK_SIZE, token); + } + + /** Apply the exceptions where the values are packed two-per-long in {@code longs}. */ + private void applyExceptions32(int bitsPerValue, int numExceptions, DataInput in, long[] longs) throws IOException { + in.readBytes(exceptionBuff, 0, numExceptions * 2); + for (int i = 0; i < numExceptions; ++i) { + final int exceptionPos = Byte.toUnsignedInt(exceptionBuff[i * 2]); + final long exception = Byte.toUnsignedLong(exceptionBuff[i * 2 + 1]); + // note that we pack two values per long, so the index is [0..63] for 128 values + final int idx = exceptionPos & 0x3f; // mod 64 + // we need to shift by 1) the bpv, and 2) 32 for positions [0..63] (and no 32 shift for + // [64..127]) + final int shift = bitsPerValue + ((1 ^ (exceptionPos >>> 6)) << 5); + longs[idx] |= exception << shift; + } + } + + /** Apply prefix sum logic where the values are packed two-per-long in {@code longs}. 
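decodeAndPrefixSum exploits the two-values-per-long packing: one pass of 64-bit additions advances two 32-bit prefix sums at once, and only the second half of the block needs a fix-up afterwards. A toy version of the trick (hypothetical, using a block of 8 instead of 128, and assuming the per-lane sums stay below 2^32, as the real code does for its 32-bit deltas):

```java
// Value i rides in the high 32 bits of long i; value i + 4 rides in the low
// 32 bits. One pass of long additions then accumulates both lanes at once.
static long[] prefixSumTwoLanes(long[] deltas, long base) { // deltas.length == 8
    long[] packed = new long[4];
    for (int i = 0; i < 4; i++) {
        packed[i] = (deltas[i] << 32) | deltas[i + 4];
    }
    packed[0] += base << 32;        // seed the first half with the base
    for (int i = 1; i < 4; i++) {
        packed[i] += packed[i - 1]; // both lanes accumulate independently
    }
    long[] out = new long[8];
    for (int i = 0; i < 4; i++) {
        out[i] = packed[i] >>> 32;            // first half: already final
        out[4 + i] = packed[i] & 0xFFFFFFFFL; // second half: still missing out[3]
    }
    for (int i = 4; i < 8; i++) {
        out[i] += out[3]; // fold the first half's total into the second half
    }
    return out; // e.g. deltas {1..8}, base 0 -> {1, 3, 6, 10, 15, 21, 28, 36}
}
```

prefixSum32, expand32 and the unrolled innerPrefixSum32 above are the 128-value version of exactly this, with the inner loop flattened into straight-line additions.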
*/ + private static void prefixSum32(long[] longs, long base) { + longs[0] += base << 32; + innerPrefixSum32(longs); + expand32(longs); + final long l = longs[HALF_BLOCK_SIZE - 1]; + for (int i = HALF_BLOCK_SIZE; i < ForUtil.BLOCK_SIZE; ++i) { + longs[i] += l; + } + } + + /** + * Expand the values packed two-per-long in {@code longs} into 128 individual long values stored + * back into {@code longs}. + */ + private static void expand32(long[] longs) { + for (int i = 0; i < 64; ++i) { + final long l = longs[i]; + longs[i] = l >>> 32; + longs[64 + i] = l & 0xFFFFFFFFL; + } + } + + /** + * Unrolled "inner" prefix sum logic where the values are packed two-per-long in {@code longs}. + * After this method, the final values will be correct for all high-order bits (values [0..63]) + * but a final prefix loop will still need to run to "correct" the values of [64..127] in the + * low-order bits, which need the 64th value added to all of them. + */ + private static void innerPrefixSum32(long[] longs) { + longs[1] += longs[0]; + longs[2] += longs[1]; + longs[3] += longs[2]; + longs[4] += longs[3]; + longs[5] += longs[4]; + longs[6] += longs[5]; + longs[7] += longs[6]; + longs[8] += longs[7]; + longs[9] += longs[8]; + longs[10] += longs[9]; + longs[11] += longs[10]; + longs[12] += longs[11]; + longs[13] += longs[12]; + longs[14] += longs[13]; + longs[15] += longs[14]; + longs[16] += longs[15]; + longs[17] += longs[16]; + longs[18] += longs[17]; + longs[19] += longs[18]; + longs[20] += longs[19]; + longs[21] += longs[20]; + longs[22] += longs[21]; + longs[23] += longs[22]; + longs[24] += longs[23]; + longs[25] += longs[24]; + longs[26] += longs[25]; + longs[27] += longs[26]; + longs[28] += longs[27]; + longs[29] += longs[28]; + longs[30] += longs[29]; + longs[31] += longs[30]; + longs[32] += longs[31]; + longs[33] += longs[32]; + longs[34] += longs[33]; + longs[35] += longs[34]; + longs[36] += longs[35]; + longs[37] += longs[36]; + longs[38] += longs[37]; + longs[39] += longs[38]; + longs[40] += longs[39]; + longs[41] += longs[40]; + longs[42] += longs[41]; + longs[43] += longs[42]; + longs[44] += longs[43]; + longs[45] += longs[44]; + longs[46] += longs[45]; + longs[47] += longs[46]; + longs[48] += longs[47]; + longs[49] += longs[48]; + longs[50] += longs[49]; + longs[51] += longs[50]; + longs[52] += longs[51]; + longs[53] += longs[52]; + longs[54] += longs[53]; + longs[55] += longs[54]; + longs[56] += longs[55]; + longs[57] += longs[56]; + longs[58] += longs[57]; + longs[59] += longs[58]; + longs[60] += longs[59]; + longs[61] += longs[60]; + longs[62] += longs[61]; + longs[63] += longs[62]; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 43437529cd301..9b9cf8ad35c04 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -33,6 +33,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.Lucene; @@ -43,8 +45,10 @@ import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.util.concurrent.ReleasableLock; 
import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException; +import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; @@ -91,7 +95,6 @@ import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Consumer; import java.util.function.Function; @@ -123,10 +126,13 @@ public abstract class Engine implements Closeable { private final CountDownLatch closedLatch = new CountDownLatch(1); protected final EventListener eventListener; protected final ReentrantLock failEngineLock = new ReentrantLock(); - protected final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock(); - protected final ReleasableLock readLock = new ReleasableLock(rwl.readLock()); - protected final ReleasableLock writeLock = new ReleasableLock(rwl.writeLock()); protected final SetOnce<Exception> failedEngine = new SetOnce<>(); + + private final AtomicBoolean isClosing = new AtomicBoolean(); + private final SubscribableListener<Void> drainOnCloseListener = new SubscribableListener<>(); + private final RefCounted ensureOpenRefs = AbstractRefCounted.of(() -> drainOnCloseListener.onResponse(null)); + private final Releasable releaseEnsureOpenRef = ensureOpenRefs::decRef; // reuse this to avoid allocation for each op + /* * on {@code lastWriteNanos} we use System.nanoTime() to initialize this since: * - we use the value for figuring out if the shard / engine is active so if we startup and no write has happened yet we still @@ -1160,7 +1166,19 @@ public void flush(boolean force, boolean waitIfOngoing) throws EngineException { * request is detected, no flush will have occurred and the listener will be completed with a marker * indicating no flush and unknown generation. */ - public abstract void flush(boolean force, boolean waitIfOngoing, ActionListener<FlushResult> listener) throws EngineException; + public final void flush(boolean force, boolean waitIfOngoing, ActionListener<FlushResult> listener) throws EngineException { + try (var ignored = acquireEnsureOpenRef()) { + flushHoldingLock(force, waitIfOngoing, listener); + } + } + + /** + * The actual implementation of {@link #flush(boolean, boolean, ActionListener)}, to be called either when holding a ref that ensures + * the engine remains open, or while holding {@code IndexShard#engineMutex} when closing the engine. + */ + protected abstract void flushHoldingLock(boolean force, boolean waitIfOngoing, ActionListener<FlushResult> listener) + throws EngineException; /** * Flushes the state of the engine including the transaction log, clearing memory and persisting @@ -1853,34 +1871,82 @@ public void close() { } /** - * Method to close the engine while the write lock is held. - * Must decrement the supplied when closing work is done and resources are - * freed. + * Closes the engine without acquiring any refs or locks. The caller should either have changed {@link #isClosing} from {@code false} to + * {@code true} or else must hold the {@link #failEngineLock}. The implementation must decrement the supplied latch when done.
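The fields introduced above (isClosing, drainOnCloseListener, ensureOpenRefs) replace the engine's old read/write lock with a drain-on-close protocol: operations hold a ref while they run, and the first closer flips a flag, drops the initial self-reference, and waits for in-flight operations to finish. A standalone sketch of the same idea in plain JDK types (hypothetical names; the real acquireEnsureOpenRef/drainForClose below add assertions and asynchronous waiting via SubscribableListener):

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

final class DrainOnClose {
    private final AtomicBoolean closing = new AtomicBoolean();
    private final AtomicInteger refs = new AtomicInteger(1); // the "self" ref
    private final CountDownLatch drained = new CountDownLatch(1);

    // usage: Runnable ref = acquire(); try { ... } finally { ref.run(); }
    Runnable acquire() {
        refs.incrementAndGet();
        if (closing.get()) {
            release(); // raced with a closer; undo and fail
            throw new IllegalStateException("closing");
        }
        return this::release;
    }

    private void release() {
        if (refs.decrementAndGet() == 0) {
            drained.countDown();
        }
    }

    boolean drainForClose() throws InterruptedException {
        if (closing.compareAndSet(false, true) == false) {
            return false; // someone else is already closing
        }
        release();       // drop the self ref so the count can reach zero
        drained.await(); // wait for outstanding operations to drain
        return true;
    }
}
```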
*/ protected abstract void closeNoLock(String reason, CountDownLatch closedLatch); + protected final boolean isDrainedForClose() { + return ensureOpenRefs.hasReferences() == false; + } + + protected final boolean isClosing() { + return isClosing.get(); + } + + protected final Releasable acquireEnsureOpenRef() { + if (isClosing() || ensureOpenRefs.tryIncRef() == false) { + ensureOpen(); // throws "engine is closed" exception if we're actually closed, otherwise ... + throw new AlreadyClosedException(shardId + " engine is closing", failedEngine.get()); + } + return Releasables.assertOnce(releaseEnsureOpenRef); + } + /** - * Flush the engine (committing segments to disk and truncating the - * translog) and close it. + * When called for the first time, puts the engine into a closing state in which further calls to {@link #acquireEnsureOpenRef()} will + * fail with an {@link AlreadyClosedException} and waits for all outstanding ensure-open refs to be released, before returning {@code + * true}. If called again, returns {@code false} without waiting. + * + * @return a flag indicating whether this was the first call or not. + */ + private boolean drainForClose() { + if (isClosing.compareAndSet(false, true) == false) { + logger.trace("drainForClose(): already closing"); + return false; + } + + logger.debug("drainForClose(): draining ops"); + releaseEnsureOpenRef.close(); + final var future = new PlainActionFuture() { + @Override + protected boolean blockingAllowed() { + // TODO remove this blocking, or at least do it elsewhere, see https://github.com/elastic/elasticsearch/issues/89821 + return Thread.currentThread().getName().contains(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME) + || super.blockingAllowed(); + } + }; + drainOnCloseListener.addListener(future); + try { + future.get(); + return true; + } catch (ExecutionException e) { + logger.error("failure while draining operations on close", e); + assert false : e; + throw new IllegalStateException(e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.error("interrupted while draining operations on close"); + throw new IllegalStateException(e); + } + } + + /** + * Flush the engine (committing segments to disk and truncating the translog) and close it. */ public void flushAndClose() throws IOException { - if (isClosed.get() == false) { - logger.trace("flushAndClose now acquire writeLock"); - try (ReleasableLock lock = writeLock.acquire()) { - logger.trace("flushAndClose now acquired writeLock"); + logger.trace("flushAndClose() maybe draining ops"); + if (isClosed.get() == false && drainForClose()) { + logger.trace("flushAndClose drained ops"); + try { + logger.debug("flushing shard on close - this might take some time to sync files to disk"); try { - logger.debug("flushing shard on close - this might take some time to sync files to disk"); - try { - // TODO we might force a flush in the future since we have the write lock already even though recoveries - // are running. 
- // TODO: We are not waiting for full durability here atm because we are on the cluster state update thread - flush(false, false, ActionListener.noop()); - } catch (AlreadyClosedException ex) { - logger.debug("engine already closed - skipping flushAndClose"); - } - } finally { - close(); // double close is not a problem + // TODO: We are not waiting for full durability here atm because we are on the cluster state update thread + flushHoldingLock(false, false, ActionListener.noop()); + } catch (AlreadyClosedException ex) { + logger.debug("engine already closed - skipping flushAndClose"); } + } finally { + closeNoLock("flushAndClose", closedLatch); } } awaitPendingClose(); @@ -1888,12 +1954,10 @@ public void flushAndClose() throws IOException { @Override public void close() throws IOException { - if (isClosed.get() == false) { // don't acquire the write lock if we are already closed - logger.debug("close now acquiring writeLock"); - try (ReleasableLock lock = writeLock.acquire()) { - logger.debug("close acquired writeLock"); - closeNoLock("api", closedLatch); - } + logger.debug("close() maybe draining ops"); + if (isClosed.get() == false && drainForClose()) { + logger.debug("close drained ops"); + closeNoLock("api", closedLatch); } awaitPendingClose(); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 58a3c02316430..08fc9e55fd408 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -58,7 +58,6 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AsyncIOProcessor; import org.elasticsearch.common.util.concurrent.KeyedLock; -import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Assertions; import org.elasticsearch.core.Booleans; @@ -516,8 +515,7 @@ final boolean assertSearcherIsWarmedUp(String source, SearcherScope scope) { @Override public int restoreLocalHistoryFromTranslog(TranslogRecoveryRunner translogRecoveryRunner) throws IOException { - try (ReleasableLock ignored = readLock.acquire()) { - ensureOpen(); + try (var ignored = acquireEnsureOpenRef()) { final long localCheckpoint = localCheckpointTracker.getProcessedCheckpoint(); try (Translog.Snapshot snapshot = getTranslog().newSnapshot(localCheckpoint + 1, Long.MAX_VALUE)) { return translogRecoveryRunner.run(this, snapshot); @@ -527,8 +525,7 @@ public int restoreLocalHistoryFromTranslog(TranslogRecoveryRunner translogRecove @Override public int fillSeqNoGaps(long primaryTerm) throws IOException { - try (ReleasableLock ignored = readLock.acquire()) { - ensureOpen(); + try (var ignored = acquireEnsureOpenRef()) { final long localCheckpoint = localCheckpointTracker.getProcessedCheckpoint(); final long maxSeqNo = localCheckpointTracker.getMaxSeqNo(); int numNoOpsAdded = 0; @@ -568,8 +565,7 @@ private void bootstrapAppendOnlyInfoFromWriter(IndexWriter writer) { @Override public void recoverFromTranslog(TranslogRecoveryRunner translogRecoveryRunner, long recoverUpToSeqNo, ActionListener listener) { ActionListener.run(listener, l -> { - try (ReleasableLock lock = readLock.acquire()) { - ensureOpen(); + try (var ignored = acquireEnsureOpenRef()) { if (pendingTranslogRecovery.get() == false) { throw new IllegalStateException("Engine has already been 
recovered"); } @@ -840,8 +836,7 @@ public GetResult get( Function searcherWrapper ) { assert assertGetUsesIdField(get); - try (ReleasableLock ignored = readLock.acquire()) { - ensureOpen(); + try (var ignored = acquireEnsureOpenRef()) { if (get.realtime()) { var result = realtimeGetUnderLock(get, mappingLookup, documentParser, searcherWrapper, true); assert result != null : "real-time get result must not be null"; @@ -861,8 +856,7 @@ public GetResult getFromTranslog( Function searcherWrapper ) { assert assertGetUsesIdField(get); - try (ReleasableLock ignored = readLock.acquire()) { - ensureOpen(); + try (var ignored = acquireEnsureOpenRef()) { return realtimeGetUnderLock(get, mappingLookup, documentParser, searcherWrapper, false); } } @@ -878,71 +872,75 @@ protected GetResult realtimeGetUnderLock( Function searcherWrapper, boolean getFromSearcher ) { - assert readLock.isHeldByCurrentThread(); + assert isDrainedForClose() == false; assert get.realtime(); final VersionValue versionValue; try (Releasable ignore = versionMap.acquireLock(get.uid().bytes())) { // we need to lock here to access the version map to do this truly in RT versionValue = getVersionFromMap(get.uid().bytes()); } - boolean getFromSearcherIfNotInTranslog = getFromSearcher; - if (versionValue != null) { - /* - * Once we've seen the ID in the live version map, in two cases it is still possible not to - * be able to follow up with serving the get from the translog: - * 1. It is possible that once attempt handling the get, we won't see the doc in the translog - * since it might have been moved out. - * TODO: ideally we should keep around translog entries long enough to cover this case - * 2. We might not be tracking translog locations in the live version map (see @link{trackTranslogLocation}) - * - * In these cases, we should always fall back to get the doc from the internal searcher. - */ + try { + boolean getFromSearcherIfNotInTranslog = getFromSearcher; + if (versionValue != null) { + /* + * Once we've seen the ID in the live version map, in two cases it is still possible not to + * be able to follow up with serving the get from the translog: + * 1. It is possible that once attempt handling the get, we won't see the doc in the translog + * since it might have been moved out. + * TODO: ideally we should keep around translog entries long enough to cover this case + * 2. We might not be tracking translog locations in the live version map (see @link{trackTranslogLocation}) + * + * In these cases, we should always fall back to get the doc from the internal searcher. 
+ */ - getFromSearcherIfNotInTranslog = true; - if (versionValue.isDelete()) { - return GetResult.NOT_EXISTS; - } - if (get.versionType().isVersionConflictForReads(versionValue.version, get.version())) { - throw new VersionConflictEngineException( - shardId, - "[" + get.id() + "]", - get.versionType().explainConflictForReads(versionValue.version, get.version()) - ); - } - if (get.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO - && (get.getIfSeqNo() != versionValue.seqNo || get.getIfPrimaryTerm() != versionValue.term)) { - throw new VersionConflictEngineException( - shardId, - get.id(), - get.getIfSeqNo(), - get.getIfPrimaryTerm(), - versionValue.seqNo, - versionValue.term - ); - } - if (get.isReadFromTranslog()) { - if (versionValue.getLocation() != null) { - try { - final Translog.Operation operation = translog.readOperation(versionValue.getLocation()); - if (operation != null) { - return getFromTranslog(get, (Translog.Index) operation, mappingLookup, documentParser, searcherWrapper); + getFromSearcherIfNotInTranslog = true; + if (versionValue.isDelete()) { + return GetResult.NOT_EXISTS; + } + if (get.versionType().isVersionConflictForReads(versionValue.version, get.version())) { + throw new VersionConflictEngineException( + shardId, + "[" + get.id() + "]", + get.versionType().explainConflictForReads(versionValue.version, get.version()) + ); + } + if (get.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO + && (get.getIfSeqNo() != versionValue.seqNo || get.getIfPrimaryTerm() != versionValue.term)) { + throw new VersionConflictEngineException( + shardId, + get.id(), + get.getIfSeqNo(), + get.getIfPrimaryTerm(), + versionValue.seqNo, + versionValue.term + ); + } + if (get.isReadFromTranslog()) { + if (versionValue.getLocation() != null) { + try { + final Translog.Operation operation = translog.readOperation(versionValue.getLocation()); + if (operation != null) { + return getFromTranslog(get, (Translog.Index) operation, mappingLookup, documentParser, searcherWrapper); + } + } catch (IOException e) { + maybeFailEngine("realtime_get", e); // lets check if the translog has failed with a tragic event + throw new EngineException(shardId, "failed to read operation from translog", e); } - } catch (IOException e) { - maybeFailEngine("realtime_get", e); // lets check if the translog has failed with a tragic event - throw new EngineException(shardId, "failed to read operation from translog", e); + } else { + // We need to start tracking translog locations in the live version map. + trackTranslogLocation.set(true); } - } else { - // We need to start tracking translog locations in the live version map. 
- trackTranslogLocation.set(true); } + assert versionValue.seqNo >= 0 : versionValue; + refreshIfNeeded(REAL_TIME_GET_REFRESH_SOURCE, versionValue.seqNo); } - assert versionValue.seqNo >= 0 : versionValue; - refreshIfNeeded(REAL_TIME_GET_REFRESH_SOURCE, versionValue.seqNo); - } - if (getFromSearcherIfNotInTranslog) { - return getFromSearcher(get, acquireSearcher("realtime_get", SearcherScope.INTERNAL, searcherWrapper), false); + if (getFromSearcherIfNotInTranslog) { + return getFromSearcher(get, acquireSearcher("realtime_get", SearcherScope.INTERNAL, searcherWrapper), false); + } + return null; + } finally { + assert isDrainedForClose() == false; } - return null; } /** @@ -1140,8 +1138,7 @@ long doGenerateSeqNoForOperation(final Operation operation) { public IndexResult index(Index index) throws IOException { assert Objects.equals(index.uid().field(), IdFieldMapper.NAME) : index.uid().field(); final boolean doThrottle = index.origin().isRecovery() == false; - try (ReleasableLock releasableLock = readLock.acquire()) { - ensureOpen(); + try (var ignored1 = acquireEnsureOpenRef()) { assert assertIncomingSequenceNumber(index.origin(), index.seqNo()); int reservedDocs = 0; try ( @@ -1607,8 +1604,7 @@ public DeleteResult delete(Delete delete) throws IOException { final DeleteResult deleteResult; int reservedDocs = 0; // NOTE: we don't throttle this when merges fall behind because delete-by-id does not create new segments: - try (ReleasableLock ignored = readLock.acquire(); Releasable ignored2 = versionMap.acquireLock(delete.uid().bytes())) { - ensureOpen(); + try (var ignored = acquireEnsureOpenRef(); Releasable ignored2 = versionMap.acquireLock(delete.uid().bytes())) { lastWriteNanos = delete.startTime(); final DeletionStrategy plan = deletionStrategyForOperation(delete); reservedDocs = plan.reservedDocs; @@ -1935,8 +1931,7 @@ public void maybePruneDeletes() { @Override public NoOpResult noOp(final NoOp noOp) throws IOException { final NoOpResult noOpResult; - try (ReleasableLock ignored = readLock.acquire()) { - ensureOpen(); + try (var ignored = acquireEnsureOpenRef()) { noOpResult = innerNoOp(noOp); } catch (final Exception e) { try { @@ -1950,7 +1945,7 @@ public NoOpResult noOp(final NoOp noOp) throws IOException { } private NoOpResult innerNoOp(final NoOp noOp) throws IOException { - assert readLock.isHeldByCurrentThread(); + assert isDrainedForClose() == false; assert noOp.seqNo() > SequenceNumbers.NO_OPS_PERFORMED; final long seqNo = noOp.seqNo(); try (Releasable ignored = noOpKeyedLock.acquire(seqNo)) { @@ -2005,6 +2000,8 @@ private NoOpResult innerNoOp(final NoOp noOp) throws IOException { noOpResult.setTook(System.nanoTime() - noOp.startTime()); noOpResult.freeze(); return noOpResult; + } finally { + assert isDrainedForClose() == false; } } @@ -2179,8 +2176,8 @@ private boolean shouldPeriodicallyFlush(long flushThresholdSizeInBytes, long flu } @Override - public void flush(boolean force, boolean waitIfOngoing, ActionListener listener) throws EngineException { - ensureOpen(); + protected void flushHoldingLock(boolean force, boolean waitIfOngoing, ActionListener listener) throws EngineException { + ensureOpen(); // best-effort, a concurrent failEngine() can still happen but that's ok if (force && waitIfOngoing == false) { assert false : "wait_if_ongoing must be true for a force flush: force=" + force + " wait_if_ongoing=" + waitIfOngoing; throw new IllegalArgumentException( @@ -2188,77 +2185,75 @@ public void flush(boolean force, boolean waitIfOngoing, ActionListener Long.parseLong( - 
lastCommittedSegmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY) - )) { - ensureCanFlush(); - Translog.Location commitLocation = getTranslogLastWriteLocation(); - try { - translog.rollGeneration(); - logger.trace("starting commit for flush; commitTranslog=true"); - long lastFlushTimestamp = relativeTimeInNanosSupplier.getAsLong(); - // Pre-emptively recording the upcoming segment generation so that the live version map archive records - // the correct segment generation for doc IDs that go to the archive while a flush is happening. Otherwise, - // if right after committing the IndexWriter new docs get indexed/updated and a refresh moves them to the archive, - // we clear them from the archive once we see that segment generation on the search shards, but those changes - // were not included in the commit since they happened right after it. - preCommitSegmentGeneration.set(lastCommittedSegmentInfos.getGeneration() + 1); - commitIndexWriter(indexWriter, translog); - logger.trace("finished commit for flush"); - // we need to refresh in order to clear older version values - refresh("version_table_flush", SearcherScope.INTERNAL, true); - translog.trimUnreferencedReaders(); - // Use the timestamp from when the flush started, but only update it in case of success, so that any exception in - // the above lines would not lead the engine to think that it recently flushed, when it did not. - this.lastFlushTimestamp = lastFlushTimestamp; - } catch (AlreadyClosedException e) { - failOnTragicEvent(e); - throw e; - } catch (Exception e) { - throw new FlushFailedEngineException(shardId, e); - } - refreshLastCommittedSegmentInfos(); - generation = lastCommittedSegmentInfos.getGeneration(); - flushListener.afterFlush(generation, commitLocation); - } else { - generation = lastCommittedSegmentInfos.getGeneration(); + try { + // Only flush if (1) Lucene has uncommitted docs, or (2) forced by caller, or (3) the + // newly created commit points to a different translog generation (can free translog), + // or (4) the local checkpoint information in the last commit is stale, which slows down future recoveries. + boolean hasUncommittedChanges = hasUncommittedChanges(); + if (hasUncommittedChanges + || force + || shouldPeriodicallyFlush() + || getProcessedLocalCheckpoint() > Long.parseLong( + lastCommittedSegmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY) + )) { + ensureCanFlush(); + Translog.Location commitLocation = getTranslogLastWriteLocation(); + try { + translog.rollGeneration(); + logger.trace("starting commit for flush; commitTranslog=true"); + long lastFlushTimestamp = relativeTimeInNanosSupplier.getAsLong(); + // Pre-emptively recording the upcoming segment generation so that the live version map archive records + // the correct segment generation for doc IDs that go to the archive while a flush is happening. Otherwise, + // if right after committing the IndexWriter new docs get indexed/updated and a refresh moves them to the archive, + // we clear them from the archive once we see that segment generation on the search shards, but those changes + // were not included in the commit since they happened right after it. 
+ preCommitSegmentGeneration.set(lastCommittedSegmentInfos.getGeneration() + 1); + commitIndexWriter(indexWriter, translog); + logger.trace("finished commit for flush"); + // we need to refresh in order to clear older version values + refresh("version_table_flush", SearcherScope.INTERNAL, true); + translog.trimUnreferencedReaders(); + // Use the timestamp from when the flush started, but only update it in case of success, so that any exception in + // the above lines would not lead the engine to think that it recently flushed, when it did not. + this.lastFlushTimestamp = lastFlushTimestamp; + } catch (AlreadyClosedException e) { + failOnTragicEvent(e); + throw e; + } catch (Exception e) { + throw new FlushFailedEngineException(shardId, e); } - } catch (FlushFailedEngineException ex) { - maybeFailEngine("flush", ex); - listener.onFailure(ex); - return; - } catch (Exception e) { - listener.onFailure(e); - return; - } finally { - flushLock.unlock(); - logger.trace("released flush lock"); + refreshLastCommittedSegmentInfos(); + generation = lastCommittedSegmentInfos.getGeneration(); + flushListener.afterFlush(generation, commitLocation); + } else { + generation = lastCommittedSegmentInfos.getGeneration(); } + } catch (FlushFailedEngineException ex) { + maybeFailEngine("flush", ex); + listener.onFailure(ex); + return; + } catch (Exception e) { + listener.onFailure(e); + return; + } finally { + flushLock.unlock(); + logger.trace("released flush lock"); } + // We don't have to do this here; we do it defensively to make sure that even if wall clock time is misbehaving // (e.g., moves backwards) we will at least still sometimes prune deleted tombstones: if (engineConfig.isEnableGcDeletes()) { @@ -2297,8 +2292,7 @@ private void refreshLastCommittedSegmentInfos() { @Override public void rollTranslogGeneration() throws EngineException { - try (ReleasableLock ignored = readLock.acquire()) { - ensureOpen(); + try (var ignored = acquireEnsureOpenRef()) { translog.rollGeneration(); translog.trimUnreferencedReaders(); } catch (AlreadyClosedException e) { @@ -2316,8 +2310,7 @@ public void rollTranslogGeneration() throws EngineException { @Override public void trimUnreferencedTranslogFiles() throws EngineException { - try (ReleasableLock lock = readLock.acquire()) { - ensureOpen(); + try (var ignored = acquireEnsureOpenRef()) { translog.trimUnreferencedReaders(); } catch (AlreadyClosedException e) { failOnTragicEvent(e); @@ -2339,8 +2332,7 @@ public boolean shouldRollTranslogGeneration() { @Override public void trimOperationsFromTranslog(long belowTerm, long aboveSeqNo) throws EngineException { - try (ReleasableLock lock = readLock.acquire()) { - ensureOpen(); + try (var ignored = acquireEnsureOpenRef()) { translog.trimOperations(belowTerm, aboveSeqNo); } catch (AlreadyClosedException e) { failOnTragicEvent(e); @@ -2526,7 +2518,8 @@ private boolean failOnTragicEvent(AlreadyClosedException ex) { } else if (translog.isOpen() == false && translog.getTragicException() != null) { failEngine("already closed by tragic event on the translog", translog.getTragicException()); engineFailed = true; - } else if (failedEngine.get() == null && isClosed.get() == false) { // we are closed but the engine is not failed yet? + } else if (failedEngine.get() == null && isClosing() == false && isClosed.get() == false) { + // we are closed but the engine is not failed yet? // this smells like a bug - we only expect ACE if we are in a fatal case ie. either translog or IW is closed by // a tragic event or has closed itself. 
if that is not the case we are in a buggy state and raise an assertion error throw new AssertionError("Unexpected AlreadyClosedException", ex); @@ -2578,7 +2571,7 @@ public long getIndexBufferRAMBytesUsed() { @Override public List<Segment> segments() { - try (ReleasableLock lock = readLock.acquire()) { + try (var ignored = acquireEnsureOpenRef()) { Segment[] segmentsArr = getSegmentInfo(lastCommittedSegmentInfos); // fill in the merges flag @@ -2597,16 +2590,11 @@ public List<Segment> segments() { } } - /** - * Closes the engine without acquiring the write lock. This should only be - * called while the write lock is hold or in a disaster condition ie. if the engine - * is failed. - */ @Override protected final void closeNoLock(String reason, CountDownLatch closedLatch) { if (isClosed.compareAndSet(false, true)) { - assert rwl.isWriteLockedByCurrentThread() || failEngineLock.isHeldByCurrentThread() - : "Either the write lock must be held or the engine must be currently be failing itself"; + assert isDrainedForClose() || failEngineLock.isHeldByCurrentThread() + : "Either all operations must have been drained or the engine must currently be failing itself"; try { this.versionMap.clear(); if (internalReaderManager != null) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index ef0901bc17712..1cee2a90ec3f1 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -64,11 +64,18 @@ public static final class VersionLookup { // Modifies the map of this instance by merging with the given VersionLookup public void merge(VersionLookup versionLookup) { + long existingEntriesSize = 0; + for (var entry : versionLookup.map.entrySet()) { + var existingValue = map.get(entry.getKey()); + existingEntriesSize += existingValue == null ? 0 : mapEntryBytesUsed(entry.getKey(), existingValue); + } map.putAll(versionLookup.map); + adjustRam(versionLookup.ramBytesUsed() - existingEntriesSize); minDeleteTimestamp.accumulateAndGet(versionLookup.minDeleteTimestamp(), Math::min); } - private VersionLookup(Map<BytesRef, VersionValue> map) { + // Visible for testing + VersionLookup(Map<BytesRef, VersionValue> map) { this.map = map; } @@ -77,7 +84,11 @@ public VersionValue get(BytesRef key) { } VersionValue put(BytesRef key, VersionValue value) { - return map.put(key, value); + long ramAccounting = mapEntryBytesUsed(key, value); + VersionValue previousValue = map.put(key, value); + ramAccounting += previousValue == null ?
0 : -mapEntryBytesUsed(key, previousValue); + adjustRam(ramAccounting); + return previousValue; } public boolean isEmpty() { @@ -96,8 +107,12 @@ void markAsUnsafe() { unsafe = true; } - public VersionValue remove(BytesRef uid) { - return map.remove(uid); + VersionValue remove(BytesRef uid) { + VersionValue previousValue = map.remove(uid); + if (previousValue != null) { + adjustRam(-mapEntryBytesUsed(uid, previousValue)); + } + return previousValue; } public void updateMinDeletedTimestamp(DeleteVersionValue delete) { @@ -107,6 +122,26 @@ public void updateMinDeletedTimestamp(DeleteVersionValue delete) { public long minDeleteTimestamp() { return minDeleteTimestamp.get(); } + + void adjustRam(long value) { + if (value != 0) { + long v = ramBytesUsed.addAndGet(value); + assert v >= 0 : "bytes=" + v; + } + } + + public long ramBytesUsed() { + return ramBytesUsed.get(); + } + + public static long mapEntryBytesUsed(BytesRef key, VersionValue value) { + return (BASE_BYTES_PER_BYTESREF + key.bytes.length) + (BASE_BYTES_PER_CHM_ENTRY + value.ramBytesUsed()); + } + + // Used only for testing + Map getMap() { + return map; + } } private static final class Maps { @@ -170,27 +205,12 @@ Maps invalidateOldMap(LiveVersionMapArchive archive) { } void put(BytesRef uid, VersionValue version) { - long uidRAMBytesUsed = BASE_BYTES_PER_BYTESREF + uid.bytes.length; - long ramAccounting = BASE_BYTES_PER_CHM_ENTRY + version.ramBytesUsed() + uidRAMBytesUsed; - VersionValue previousValue = current.put(uid, version); - ramAccounting += previousValue == null ? 0 : -(BASE_BYTES_PER_CHM_ENTRY + previousValue.ramBytesUsed() + uidRAMBytesUsed); - adjustRam(ramAccounting); - } - - void adjustRam(long value) { - if (value != 0) { - long v = current.ramBytesUsed.addAndGet(value); - assert v >= 0 : "bytes=" + v; - } + current.put(uid, version); } void remove(BytesRef uid, DeleteVersionValue deleted) { - VersionValue previousValue = current.remove(uid); + current.remove(uid); current.updateMinDeletedTimestamp(deleted); - if (previousValue != null) { - long uidRAMBytesUsed = BASE_BYTES_PER_BYTESREF + uid.bytes.length; - adjustRam(-(BASE_BYTES_PER_CHM_ENTRY + previousValue.ramBytesUsed() + uidRAMBytesUsed)); - } if (old != VersionLookup.EMPTY) { // we also need to remove it from the old map here to make sure we don't read this stale value while // we are in the middle of a refresh. Most of the time the old map is an empty map so we can skip it there. @@ -452,7 +472,7 @@ synchronized void clear() { @Override public long ramBytesUsed() { - return maps.ramBytesUsed() + ramBytesUsedTombstones.get(); + return maps.ramBytesUsed() + ramBytesUsedTombstones.get() + ramBytesUsedForArchive(); } /** @@ -463,6 +483,13 @@ long ramBytesUsedForRefresh() { return maps.current.ramBytesUsed.get(); } + /** + * Returns how much RAM would be freed up by cleaning out the LiveVersionMapArchive. + */ + long ramBytesUsedForArchive() { + return archive.getMemoryBytesUsed(); + } + /** * Returns how much RAM is current being freed up by refreshing. This is the RAM usage of the previous version map that needs to stay * around until operations are safely recorded in the Lucene index. 
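Before moving on to the archive interface, note the accounting rule the reworked VersionLookup maintains: its ramBytesUsed counter must always equal the sum of mapEntryBytesUsed over the live entries, which is why merge() pre-computes the size of the entries it is about to overwrite rather than simply adding the incoming lookup's total. A test-style check of that invariant might look like this (hypothetical; it would have to live in the same package, since getMap() is package-private):

```java
// Recount the map from scratch and compare against the incrementally
// maintained counter that put(), remove() and merge() above adjust.
static void assertRamAccounting(LiveVersionMap.VersionLookup lookup) {
    long recount = 0;
    for (var entry : lookup.getMap().entrySet()) {
        recount += LiveVersionMap.VersionLookup.mapEntryBytesUsed(entry.getKey(), entry.getValue());
    }
    assert recount == lookup.ramBytesUsed() : recount + " != " + lookup.ramBytesUsed();
}
```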
diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMapArchive.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMapArchive.java index a68a1cea368d4..9ccbf6ac16fed 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMapArchive.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMapArchive.java @@ -39,6 +39,14 @@ default boolean isUnsafe() { return false; } + /** + * Returns how much memory is currently being used by the archive and would be freed up after + * unpromotables are refreshed. + */ + default long getMemoryBytesUsed() { + return 0L; + } + LiveVersionMapArchive NOOP_ARCHIVE = new LiveVersionMapArchive() { @Override public void afterRefresh(LiveVersionMap.VersionLookup old) {} diff --git a/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java b/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java index e8ae9d605b3f6..a666138492a20 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java @@ -16,7 +16,6 @@ import org.apache.lucene.index.SegmentReader; import org.apache.lucene.store.Directory; import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -147,8 +146,7 @@ public DocsStats docStats() { public void trimUnreferencedTranslogFiles() { final Store store = this.engineConfig.getStore(); store.incRef(); - try (ReleasableLock lock = readLock.acquire()) { - ensureOpen(); + try (var ignored = acquireEnsureOpenRef()) { final List commits = DirectoryReader.listCommits(store.directory()); if (commits.size() == 1 && translogStats.getTranslogSizeInBytes() > translogStats.getUncommittedSizeInBytes()) { final Map commitUserData = getLastCommittedSegmentInfos().getUserData(); diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index aa9bddf414296..7d5410cf488d7 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; -import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexVersion; @@ -450,7 +449,7 @@ public boolean shouldPeriodicallyFlush() { } @Override - public void flush(boolean force, boolean waitIfOngoing, ActionListener listener) throws EngineException { + protected void flushHoldingLock(boolean force, boolean waitIfOngoing, ActionListener listener) throws EngineException { listener.onResponse(new FlushResult(true, lastCommittedSegmentInfos.getGeneration())); } @@ -527,8 +526,7 @@ public void recoverFromTranslog( ActionListener listener ) { ActionListener.run(listener, l -> { - try (ReleasableLock lock = readLock.acquire()) { - ensureOpen(); + try (var ignored = acquireEnsureOpenRef()) { try { translogRecoveryRunner.run(this, Translog.Snapshot.EMPTY); } catch (final Exception e) { diff --git 
a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java index 097a59c7fc1c7..187d59a88e2fd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java @@ -87,6 +87,7 @@ private static XContentParser wrapObject(Object sourceMap) throws IOException { public abstract static class AbstractGeometryFieldType extends MappedFieldType { protected final Parser geometryParser; + protected final T nullValue; protected AbstractGeometryFieldType( String name, @@ -94,9 +95,11 @@ protected AbstractGeometryFieldType( boolean stored, boolean hasDocValues, Parser geometryParser, + T nullValue, Map meta ) { super(name, indexed, stored, hasDocValues, TextSearchInfo.NONE, meta); + this.nullValue = nullValue; this.geometryParser = geometryParser; } @@ -127,7 +130,7 @@ protected Object parseSourceValue(Object value) { public ValueFetcher valueFetcher(Set sourcePaths, Object nullValue, String format) { Function, List> formatter = getFormatter(format != null ? format : GeometryFormatterFactory.GEOJSON); - return new ArraySourceValueFetcher(sourcePaths, nullValue) { + return new ArraySourceValueFetcher(sourcePaths, nullValueAsSource(nullValue)) { @Override protected Object parseSourceValue(Object value) { final List values = new ArrayList<>(); @@ -136,6 +139,8 @@ protected Object parseSourceValue(Object value) { } }; } + + protected abstract Object nullValueAsSource(Object nullValue); } private final Explicit ignoreMalformed; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java index 2434cad2ea996..031b67c263505 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java @@ -10,11 +10,14 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.TriFunction; +import org.elasticsearch.common.geo.GeometryFormatterFactory; +import org.elasticsearch.common.geo.SpatialPoint; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import java.util.Map; import java.util.Objects; import java.util.function.Consumer; import java.util.function.Function; @@ -155,4 +158,36 @@ private void parseAndConsumeFromObject( } } } + + public abstract static class AbstractPointFieldType extends AbstractGeometryFieldType { + + protected AbstractPointFieldType( + String name, + boolean indexed, + boolean stored, + boolean hasDocValues, + Parser geometryParser, + T nullValue, + Map meta + ) { + super(name, indexed, stored, hasDocValues, geometryParser, nullValue, meta); + } + + @Override + protected Object nullValueAsSource(Object nullValue) { + if (nullValue == null) { + return null; + } + SpatialPoint point = (SpatialPoint) nullValue; + return point.toWKT(); + } + + @Override + public BlockLoader blockLoader(BlockLoaderContext blContext) { + // Currently we can only load from source in ESQL + ValueFetcher fetcher = valueFetcher(blContext.sourcePaths(name()), nullValue, GeometryFormatterFactory.WKB); + // TODO consider optimization using 
BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) + return new BlockSourceReader.GeometriesBlockLoader(fetcher, BlockSourceReader.lookupMatchingAll()); + } + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java index 22b75c8262193..c18c4db955a43 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java @@ -55,13 +55,25 @@ protected AbstractShapeGeometryFieldType( Orientation orientation, Map meta ) { - super(name, isSearchable, isStored, hasDocValues, parser, meta); + super(name, isSearchable, isStored, hasDocValues, parser, null, meta); this.orientation = orientation; } public Orientation orientation() { return this.orientation; } + + @Override + public BlockLoader blockLoader(BlockLoaderContext blContext) { + // TODO: Support shapes in ESQL + return null; + } + + @Override + protected Object nullValueAsSource(Object nullValue) { + // TODO: When we support shapes in ESQL; we need to return a shape in source format here + return nullValue; + } } protected Explicit coerce; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockLoader.java index 6e0329a61c51e..fbb742ea7a7e9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockLoader.java @@ -55,22 +55,25 @@ interface RowStrideReader extends Reader { interface AllReader extends ColumnAtATimeReader, RowStrideReader {} interface StoredFields { - Source source(); + /** + * The {@code _source} of the document. 
+ */ + Source source() throws IOException; /** * @return the ID for the current document */ - String id(); + String id() throws IOException; /** * @return the routing path for the current document */ - String routing(); + String routing() throws IOException; /** * @return stored fields for the current document */ - Map> storedFields(); + Map> storedFields() throws IOException; } ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) throws IOException; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockLoaderStoredFieldsFromLeafLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockLoaderStoredFieldsFromLeafLoader.java index 0090935f51bc3..c04bafc7bb152 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockLoaderStoredFieldsFromLeafLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockLoaderStoredFieldsFromLeafLoader.java @@ -19,36 +19,53 @@ public class BlockLoaderStoredFieldsFromLeafLoader implements BlockLoader.Stored private final LeafStoredFieldLoader loader; private final SourceLoader.Leaf sourceLoader; private Source source; + private int docId = -1; + private int loaderDocId = -1; + private int sourceDocId = -1; public BlockLoaderStoredFieldsFromLeafLoader(LeafStoredFieldLoader loader, SourceLoader.Leaf sourceLoader) { this.loader = loader; this.sourceLoader = sourceLoader; } - public void advanceTo(int doc) throws IOException { - loader.advanceTo(doc); - if (sourceLoader != null) { - source = sourceLoader.source(loader, doc); + public void advanceTo(int docId) { + this.docId = docId; + } + + private void advanceIfNeeded() throws IOException { + if (loaderDocId != docId) { + loader.advanceTo(docId); + loaderDocId = docId; } } @Override - public Source source() { + public Source source() throws IOException { + advanceIfNeeded(); + if (sourceLoader != null) { + if (sourceDocId != docId) { + source = sourceLoader.source(loader, docId); + sourceDocId = docId; + } + } return source; } @Override - public String id() { + public String id() throws IOException { + advanceIfNeeded(); return loader.id(); } @Override - public String routing() { + public String routing() throws IOException { + advanceIfNeeded(); return loader.routing(); } @Override - public Map> storedFields() { + public Map> storedFields() throws IOException { + advanceIfNeeded(); return loader.storedFields(); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java index 12b5ff0e82a03..4aeb386ae8328 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java @@ -9,7 +9,11 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.UnicodeUtil; import org.elasticsearch.search.fetch.StoredFieldsSpec; @@ -26,13 +30,22 @@ public abstract class BlockSourceReader implements BlockLoader.RowStrideReader { private final ValueFetcher fetcher; private final List ignoredValues = new ArrayList<>(); + private final DocIdSetIterator iter; + private final Thread creationThread; + private int docId = -1; - 
BlockSourceReader(ValueFetcher fetcher) { + private BlockSourceReader(ValueFetcher fetcher, DocIdSetIterator iter) { this.fetcher = fetcher; + this.iter = iter; + this.creationThread = Thread.currentThread(); } @Override public final void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { + if (canSkipLoading(docId)) { + builder.appendNull(); + return; + } List<Object> values = fetcher.fetchValues(storedFields.source(), docId, ignoredValues); ignoredValues.clear(); // TODO do something with these? if (values == null || values.isEmpty()) { @@ -52,12 +65,37 @@ public final void read(int docId, BlockLoader.StoredFields storedFields, BlockLo protected abstract void append(BlockLoader.Builder builder, Object v); + /** + * Returns {@code true} if we are sure there are no values + * for this field. + */ + private boolean canSkipLoading(int docId) throws IOException { + assert docId >= this.docId; + this.docId = docId; + if (docId == iter.docID()) { + return false; + } + return (docId > iter.docID() && iter.advance(docId) == docId) == false; + } + @Override - public boolean canReuse(int startingDocID) { - return true; + public final boolean canReuse(int startingDocID) { + return creationThread == Thread.currentThread() && docId <= startingDocID; + } + + public interface LeafIteratorLookup { + DocIdSetIterator lookup(LeafReaderContext ctx) throws IOException; } private abstract static class SourceBlockLoader implements BlockLoader { + protected final ValueFetcher fetcher; + private final LeafIteratorLookup lookup; + + private SourceBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + this.fetcher = fetcher; + this.lookup = lookup; + } + @Override public final ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) throws IOException { return null; @@ -77,13 +115,32 @@ public final boolean supportsOrdinals() { public final SortedSetDocValues ordinals(LeafReaderContext context) { throw new UnsupportedOperationException(); } + + @Override + public final RowStrideReader rowStrideReader(LeafReaderContext context) throws IOException { + DocIdSetIterator iter = lookup.lookup(context); + if (iter == null) { + return new ConstantNullsReader(); + } + return rowStrideReader(context, iter); + } + + protected abstract RowStrideReader rowStrideReader(LeafReaderContext context, DocIdSetIterator iter) throws IOException; + + @Override + public final String toString() { + return "BlockSourceReader." + name() + "[" + lookup + "]"; + } + + protected abstract String name(); } + /** + * Load {@code boolean}s from {@code _source}.
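The `canSkipLoading` check above relies only on the `DocIdSetIterator` contract: `docID()` is the current position and `advance(target)` may only move forward. A self-contained restatement of the same decision (plain Lucene, hypothetical class name):

```java
import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;

class FieldPresenceCheck {
    private int lastDocId = -1;

    /** Returns true when {@code iter} proves {@code docId} has no values for the field. */
    boolean canSkip(DocIdSetIterator iter, int docId) throws IOException {
        assert docId >= lastDocId; // row-stride readers only move forward
        lastDocId = docId;
        if (iter.docID() == docId) {
            return false; // already positioned on a matching doc
        }
        if (docId < iter.docID()) {
            return true; // the iterator has moved past docId: no values there
        }
        return iter.advance(docId) != docId; // advancing landed past docId: no values
    }
}
```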
+ */ public static class BooleansBlockLoader extends SourceBlockLoader { - private final ValueFetcher fetcher; - - public BooleansBlockLoader(ValueFetcher fetcher) { - this.fetcher = fetcher; + public BooleansBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override @@ -92,14 +149,19 @@ public Builder builder(BlockFactory factory, int expectedCount) { } @Override - public RowStrideReader rowStrideReader(LeafReaderContext context) { - return new Booleans(fetcher); + public RowStrideReader rowStrideReader(LeafReaderContext context, DocIdSetIterator iter) { + return new Booleans(fetcher, iter); + } + + @Override + protected String name() { + return "Booleans"; } } private static class Booleans extends BlockSourceReader { - Booleans(ValueFetcher fetcher) { - super(fetcher); + Booleans(ValueFetcher fetcher, DocIdSetIterator iter) { + super(fetcher, iter); } @Override @@ -113,29 +175,56 @@ public String toString() { } } + /** + * Load {@link BytesRef}s from {@code _source}. + */ public static class BytesRefsBlockLoader extends SourceBlockLoader { - private final ValueFetcher fetcher; + public BytesRefsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); + } - public BytesRefsBlockLoader(ValueFetcher fetcher) { - this.fetcher = fetcher; + @Override + public final Builder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); } @Override - public Builder builder(BlockFactory factory, int expectedCount) { + protected RowStrideReader rowStrideReader(LeafReaderContext context, DocIdSetIterator iter) throws IOException { + return new BytesRefs(fetcher, iter); + } + + @Override + protected String name() { + return "Bytes"; + } + } + + public static class GeometriesBlockLoader extends SourceBlockLoader { + public GeometriesBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); + } + + @Override + public final Builder builder(BlockFactory factory, int expectedCount) { return factory.bytesRefs(expectedCount); } @Override - public RowStrideReader rowStrideReader(LeafReaderContext context) { - return new BytesRefs(fetcher); + protected RowStrideReader rowStrideReader(LeafReaderContext context, DocIdSetIterator iter) { + return new Geometries(fetcher, iter); + } + + @Override + protected String name() { + return "Geometries"; } } private static class BytesRefs extends BlockSourceReader { - BytesRef scratch = new BytesRef(); + private final BytesRef scratch = new BytesRef(); - BytesRefs(ValueFetcher fetcher) { - super(fetcher); + BytesRefs(ValueFetcher fetcher, DocIdSetIterator iter) { + super(fetcher, iter); } @Override @@ -149,11 +238,33 @@ public String toString() { } } - public static class DoublesBlockLoader extends SourceBlockLoader { - private final ValueFetcher fetcher; + private static class Geometries extends BlockSourceReader { - public DoublesBlockLoader(ValueFetcher fetcher) { - this.fetcher = fetcher; + Geometries(ValueFetcher fetcher, DocIdSetIterator iter) { + super(fetcher, iter); + } + + @Override + protected void append(BlockLoader.Builder builder, Object v) { + if (v instanceof byte[] wkb) { + ((BlockLoader.BytesRefBuilder) builder).appendBytesRef(new BytesRef(wkb)); + } else { + throw new IllegalArgumentException("Unsupported source type for spatial geometry: " + v.getClass().getSimpleName()); + } + } + + @Override + public String toString() { + return "BlockSourceReader.Geometries"; + } + } + + /** + * Load {@code double}s from {@code 
_source}. + */ + public static class DoublesBlockLoader extends SourceBlockLoader { + public DoublesBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override @@ -162,14 +273,19 @@ public Builder builder(BlockFactory factory, int expectedCount) { } @Override - public RowStrideReader rowStrideReader(LeafReaderContext context) { - return new Doubles(fetcher); + public RowStrideReader rowStrideReader(LeafReaderContext context, DocIdSetIterator iter) { + return new Doubles(fetcher, iter); + } + + @Override + protected String name() { + return "Doubles"; } } private static class Doubles extends BlockSourceReader { - Doubles(ValueFetcher fetcher) { - super(fetcher); + Doubles(ValueFetcher fetcher, DocIdSetIterator iter) { + super(fetcher, iter); } @Override @@ -183,11 +299,12 @@ public String toString() { } } + /** + * Load {@code int}s from {@code _source}. + */ public static class IntsBlockLoader extends SourceBlockLoader { - private final ValueFetcher fetcher; - - public IntsBlockLoader(ValueFetcher fetcher) { - this.fetcher = fetcher; + public IntsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override @@ -196,14 +313,19 @@ public Builder builder(BlockFactory factory, int expectedCount) { } @Override - public RowStrideReader rowStrideReader(LeafReaderContext context) { - return new Ints(fetcher); + public RowStrideReader rowStrideReader(LeafReaderContext context, DocIdSetIterator iter) throws IOException { + return new Ints(fetcher, iter); + } + + @Override + protected String name() { + return "Ints"; } } private static class Ints extends BlockSourceReader { - Ints(ValueFetcher fetcher) { - super(fetcher); + Ints(ValueFetcher fetcher, DocIdSetIterator iter) { + super(fetcher, iter); } @Override @@ -217,11 +339,12 @@ public String toString() { } } + /** + * Load {@code long}s from {@code _source}. + */ public static class LongsBlockLoader extends SourceBlockLoader { - private final ValueFetcher fetcher; - - public LongsBlockLoader(ValueFetcher fetcher) { - this.fetcher = fetcher; + public LongsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override @@ -230,14 +353,19 @@ public Builder builder(BlockFactory factory, int expectedCount) { } @Override - public RowStrideReader rowStrideReader(LeafReaderContext context) { - return new Longs(fetcher); + public RowStrideReader rowStrideReader(LeafReaderContext context, DocIdSetIterator iter) { + return new Longs(fetcher, iter); + } + + @Override + protected String name() { + return "Longs"; } } private static class Longs extends BlockSourceReader { - Longs(ValueFetcher fetcher) { - super(fetcher); + Longs(ValueFetcher fetcher, DocIdSetIterator iter) { + super(fetcher, iter); } @Override @@ -262,4 +390,69 @@ static BytesRef toBytesRef(BytesRef scratch, String v) { scratch.length = UnicodeUtil.UTF16toUTF8(v, 0, v.length(), scratch.bytes); return scratch; } + + /** + * Build a {@link LeafIteratorLookup} which matches all documents. + */ + public static LeafIteratorLookup lookupMatchingAll() { + return new LeafIteratorLookup() { + @Override + public DocIdSetIterator lookup(LeafReaderContext ctx) throws IOException { + return DocIdSetIterator.all(ctx.reader().maxDoc()); + } + + @Override + public String toString() { + return "All"; + } + }; + } + + /** + * Build a {@link LeafIteratorLookup} which checks for the field in the + * {@link FieldNamesFieldMapper field names field}.
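A `LeafIteratorLookup` is a per-segment factory for such presence iterators; `null` means no document in the segment has the field. A hypothetical usage sketch that counts matching documents in one segment:

```java
import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;

class LeafIteratorLookupExample {
    static int countDocsWithField(BlockSourceReader.LeafIteratorLookup lookup, LeafReaderContext ctx) throws IOException {
        DocIdSetIterator iter = lookup.lookup(ctx);
        if (iter == null) {
            return 0; // the segment has no document with this field
        }
        int count = 0;
        while (iter.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
            count++;
        }
        return count;
    }
}
```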
+ */ + public static LeafIteratorLookup lookupFromFieldNames(FieldNamesFieldMapper.FieldNamesFieldType fieldNames, String fieldName) { + if (false == fieldNames.isEnabled()) { + return lookupMatchingAll(); + } + return new LeafIteratorLookup() { + private final BytesRef name = new BytesRef(fieldName); + + @Override + public DocIdSetIterator lookup(LeafReaderContext ctx) throws IOException { + Terms terms = ctx.reader().terms(FieldNamesFieldMapper.NAME); + if (terms == null) { + return null; + } + TermsEnum termsEnum = terms.iterator(); + if (termsEnum.seekExact(name) == false) { + return null; + } + return termsEnum.postings(null, PostingsEnum.NONE); + } + + @Override + public String toString() { + return "FieldName"; + } + }; + } + + /** + * Build a {@link LeafIteratorLookup} which checks for norms of a text field. + */ + public static LeafIteratorLookup lookupFromNorms(String fieldName) { + return new LeafIteratorLookup() { + @Override + public DocIdSetIterator lookup(LeafReaderContext ctx) throws IOException { + return ctx.reader().getNormValues(fieldName); + } + + @Override + public String toString() { + return "Norms"; + } + }; + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockStoredFieldsReader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockStoredFieldsReader.java index 0a6cde773ff48..f3105dfb9a8dc 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockStoredFieldsReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockStoredFieldsReader.java @@ -99,7 +99,7 @@ public Builder builder(BlockFactory factory, int expectedCount) { } @Override - public RowStrideReader rowStrideReader(LeafReaderContext context) throws IOException { + public RowStrideReader rowStrideReader(LeafReaderContext context) { return new Bytes(field) { private final BytesRef scratch = new BytesRef(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index 7f175982dc28e..43e6e662dc8f2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -259,7 +259,11 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { if (hasDocValues()) { return new BlockDocValuesReader.BooleansBlockLoader(name()); } - return new BlockSourceReader.BooleansBlockLoader(sourceValueFetcher(blContext.sourcePaths(name()))); + ValueFetcher fetcher = sourceValueFetcher(blContext.sourcePaths(name())); + BlockSourceReader.LeafIteratorLookup lookup = isIndexed() || isStored() + ? 
BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) + : BlockSourceReader.lookupMatchingAll(); + return new BlockSourceReader.BooleansBlockLoader(fetcher, lookup); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index e90bea103c4cb..0c54b58aae0e3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -777,7 +777,10 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { if (hasDocValues()) { return new BlockDocValuesReader.LongsBlockLoader(name()); } - return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher(blContext.sourcePaths(name()))); + BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed() + ? BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) + : BlockSourceReader.lookupMatchingAll(); + return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher(blContext.sourcePaths(name())), lookup); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 17af6259ca27c..4dd4521b565d8 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -251,8 +251,8 @@ static Mapping createDynamicUpdate(DocumentParserContext context) { return null; } RootObjectMapper.Builder rootBuilder = context.updateRoot(); - context.getDynamicMappers() - .forEach((name, builders) -> builders.forEach(builder -> rootBuilder.addDynamic(name, null, builder, context))); + context.getDynamicMappers().forEach(mapper -> rootBuilder.addDynamic(mapper.name(), null, mapper, context)); + for (RuntimeField runtimeField : context.getDynamicRuntimeFields()) { rootBuilder.addRuntimeField(runtimeField); } @@ -485,20 +485,13 @@ private static void parseObjectDynamic(DocumentParserContext context, String cur // not dynamic, read everything up to end object context.parser().skipChildren(); } else { - Mapper.Builder dynamicObjectBuilder = null; Mapper dynamicObjectMapper; if (context.dynamic() == ObjectMapper.Dynamic.RUNTIME) { // with dynamic:runtime all leaf fields will be runtime fields unless explicitly mapped, // hence we don't dynamically create empty objects under properties, but rather carry around an artificial object mapper dynamicObjectMapper = new NoOpObjectMapper(currentFieldName, context.path().pathAsText(currentFieldName)); } else { - dynamicObjectBuilder = DynamicFieldsBuilder.findTemplateBuilderForObject(context, currentFieldName); - if (dynamicObjectBuilder == null) { - dynamicObjectBuilder = new ObjectMapper.Builder(currentFieldName, ObjectMapper.Defaults.SUBOBJECTS).enabled( - ObjectMapper.Defaults.ENABLED - ); - } - dynamicObjectMapper = dynamicObjectBuilder.build(context.createDynamicMapperBuilderContext()); + dynamicObjectMapper = DynamicFieldsBuilder.createDynamicObjectMapper(context, currentFieldName); } if (context.parent().subobjects() == false) { if (dynamicObjectMapper instanceof NestedObjectMapper) { @@ -520,8 +513,8 @@ private static void parseObjectDynamic(DocumentParserContext context, String cur } } - if (context.dynamic() != ObjectMapper.Dynamic.RUNTIME && dynamicObjectBuilder != null) { - context.addDynamicMapper(dynamicObjectMapper.name(), dynamicObjectBuilder); + if 
(context.dynamic() != ObjectMapper.Dynamic.RUNTIME) { + context.addDynamicMapper(dynamicObjectMapper); } if (dynamicObjectMapper instanceof NestedObjectMapper && context.isWithinCopyTo()) { throwOnCreateDynamicNestedViaCopyTo(dynamicObjectMapper, context); @@ -558,13 +551,12 @@ private static void parseArrayDynamic(DocumentParserContext context, String curr if (context.dynamic() == ObjectMapper.Dynamic.FALSE) { context.parser().skipChildren(); } else { - Mapper.Builder objectBuilderFromTemplate = DynamicFieldsBuilder.findTemplateBuilderForObject(context, currentFieldName); - if (objectBuilderFromTemplate == null) { + Mapper objectMapperFromTemplate = DynamicFieldsBuilder.createObjectMapperFromTemplate(context, currentFieldName); + if (objectMapperFromTemplate == null) { parseNonDynamicArray(context, currentFieldName, currentFieldName); } else { - Mapper objectMapperFromTemplate = objectBuilderFromTemplate.build(context.createDynamicMapperBuilderContext()); if (parsesArrayValue(objectMapperFromTemplate)) { - context.addDynamicMapper(objectMapperFromTemplate.name(), objectBuilderFromTemplate); + context.addDynamicMapper(objectMapperFromTemplate); context.path().add(currentFieldName); parseObjectOrField(context, objectMapperFromTemplate); context.path().remove(); @@ -607,7 +599,7 @@ private static void postProcessDynamicArrayMapping(DocumentParserContext context if (context.indexSettings().getIndexVersionCreated().onOrAfter(DYNAMICALLY_MAP_DENSE_VECTORS_INDEX_VERSION)) { final MapperBuilderContext builderContext = context.createDynamicMapperBuilderContext(); final String fullFieldName = builderContext.buildFullName(fieldName); - final List<Mapper.Builder> mappers = context.getDynamicMappers(fullFieldName); + final List<Mapper> mappers = context.getDynamicMappers(fullFieldName); if (mappers == null || context.isFieldAppliedFromTemplate(fullFieldName) || context.isCopyToField(fullFieldName) @@ -616,8 +608,7 @@ private static void postProcessDynamicArrayMapping(DocumentParserContext context // Anything that is NOT a number or anything that IS a number but not mapped to `float` should NOT be mapped to dense_vector || mappers.stream() .anyMatch( - m -> m instanceof NumberFieldMapper.Builder == false - || ((NumberFieldMapper.Builder) m).type != NumberFieldMapper.NumberType.FLOAT + m -> m instanceof NumberFieldMapper == false || ((NumberFieldMapper) m).type() != NumberFieldMapper.NumberType.FLOAT )) { return; } @@ -626,7 +617,8 @@ private static void postProcessDynamicArrayMapping(DocumentParserContext context fieldName, context.indexSettings().getIndexVersionCreated() ); - context.updateDynamicMappers(fullFieldName, builder); + DenseVectorFieldMapper denseVectorFieldMapper = builder.build(builderContext); + context.updateDynamicMappers(fullFieldName, List.of(denseVectorFieldMapper)); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index 9d5cb374a9a89..700f0e492af73 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -22,8 +22,8 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -84,9 +84,9 @@ protected void addDoc(LuceneDocument doc) { private final MappingParserContext
mappingParserContext; private final SourceToParse sourceToParse; private final Set<String> ignoredFields; - private final Map<String, List<Mapper.Builder>> dynamicMappers; + private final Map<String, List<Mapper>> dynamicMappers; private final Set<String> newFieldsSeen; - private final Map<String, ObjectMapper.Builder> dynamicObjectMappers; + private final Map<String, ObjectMapper> dynamicObjectMappers; private final List<RuntimeField> dynamicRuntimeFields; private final DocumentDimensions dimensions; private final ObjectMapper parent; @@ -102,9 +102,9 @@ private DocumentParserContext( MappingParserContext mappingParserContext, SourceToParse sourceToParse, Set<String> ignoreFields, - Map<String, List<Mapper.Builder>> dynamicMappers, + Map<String, List<Mapper>> dynamicMappers, Set<String> newFieldsSeen, - Map<String, ObjectMapper.Builder> dynamicObjectMappers, + Map<String, ObjectMapper> dynamicObjectMappers, List<RuntimeField> dynamicRuntimeFields, String id, Field version, @@ -166,9 +166,9 @@ protected DocumentParserContext( mappingParserContext, source, new HashSet<>(), - new LinkedHashMap<>(), + new HashMap<>(), new HashSet<>(), - new LinkedHashMap<>(), + new HashMap<>(), new ArrayList<>(), null, null, @@ -304,29 +304,29 @@ public boolean isCopyToField(String name) { /** * Add a new mapper dynamically created while parsing. */ - public final void addDynamicMapper(String fullName, Mapper.Builder builder) { + public final void addDynamicMapper(Mapper mapper) { // eagerly check object depth limit here to avoid stack overflow errors - if (builder instanceof ObjectMapper.Builder) { - MappingLookup.checkObjectDepthLimit(indexSettings().getMappingDepthLimit(), fullName); + if (mapper instanceof ObjectMapper) { + MappingLookup.checkObjectDepthLimit(indexSettings().getMappingDepthLimit(), mapper.name()); } // eagerly check field name limit here to avoid OOM errors // only check fields that are not already mapped or tracked in order to avoid hitting field limit too early via double-counting // note that existing fields can also receive dynamic mapping updates (e.g. constant_keyword to fix the value) - if (mappingLookup.getMapper(fullName) == null - && mappingLookup.objectMappers().containsKey(fullName) == false - && newFieldsSeen.add(fullName)) { + if (mappingLookup.getMapper(mapper.name()) == null - && mappingLookup.objectMappers().containsKey(mapper.name()) == false + && newFieldsSeen.add(mapper.name())) { mappingLookup.checkFieldLimit(indexSettings().getMappingTotalFieldsLimit(), newFieldsSeen.size()); } - if (builder instanceof ObjectMapper.Builder objectMapper) { - dynamicObjectMappers.put(fullName, objectMapper); + if (mapper instanceof ObjectMapper objectMapper) { + dynamicObjectMappers.put(objectMapper.name(), objectMapper); // dynamic object mappers may have been obtained from applying a dynamic template, in which case their definition may contain // sub-fields as well as sub-objects that need to be added to the mappings - for (Mapper.Builder submapper : objectMapper.subBuilders()) { + for (Mapper submapper : objectMapper.mappers.values()) { // we could potentially skip the step of adding these to the dynamic mappers, because their parent is already added to // that list, and what is important is that all of the intermediate objects are added to the dynamic object mappers so that // they can be looked up once sub-fields need to be added to them. For simplicity, we treat these like any other object - addDynamicMapper(fullName + "."
+ submapper.name, submapper); + addDynamicMapper(submapper); } } @@ -336,7 +336,7 @@ public final void addDynamicMapper(String fullName, Mapper.Builder builder) { // dynamically mapped objects when the incoming document defines no sub-fields in them: // 1) by default, they would be empty containers in the mappings, is it then important to map them? // 2) they can be the result of applying a dynamic template which may define sub-fields or set dynamic, enabled or subobjects. - dynamicMappers.computeIfAbsent(fullName, k -> new ArrayList<>()).add(builder); + dynamicMappers.computeIfAbsent(mapper.name(), k -> new ArrayList<>()).add(mapper); } /** @@ -345,8 +345,8 @@ public final void addDynamicMapper(String fullName, Mapper.Builder builder) { * Consists of all {@link Mapper}s that will need to be added to their respective parent {@link ObjectMapper}s in order * to become part of the resulting dynamic mapping update. */ - public final Map<String, List<Mapper.Builder>> getDynamicMappers() { - return dynamicMappers; + public final List<Mapper> getDynamicMappers() { + return dynamicMappers.values().stream().flatMap(List::stream).toList(); } /** @@ -355,13 +355,13 @@ public final Map<String, List<Mapper.Builder>> getDynamicMappers() { * @param fieldName Full field name with dot-notation. * @return List of Mappers or null */ - public final List<Mapper.Builder> getDynamicMappers(String fieldName) { + public final List<Mapper> getDynamicMappers(String fieldName) { return dynamicMappers.get(fieldName); } - public void updateDynamicMappers(String name, Mapper.Builder mapper) { + public void updateDynamicMappers(String name, List<Mapper> mappers) { dynamicMappers.remove(name); - dynamicMappers.put(name, List.of(mapper)); + mappers.forEach(this::addDynamicMapper); } /** @@ -371,7 +371,7 @@ public void updateDynamicMappers(String name, Mapper.Builder mapper) { * Holds a flat set of object mappers, meaning that an object field named foo.bar can be looked up directly with its * dotted name. */ - final ObjectMapper.Builder getDynamicObjectMapper(String name) { + final ObjectMapper getDynamicObjectMapper(String name) { return dynamicObjectMappers.get(name); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java b/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java index 620b972ee04bf..f2d1b8058f115 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java @@ -155,6 +155,25 @@ void createDynamicFieldFromValue(final DocumentParserContext context, String nam } } + /** + * Returns a dynamically created object mapper, possibly based on a matching dynamic template. + */ + static Mapper createDynamicObjectMapper(DocumentParserContext context, String name) { + Mapper mapper = createObjectMapperFromTemplate(context, name); + return mapper != null + ? mapper + : new ObjectMapper.Builder(name, ObjectMapper.Defaults.SUBOBJECTS).enabled(ObjectMapper.Defaults.ENABLED) + .build(context.createDynamicMapperBuilderContext()); + } + + /** + * Returns a dynamically created object mapper, based exclusively on a matching dynamic template, null otherwise. + */ + static Mapper createObjectMapperFromTemplate(DocumentParserContext context, String name) { + Mapper.Builder templateBuilder = findTemplateBuilderForObject(context, name); + return templateBuilder == null ? null : templateBuilder.build(context.createDynamicMapperBuilderContext()); + } + /** * Creates a dynamic string field based on a matching dynamic template.
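With the switch from `LinkedHashMap` to `HashMap` in `DocumentParserContext`, the flattened `getDynamicMappers()` result is no longer guaranteed to be in insertion order. The flattening step in isolation (generic sketch, hypothetical names):

```java
import java.util.List;
import java.util.Map;

class FlattenExample {
    // Equivalent of dynamicMappers.values().stream().flatMap(List::stream).toList()
    static <T> List<T> flatten(Map<String, List<T>> byName) {
        return byName.values().stream().flatMap(List::stream).toList();
    }
}
```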
* No field is created in case there is no matching dynamic template. @@ -234,10 +253,7 @@ private static boolean applyMatchingTemplate( return true; } - /** - * Returns a dynamically created object builder, based exclusively on a matching dynamic template, null otherwise. - */ - static Mapper.Builder findTemplateBuilderForObject(DocumentParserContext context, String name) { + private static Mapper.Builder findTemplateBuilderForObject(DocumentParserContext context, String name) { DynamicTemplate.XContentFieldType matchType = DynamicTemplate.XContentFieldType.OBJECT; DynamicTemplate dynamicTemplate = context.findDynamicTemplate(name, matchType); if (dynamicTemplate == null) { @@ -293,7 +309,7 @@ private static final class Concrete implements Strategy { void createDynamicField(Mapper.Builder builder, DocumentParserContext context) throws IOException { Mapper mapper = builder.build(context.createDynamicMapperBuilderContext()); - context.addDynamicMapper(mapper.name(), builder); + context.addDynamicMapper(mapper); parseField.accept(context, mapper); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java index 56b65dbf84c6b..4effc380646ff 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java @@ -357,14 +357,13 @@ protected String contentType() { return CONTENT_TYPE; } - public static class GeoPointFieldType extends AbstractGeometryFieldType implements GeoShapeQueryable { + public static class GeoPointFieldType extends AbstractPointFieldType implements GeoShapeQueryable { private final TimeSeriesParams.MetricType metricType; public static final GeoFormatterFactory GEO_FORMATTER_FACTORY = new GeoFormatterFactory<>( List.of(new SimpleVectorTileFormatter()) ); - private final GeoPoint nullValue; private final FieldValues scriptValues; private final IndexMode indexMode; @@ -380,8 +379,7 @@ private GeoPointFieldType( TimeSeriesParams.MetricType metricType, IndexMode indexMode ) { - super(name, indexed, stored, hasDocValues, parser, meta); - this.nullValue = nullValue; + super(name, indexed, stored, hasDocValues, parser, nullValue, meta); this.scriptValues = scriptValues; this.metricType = metricType; this.indexMode = indexMode; @@ -482,17 +480,6 @@ public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext throw new IllegalStateException("unknown field data type [" + operation.name() + "]"); } - @Override - public BlockLoader blockLoader(BlockLoaderContext blContext) { - if (hasDocValues()) { - return new BlockDocValuesReader.LongsBlockLoader(name()); - } - // TODO: Currently we use longs in the compute engine and render to WKT in ESQL - return new BlockSourceReader.LongsBlockLoader( - valueFetcher(blContext.sourcePaths(name()), nullValue, GeometryFormatterFactory.WKT) - ); - } - @Override public Query distanceFeatureQuery(Object origin, String pivot, SearchExecutionContext context) { failIfNotIndexedNorDocValuesFallback(context); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java index f9e2c55e5085a..b7990648539c1 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java @@ -11,7 +11,6 @@ 
import org.apache.lucene.document.StoredField; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.ByteUtils; import org.elasticsearch.xcontent.XContentBuilder; @@ -190,8 +189,9 @@ private static void decodeAndWrite(XContentBuilder b, BytesRef r) throws IOExcep } private static void decodeAndWriteXContent(XContentBuilder b, XContentType type, BytesRef r) throws IOException { - BytesReference ref = new BytesArray(r.bytes, r.offset + 1, r.length - 1); - try (XContentParser parser = type.xContent().createParser(XContentParserConfiguration.EMPTY, ref.streamInput())) { + try ( + XContentParser parser = type.xContent().createParser(XContentParserConfiguration.EMPTY, r.bytes, r.offset + 1, r.length - 1) + ) { b.copyCurrentStructure(parser); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index b62113a586bba..c3a740e4cbfe6 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -592,7 +592,18 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { } return new BlockStoredFieldsReader.BytesFromBytesRefsBlockLoader(name()); } - return new BlockSourceReader.BytesRefsBlockLoader(sourceValueFetcher(blContext.sourcePaths(name()))); + SourceValueFetcher fetcher = sourceValueFetcher(blContext.sourcePaths(name())); + return new BlockSourceReader.BytesRefsBlockLoader(fetcher, sourceBlockLoaderLookup(blContext)); + } + + private BlockSourceReader.LeafIteratorLookup sourceBlockLoaderLookup(BlockLoaderContext blContext) { + if (getTextSearchInfo().hasNorms()) { + return BlockSourceReader.lookupFromNorms(name()); + } + if (isIndexed() || isStored()) { + return BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()); + } + return BlockSourceReader.lookupMatchingAll(); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 376cb1a10e2e6..6f561bdb2dc4c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -663,6 +663,11 @@ public interface BlockLoaderContext { * If field is a leaf multi-field return the path to the parent field. Otherwise, return null. */ String parentField(String field); + + /** + * The {@code _field_names} field mapper, mostly used to check if it is enabled. 
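`BlockLoaderContext.fieldNames()` exists so that source-backed block loaders can pick the cheapest presence iterator, as the keyword, number, and date mappers in this patch do. The shared decision, extracted into a hypothetical helper (`lookupFromFieldNames` itself falls back to match-all when `_field_names` is disabled):

```java
class PresenceLookupExample {
    // Hypothetical helper mirroring the mappers above: _field_names can prove
    // field presence only for indexed or stored fields.
    static BlockSourceReader.LeafIteratorLookup choose(MappedFieldType ft, MappedFieldType.BlockLoaderContext blContext) {
        if (ft.isIndexed() || ft.isStored()) {
            return BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), ft.name());
        }
        // otherwise we must assume any document may have a value
        return BlockSourceReader.lookupMatchingAll();
    }
}
```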
+ */ + FieldNamesFieldMapper.FieldNamesFieldType fieldNames(); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index cbf2dd872da2f..b714eabbd2636 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -130,6 +130,7 @@ public enum MergeReason { private final Supplier<MappingParserContext> mappingParserContextSupplier; private volatile DocumentMapper mapper; + private volatile long mappingVersion; public MapperService( ClusterService clusterService, @@ -298,6 +299,7 @@ public void updateMapping(final IndexMetadata currentIndexMetadata, final IndexM previousMapper = this.mapper; assert assertRefreshIsNotNeeded(previousMapper, type, incomingMapping); this.mapper = newDocumentMapper(incomingMapping, MergeReason.MAPPING_RECOVERY, incomingMappingSource); + this.mappingVersion = newIndexMetadata.getMappingVersion(); } String op = previousMapper != null ? "updated" : "added"; if (logger.isDebugEnabled() && incomingMappingSource.compressed().length < 512) { @@ -590,6 +592,10 @@ public DocumentMapper documentMapper() { return mapper; } + public long mappingVersion() { + return mappingVersion; + } + /** * Returns {@code true} if the given {@code mappingSource} includes a type * as a top-level object. diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 091e3c61764b0..d25832a28d318 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -118,7 +118,7 @@ public static final class Builder extends FieldMapper.Builder { private final Parameter<Map<String, String>> meta = Parameter.metaParam(); private final ScriptCompiler scriptCompiler; - public final NumberType type; + private final NumberType type; private boolean allowMultipleValues = true; private final IndexVersion indexCreatedVersion; @@ -444,8 +444,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup); } }, FLOAT("float", NumericType.FLOAT) { @@ -606,8 +606,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup); } }, DOUBLE("double", NumericType.DOUBLE) { @@ -746,8 +746,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup); } }, BYTE("byte", NumericType.BYTE) {
@@ -849,8 +849,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup); } }, SHORT("short", NumericType.SHORT) { @@ -948,8 +948,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup); } }, INTEGER("integer", NumericType.INT) { @@ -1115,8 +1115,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup); } }, LONG("long", NumericType.LONG) { @@ -1252,8 +1252,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher) { - return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher, lookup); } }; @@ -1521,7 +1521,7 @@ protected void writeValue(XContentBuilder b, long value) throws IOException { abstract BlockLoader blockLoaderFromDocValues(String fieldName); - abstract BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher); + abstract BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup); } public static class NumberFieldType extends SimpleMappedFieldType { @@ -1661,7 +1661,10 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { if (hasDocValues()) { return type.blockLoaderFromDocValues(name()); } - return type.blockLoaderFromSource(sourceValueFetcher(blContext.sourcePaths(name()))); + BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed() + ? 
BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) + : BlockSourceReader.lookupMatchingAll(); + return type.blockLoaderFromSource(sourceValueFetcher(blContext.sourcePaths(name())), lookup); } @Override @@ -1817,6 +1820,10 @@ public NumberFieldType fieldType() { return (NumberFieldType) super.fieldType(); } + public NumberType type() { + return type; + } + @Override protected String contentType() { return fieldType().type.typeName(); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index d67763879433f..068f5882f5eb3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -26,12 +26,10 @@ import java.util.Collection; import java.util.Comparator; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.Set; import java.util.stream.Stream; public class ObjectMapper extends Mapper { @@ -80,7 +78,6 @@ public static class Builder extends Mapper.Builder { protected Explicit<Boolean> enabled = Explicit.IMPLICIT_TRUE; protected Dynamic dynamic; protected final List<Mapper.Builder> mappersBuilders = new ArrayList<>(); - private final Set<String> subMapperNames = new HashSet<>(); // keeps track of dynamically added subfields public Builder(String name, Explicit<Boolean> subobjects) { super(name); @@ -99,27 +96,31 @@ public Builder dynamic(Dynamic dynamic) { public Builder add(Mapper.Builder builder) { mappersBuilders.add(builder); - subMapperNames.add(builder.name); return this; } - public Collection<Mapper.Builder> subBuilders() { - return mappersBuilders; + private void add(String name, Mapper mapper) { + add(new Mapper.Builder(name) { + @Override + public Mapper build(MapperBuilderContext context) { + return mapper; + } + }); } /** - * Adds a dynamically created {@link Mapper.Builder} to this builder. + * Adds a dynamically created {@link Mapper} to this builder. * * @param name the name of the Mapper, including object prefixes * @param prefix the object prefix of this mapper * @param mapper the mapper to add * @param context the DocumentParserContext in which the mapper has been built */ - public final void addDynamic(String name, String prefix, Mapper.Builder mapper, DocumentParserContext context) { + public final void addDynamic(String name, String prefix, Mapper mapper, DocumentParserContext context) { // If the mapper to add has no dots, or the current object mapper has subobjects set to false, // we just add it as it is for sure a leaf mapper if (name.contains(".") == false || subobjects.value() == false) { - add(mapper); + add(name, mapper); } // otherwise we strip off the first object path of the mapper name, load or create // the relevant object mapper, and then recurse down into it, passing the remainder @@ -129,28 +130,22 @@ public final void addDynamic(String name, String prefix, Mapper.Builder mapper, int firstDotIndex = name.indexOf("."); String immediateChild = name.substring(0, firstDotIndex); String immediateChildFullName = prefix == null ? immediateChild : prefix + "."
+ immediateChild; - ObjectMapper.Builder parentBuilder = findObjectBuilder(immediateChild, immediateChildFullName, context); + ObjectMapper.Builder parentBuilder = findObjectBuilder(immediateChildFullName, context); parentBuilder.addDynamic(name.substring(firstDotIndex + 1), immediateChildFullName, mapper, context); + add(parentBuilder); } } - private ObjectMapper.Builder findObjectBuilder(String leafName, String fullName, DocumentParserContext context) { + private static ObjectMapper.Builder findObjectBuilder(String fullName, DocumentParserContext context) { // does the object mapper already exist? if so, use that ObjectMapper objectMapper = context.mappingLookup().objectMappers().get(fullName); if (objectMapper != null) { - ObjectMapper.Builder builder = objectMapper.newBuilder(context.indexSettings().getIndexVersionCreated()); - add(builder); - return builder; + return objectMapper.newBuilder(context.indexSettings().getIndexVersionCreated()); } // has the object mapper been added as a dynamic update already? - ObjectMapper.Builder builder = context.getDynamicObjectMapper(fullName); - if (builder != null) { - // we re-use builder instances so if the builder has already been - // added we don't need to do so again - if (subMapperNames.contains(leafName) == false) { - add(builder); - } - return builder; + objectMapper = context.getDynamicObjectMapper(fullName); + if (objectMapper != null) { + return objectMapper.newBuilder(context.indexSettings().getIndexVersionCreated()); } throw new IllegalStateException("Missing intermediate object " + fullName); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java index 607ba2b261f5d..c07821f3c9ae7 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -79,12 +80,15 @@ public Set<String> requiredStoredFields() { * Load {@code _source} from doc values.
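`addDynamic` peels one path element off a dotted name per recursion step until only a leaf name remains. The same recursion on a bare tree (self-contained sketch with hypothetical types, without the dynamic-template and `subobjects: false` handling of the real code):

```java
import java.util.HashMap;
import java.util.Map;

class DottedPathTree {
    final Map<String, DottedPathTree> children = new HashMap<>();
    Object leaf;

    void addDynamic(String name, Object mapper) {
        int dot = name.indexOf('.');
        if (dot < 0) {
            // no dots left: this is a leaf mapper
            children.computeIfAbsent(name, k -> new DottedPathTree()).leaf = mapper;
        } else {
            // recurse into (or create) the intermediate object for the first path element
            String child = name.substring(0, dot);
            children.computeIfAbsent(child, k -> new DottedPathTree()).addDynamic(name.substring(dot + 1), mapper);
        }
    }
}
```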
*/ class Synthetic implements SourceLoader { - private final SyntheticFieldLoader loader; - private final Map<String, SyntheticFieldLoader.StoredFieldLoader> storedFieldLoaders; + private final Supplier<SyntheticFieldLoader> syntheticFieldLoaderLeafSupplier; + private final Set<String> requiredStoredFields; public Synthetic(Mapping mapping) { - loader = mapping.syntheticFieldLoader(); - storedFieldLoaders = Map.copyOf(loader.storedFieldLoaders().collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))); + this.syntheticFieldLoaderLeafSupplier = mapping::syntheticFieldLoader; + this.requiredStoredFields = syntheticFieldLoaderLeafSupplier.get() + .storedFieldLoaders() + .map(Map.Entry::getKey) + .collect(Collectors.toSet()); } @Override @@ -94,19 +98,26 @@ public boolean reordersFieldValues() { @Override public Set<String> requiredStoredFields() { - return storedFieldLoaders.keySet(); + return requiredStoredFields; } @Override public Leaf leaf(LeafReader reader, int[] docIdsInLeaf) throws IOException { - return new SyntheticLeaf(loader.docValuesLoader(reader, docIdsInLeaf)); + SyntheticFieldLoader loader = syntheticFieldLoaderLeafSupplier.get(); + return new SyntheticLeaf(loader, loader.docValuesLoader(reader, docIdsInLeaf)); } - private class SyntheticLeaf implements Leaf { + private static class SyntheticLeaf implements Leaf { + private final SyntheticFieldLoader loader; private final SyntheticFieldLoader.DocValuesLoader docValuesLoader; + private final Map<String, SyntheticFieldLoader.StoredFieldLoader> storedFieldLoaders; - private SyntheticLeaf(SyntheticFieldLoader.DocValuesLoader docValuesLoader) { + private SyntheticLeaf(SyntheticFieldLoader loader, SyntheticFieldLoader.DocValuesLoader docValuesLoader) { + this.loader = loader; this.docValuesLoader = docValuesLoader; + this.storedFieldLoaders = Map.copyOf( loader.storedFieldLoaders().collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) ); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 1ae0489173ce3..99efa5c6b896b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -937,9 +937,15 @@ public boolean isAggregatable() { return fielddata; } + public boolean canUseSyntheticSourceDelegateForQuerying() { + return syntheticSourceDelegate != null + && syntheticSourceDelegate.ignoreAbove() == Integer.MAX_VALUE + && (syntheticSourceDelegate.isIndexed() || syntheticSourceDelegate.isStored()); + } + @Override public BlockLoader blockLoader(BlockLoaderContext blContext) { - if (syntheticSourceDelegate != null) { + if (canUseSyntheticSourceDelegateForQuerying()) { return new BlockLoader.Delegating(syntheticSourceDelegate.blockLoader(blContext)) { @Override protected String delegatingTo() { @@ -979,7 +985,22 @@ protected String delegatingTo() { */ return null; } - return new BlockSourceReader.BytesRefsBlockLoader(SourceValueFetcher.toString(blContext.sourcePaths(name()))); + SourceValueFetcher fetcher = SourceValueFetcher.toString(blContext.sourcePaths(name())); + return new BlockSourceReader.BytesRefsBlockLoader(fetcher, blockReaderDisiLookup(blContext)); + } + + /** + * Build an iterator of documents that have the field.
This mirrors parseCreateField, + * using whatever structure can cheaply prove that a document has the field: norms when they are + * enabled, the {@code _field_names} field when the field is indexed or stored, and a match-all + * iterator otherwise. + */ + private BlockSourceReader.LeafIteratorLookup blockReaderDisiLookup(BlockLoaderContext blContext) { + if (getTextSearchInfo().hasNorms()) { + return BlockSourceReader.lookupFromNorms(name()); + } + if (isIndexed() == false && isStored() == false) { + return BlockSourceReader.lookupMatchingAll(); + } + return BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java index 482f10b39fc9c..27424d4591ba6 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java @@ -45,6 +45,7 @@ import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MappingParser; @@ -1176,9 +1177,24 @@ public void parse(DocumentParserContext context) throws IOException { } if (fieldType().dims == null) { int dims = fieldType().elementType.parseDimensionCount(context); - DenseVectorFieldMapper.Builder update = (DenseVectorFieldMapper.Builder) getMergeBuilder(); - update.dims.setValue(dims); - context.addDynamicMapper(name(), update); + DenseVectorFieldType updatedDenseVectorFieldType = new DenseVectorFieldType( + fieldType().name(), + indexCreatedVersion, + fieldType().elementType, + dims, + fieldType().indexed, + fieldType().similarity, + fieldType().meta() + ); + Mapper update = new DenseVectorFieldMapper( + simpleName(), + updatedDenseVectorFieldType, + indexOptions, + indexCreatedVersion, + multiFields(), + copyTo + ); + context.addDynamicMapper(update); return; } if (fieldType().indexed) { diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java index fae3dd4069076..04ae0bb498841 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java @@ -39,14 +39,14 @@ public MatchNoneQueryBuilder(String rewriteReason) { */ public MatchNoneQueryBuilder(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { rewriteReason = in.readOptionalString(); } } @Override protected void doWriteTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeOptionalString(rewriteReason); } } diff --git a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java index b2067549fab67..5a2b01838e27b 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java @@ -91,7 +91,7 @@ public final class
SimpleQueryStringBuilder extends AbstractQueryBuilder source) throws IOExcept return BytesReference.bytes(builder.map(map)); } + private static final XContentParserConfiguration PARSER_CONFIGURATION = XContentParserConfiguration.EMPTY.withRegistry( + NamedXContentRegistry.EMPTY + ).withDeprecationHandler(DeprecationHandler.THROW_UNSUPPORTED_OPERATION); + private static boolean isQueryJson(BytesReference bytesReference) { try ( - XContentParser parser = QUERY_CONTENT_TYPE.createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - bytesReference.streamInput() + XContentParser parser = XContentHelper.createParserNotCompressed( + PARSER_CONFIGURATION, + bytesReference, + QUERY_CONTENT_TYPE.type() ) ) { Map<String, Object> query = parser.map(); diff --git a/server/src/main/java/org/elasticsearch/index/shard/DocsStats.java b/server/src/main/java/org/elasticsearch/index/shard/DocsStats.java index 1b4b6405df71c..0d8fc52cddacf 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/DocsStats.java +++ b/server/src/main/java/org/elasticsearch/index/shard/DocsStats.java @@ -63,7 +63,7 @@ public long getDeleted() { /** * Returns the total size in bytes of all documents in this stats. - * This value may be more reliable than {@link StoreStats#getSizeInBytes()} in estimating the index size. + * This value may be more reliable than {@link StoreStats#sizeInBytes()} in estimating the index size. */ public long getTotalSizeInBytes() { return totalSizeInBytes; } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index d7d67b3af159e..aa6e3e1d45003 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1970,24 +1970,28 @@ private void loadGlobalCheckpointToReplicationTracker() throws IOException { * opens the engine on top of the existing lucene engine and translog. * Operations from the translog will be replayed to bring lucene up to date.
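The recovery path below is converted from a synchronous, exception-throwing method to listener style. The shape of that conversion in isolation (hypothetical sketch using Elasticsearch's `ActionListener`): every failure, whether thrown during setup or reported asynchronously, must reach the listener exactly once.

```java
import org.elasticsearch.action.ActionListener;

class ListenerConversionExample {
    void recoverAsync(ActionListener<Void> listener) {
        try {
            prepare(); // synchronous setup that may throw
            startAsyncWork(listener); // completes the listener later
        } catch (Exception e) {
            listener.onFailure(e); // setup failures are routed to the listener too
        }
    }

    void prepare() {}

    void startAsyncWork(ActionListener<Void> listener) {
        listener.onResponse(null); // placeholder for the real asynchronous completion
    }
}
```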
**/ - public void openEngineAndRecoverFromTranslog() throws IOException { - recoveryState.validateCurrentStage(RecoveryState.Stage.INDEX); - maybeCheckIndex(); - recoveryState.setLocalTranslogStage(); - final RecoveryState.Translog translogRecoveryStats = recoveryState.getTranslog(); - final Engine.TranslogRecoveryRunner translogRecoveryRunner = (engine, snapshot) -> { - translogRecoveryStats.totalOperations(snapshot.totalOperations()); - translogRecoveryStats.totalOperationsOnStart(snapshot.totalOperations()); - return runTranslogRecovery( - engine, - snapshot, - Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY, - translogRecoveryStats::incrementRecoveredOperations - ); - }; - loadGlobalCheckpointToReplicationTracker(); - innerOpenEngineAndTranslog(replicationTracker); - getEngine().recoverFromTranslog(translogRecoveryRunner, Long.MAX_VALUE); + public void openEngineAndRecoverFromTranslog(ActionListener<Void> listener) { + try { + recoveryState.validateCurrentStage(RecoveryState.Stage.INDEX); + maybeCheckIndex(); + recoveryState.setLocalTranslogStage(); + final RecoveryState.Translog translogRecoveryStats = recoveryState.getTranslog(); + final Engine.TranslogRecoveryRunner translogRecoveryRunner = (engine, snapshot) -> { + translogRecoveryStats.totalOperations(snapshot.totalOperations()); + translogRecoveryStats.totalOperationsOnStart(snapshot.totalOperations()); + return runTranslogRecovery( + engine, + snapshot, + Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY, + translogRecoveryStats::incrementRecoveredOperations + ); + }; + loadGlobalCheckpointToReplicationTracker(); + innerOpenEngineAndTranslog(replicationTracker); + getEngine().recoverFromTranslog(translogRecoveryRunner, Long.MAX_VALUE, listener); + } catch (Exception e) { + listener.onFailure(e); + } } /** diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index bc5a4b02116a7..0acddcf0e45b2 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -482,7 +482,10 @@ private void internalRecoverFromStore(IndexShard indexShard, ActionListenerandThen((l, ignored) -> { indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); indexShard.postRecovery("post recovery from shard_store", l); @@ -583,7 +586,10 @@ record ShardAndIndexIds(IndexId indexId, ShardId shardId) {} bootstrap(indexShard, store); assert indexShard.shardRouting.primary() : "only primary shards can recover from store"; writeEmptyRetentionLeasesFile(indexShard); - indexShard.openEngineAndRecoverFromTranslog(); + indexShard.openEngineAndRecoverFromTranslog(l); + }) + + .andThen((l, ignored) -> { indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); indexShard.postRecovery("restore done", l); diff --git a/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarityProvider.java index 28b93edac3895..492f28b52e6c7 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarityProvider.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarityProvider.java @@ -35,9 +35,9 @@ public Similarity apply(Settings settings, IndexVersion indexCreatedVersion, Scr } return new ScriptedSimilarity( weightScript == null ? 
null : weightScript.toString(), - weightScriptFactory == null ? null : weightScriptFactory::newInstance, + weightScriptFactory, script.toString(), - scriptFactory::newInstance, + scriptFactory, discountOverlaps ); } diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java index 451af25dfa649..7f6cf305a7428 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java @@ -10,12 +10,14 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.Strings; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.repositories.ShardGeneration; import org.elasticsearch.repositories.ShardSnapshotResult; import org.elasticsearch.snapshots.AbortedSnapshotException; +import org.elasticsearch.snapshots.PausedSnapshotException; import java.util.Objects; import java.util.concurrent.atomic.AtomicReference; @@ -50,6 +52,14 @@ public enum Stage { * Snapshot failed */ FAILURE, + /** + * Snapshot pausing because of node removal + */ + PAUSING, + /** + * Snapshot paused because of node removal + */ + PAUSED, /** * Snapshot aborted */ @@ -57,8 +67,8 @@ public enum Stage { } /** - * Used to complete listeners added via {@link #addAbortListener} when the shard snapshot is either aborted or it gets past the stages - * where an abort could have occurred. + * Used to complete listeners added via {@link #addAbortListener} when the shard snapshot is either aborted/paused or it gets past the + * stages where an abort/pause could have occurred. 
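The pause support below reuses the abort machinery: a single guarded transition that only succeeds from the INIT or STARTED stages. The compare-and-set core in isolation (self-contained sketch with simplified stages):

```java
import java.util.concurrent.atomic.AtomicReference;

class StageTransitionExample {
    enum Stage { INIT, STARTED, FINALIZE, DONE, FAILURE, PAUSING, PAUSED, ABORTED }

    private final AtomicReference<Stage> stage = new AtomicReference<>(Stage.INIT);

    // Returns true when the snapshot was still abortable/pausable; once it has
    // progressed past STARTED the request is silently ignored.
    boolean moveTo(Stage newStage) {
        assert newStage == Stage.ABORTED || newStage == Stage.PAUSING : newStage;
        return stage.compareAndSet(Stage.INIT, newStage) || stage.compareAndSet(Stage.STARTED, newStage);
    }
}
```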
*/ public enum AbortStatus { /** @@ -146,6 +156,7 @@ public synchronized Copy moveToFinalize() { yield asCopy(); } case ABORTED -> throw new AbortedSnapshotException(); + case PAUSING -> throw new PausedSnapshotException(); default -> { final var message = Strings.format( "Unable to move the shard snapshot status to [FINALIZE]: expecting [STARTED] but got [%s]", @@ -176,8 +187,21 @@ public void addAbortListener(ActionListener<AbortStatus> listener) { abortListeners.addListener(listener); } - public synchronized void abortIfNotCompleted(final String failure, Consumer<ActionListener<Releasable>> notifyRunner) { - if (stage.compareAndSet(Stage.INIT, Stage.ABORTED) || stage.compareAndSet(Stage.STARTED, Stage.ABORTED)) { + public void abortIfNotCompleted(final String failure, Consumer<ActionListener<Releasable>> notifyRunner) { + abortAndMoveToStageIfNotCompleted(Stage.ABORTED, failure, notifyRunner); + } + + public void pauseIfNotCompleted(Consumer<ActionListener<Releasable>> notifyRunner) { + abortAndMoveToStageIfNotCompleted(Stage.PAUSING, "paused for removal of node holding primary", notifyRunner); + } + + private synchronized void abortAndMoveToStageIfNotCompleted( + final Stage newStage, + final String failure, + final Consumer<ActionListener<Releasable>> notifyRunner + ) { + assert newStage == Stage.ABORTED || newStage == Stage.PAUSING : newStage; + if (stage.compareAndSet(Stage.INIT, newStage) || stage.compareAndSet(Stage.STARTED, newStage)) { this.failure = failure; notifyRunner.accept(abortListeners.map(r -> { Releasables.closeExpectNoException(r); @@ -186,6 +210,18 @@ public synchronized void abortIfNotCompleted(final String failure, Consumer<ActionListener<Releasable>> notifyRunner) { case ABORTED -> throw new AbortedSnapshotException(); + case PAUSING -> throw new PausedSnapshotException(); } } + public boolean isPaused() { + return stage.get() == Stage.PAUSED; + } + /** * Increments number of processed files */ diff --git a/server/src/main/java/org/elasticsearch/index/store/StoreStats.java b/server/src/main/java/org/elasticsearch/index/store/StoreStats.java index cda87a421bd32..f0df51d4cb78b 100644 --- a/server/src/main/java/org/elasticsearch/index/store/StoreStats.java +++ b/server/src/main/java/org/elasticsearch/index/store/StoreStats.java @@ -33,7 +33,7 @@ public class StoreStats implements Writeable, ToXContentFragment { private long sizeInBytes; private long totalDataSetSizeInBytes; - private long reservedSize; + private long reservedSizeInBytes; public StoreStats() { @@ -47,9 +47,9 @@ public StoreStats(StreamInput in) throws IOException { totalDataSetSizeInBytes = sizeInBytes; } if (in.getTransportVersion().onOrAfter(RESERVED_BYTES_VERSION)) { - reservedSize = in.readZLong(); + reservedSizeInBytes = in.readZLong(); } else { - reservedSize = UNKNOWN_RESERVED_BYTES; + reservedSizeInBytes = UNKNOWN_RESERVED_BYTES; } } @@ -63,7 +63,7 @@ public StoreStats(long sizeInBytes, long totalDataSetSizeInBytes, long reservedSize) { assert reservedSize == UNKNOWN_RESERVED_BYTES || reservedSize >= 0 : reservedSize; this.sizeInBytes = sizeInBytes; this.totalDataSetSizeInBytes = totalDataSetSizeInBytes; - this.reservedSize = reservedSize; + this.reservedSizeInBytes = reservedSize; } public void add(StoreStats stats) { @@ -72,7 +72,7 @@ public void add(StoreStats stats) { } sizeInBytes += stats.sizeInBytes; totalDataSetSizeInBytes += stats.totalDataSetSizeInBytes; - reservedSize = ignoreIfUnknown(reservedSize) + ignoreIfUnknown(stats.reservedSize); + reservedSizeInBytes = ignoreIfUnknown(reservedSizeInBytes) + ignoreIfUnknown(stats.reservedSizeInBytes); } private static long ignoreIfUnknown(long reservedSize) { @@ -83,28 +83,20 @@ public long sizeInBytes() { return
sizeInBytes; } - public long getSizeInBytes() { - return sizeInBytes; - } - public ByteSizeValue size() { return ByteSizeValue.ofBytes(sizeInBytes); } - public ByteSizeValue getSize() { - return size(); + public long totalDataSetSizeInBytes() { + return totalDataSetSizeInBytes; } public ByteSizeValue totalDataSetSize() { return ByteSizeValue.ofBytes(totalDataSetSizeInBytes); } - public ByteSizeValue getTotalDataSetSize() { - return totalDataSetSize(); - } - - public long totalDataSetSizeInBytes() { - return totalDataSetSizeInBytes; + public long reservedSizeInBytes() { + return reservedSizeInBytes; } /** @@ -113,7 +105,7 @@ public long totalDataSetSizeInBytes() { * the reserved size is unknown. */ public ByteSizeValue getReservedSize() { - return ByteSizeValue.ofBytes(reservedSize); + return ByteSizeValue.ofBytes(reservedSizeInBytes); } @Override @@ -123,7 +115,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(totalDataSetSizeInBytes); } if (out.getTransportVersion().onOrAfter(RESERVED_BYTES_VERSION)) { - out.writeZLong(reservedSize); + out.writeZLong(reservedSizeInBytes); } } @@ -144,12 +136,12 @@ public boolean equals(Object o) { StoreStats that = (StoreStats) o; return sizeInBytes == that.sizeInBytes && totalDataSetSizeInBytes == that.totalDataSetSizeInBytes - && reservedSize == that.reservedSize; + && reservedSizeInBytes == that.reservedSizeInBytes; } @Override public int hashCode() { - return Objects.hash(sizeInBytes, totalDataSetSizeInBytes, reservedSize); + return Objects.hash(sizeInBytes, totalDataSetSizeInBytes, reservedSizeInBytes); } static final class Fields { diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index dbbf2bb98212a..1e81ca71c5395 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -18,9 +18,9 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.mapping.put.AutoPutMappingAction; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.TransportAutoPutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; @@ -64,6 +64,7 @@ import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedFunction; @@ -138,14 +139,12 @@ import org.elasticsearch.search.query.QueryPhase; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import 
org.elasticsearch.xcontent.XContentType; import java.io.Closeable; import java.io.IOException; -import java.io.InputStream; import java.io.UncheckedIOException; import java.nio.file.Files; import java.util.ArrayList; @@ -883,8 +882,8 @@ public void createShard( : "mapping update consumer only required by local shards recovery"; client.execute( featureService.clusterHasFeature(clusterService.state(), SUPPORTS_AUTO_PUT) - ? AutoPutMappingAction.INSTANCE - : PutMappingAction.INSTANCE, + ? TransportAutoPutMappingAction.TYPE + : TransportPutMappingAction.TYPE, new PutMappingRequest().setConcreteIndex(shardRouting.index()) .setConcreteIndex(shardRouting.index()) // concrete index - no name clash, it uses uuid .source(mapping.source().string(), XContentType.JSON) @@ -1654,8 +1653,7 @@ public AliasFilter buildAliasFilter(ClusterState state, String index, Set<String> resolvedExpressions) { CheckedFunction<BytesReference, QueryBuilder, IOException> filterParser = bytes -> { try ( - InputStream inputStream = bytes.streamInput(); - XContentParser parser = XContentFactory.xContentType(inputStream).xContent().createParser(parserConfig, inputStream) + XContentParser parser = XContentHelper.createParserNotCompressed(parserConfig, bytes, XContentHelper.xContentType(bytes)) ) { return parseTopLevelQuery(parser); } diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/wrappers/StableApiWrappers.java b/server/src/main/java/org/elasticsearch/indices/analysis/wrappers/StableApiWrappers.java index 86304f1a5b362..34dcce85da7a2 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/wrappers/StableApiWrappers.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/wrappers/StableApiWrappers.java @@ -204,7 +204,9 @@ private static <T> T createInstance( } return (T) constructor.newInstance(parameters); } else { - throw new IllegalStateException("Missing @Inject annotation for constructor with settings."); + throw new IllegalStateException( + "Missing @" + Inject.class.getCanonicalName() + " annotation for constructor with settings." + ); } } diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerMetrics.java b/server/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerMetrics.java index 3e018385ccc7a..0b858368f0b0b 100644 --- a/server/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerMetrics.java +++ b/server/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerMetrics.java @@ -8,111 +8,36 @@ package org.elasticsearch.indices.breaker; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.telemetry.metric.LongCounter; -import org.elasticsearch.telemetry.metric.MeterRegistry; -import java.util.Collections; -import java.util.Map; import java.util.Objects; /** - * A class collecting trip counters for circuit breakers (parent, field data, request, in flight requests and custom child circuit + * A class collecting trip count metrics for circuit breakers (parent, field data, request, in flight requests and custom child circuit * breakers). * - * The circuit breaker name is part of the (long) counter metric name instead of being an attribute because aggregating distinct circuit - * breakers trip counter values does not make sense, as for instance, summing es.breaker.field_data.trip.total and - * es.breaker.in_flight_requests.trip.total. - * Those counters trip for different reasons even if the underlying reason is "too much memory usage".
Aggregating them together results in - losing the ability to understand where the underlying issue is (too much field data, too many concurrent requests, too large concurrent - requests?). Aggregating each one of them separately to get, for instance, cluster level or cloud region level statistics is perfectly - fine, instead. - * - * NOTE: here we have the ability to register custom trip counters too. This ability is something a few plugins take advantage of nowadays. - * At the time of writing this class it is just "Eql" and "MachineLearning" which track memory used to store "things" that are - * application/plugin specific such as eql sequence query objects and inference model objects. As a result, we just have a couple of these - * custom counters. This means we have 6 circuit breaker counter metrics per node (parent, field_data, request, in_flight_requests, - * eql_sequence and model_inference). We register them a bit differently to keep the ability for plugins to define their own circuit breaker - * trip counters. + * The circuit breaker name is used as an attribute so that we define a single counter metric where the name is mapped to a 'type' + attribute. The counter trips for different reasons even if the underlying reason is "too much memory usage". Aggregating them together + results in losing the ability to understand where the underlying issue is (too much field data, too many concurrent requests, too large + concurrent requests?). As a result we advise in aggregation queries not to "aggregate away" the type attribute so that you treat each + circuit breaker as a separate counter. */ public class CircuitBreakerMetrics { - public static final CircuitBreakerMetrics NOOP = new CircuitBreakerMetrics(TelemetryProvider.NOOP, Collections.emptyMap()); - public static final String ES_BREAKER_PARENT_TRIP_COUNT_TOTAL = "es.breaker.parent.trip.total"; - public static final String ES_BREAKER_FIELD_DATA_TRIP_COUNT_TOTAL = "es.breaker.field_data.trip.total"; - public static final String ES_BREAKER_REQUEST_TRIP_COUNT_TOTAL = "es.breaker.request.trip.total"; - public static final String ES_BREAKER_IN_FLIGHT_REQUESTS_TRIP_COUNT_TOTAL = "es.breaker.in_flight_requests.trip.total"; - - private static final String ES_BREAKER_CUSTOM_TRIP_COUNT_TOTAL_TEMPLATE = "es.breaker.%s.trip.total"; - private final MeterRegistry registry; - private final LongCounter parentTripCountTotal; - private final LongCounter fielddataTripCountTotal; - private final LongCounter requestTripCountTotal; - private final LongCounter inFlightRequestsCountTotal; - private final Map<String, LongCounter> customTripCountsTotal; - - private CircuitBreakerMetrics( - final MeterRegistry registry, - final LongCounter parentTripCountTotal, - final LongCounter fielddataTripCountTotal, - final LongCounter requestTripCountTotal, - final LongCounter inFlightRequestsCountTotal, - final Map<String, LongCounter> customTripCountsTotal - ) { - this.registry = registry; - this.parentTripCountTotal = parentTripCountTotal; - this.fielddataTripCountTotal = fielddataTripCountTotal; - this.requestTripCountTotal = requestTripCountTotal; - this.inFlightRequestsCountTotal = inFlightRequestsCountTotal; - this.customTripCountsTotal = customTripCountsTotal; - } - - public CircuitBreakerMetrics(final TelemetryProvider telemetryProvider, final Map<String, LongCounter> customTripCounters) { - this( - telemetryProvider.getMeterRegistry(), - telemetryProvider.getMeterRegistry() - .registerLongCounter(ES_BREAKER_PARENT_TRIP_COUNT_TOTAL, "Parent circuit breaker trip count", "count"),
telemetryProvider.getMeterRegistry() - .registerLongCounter(ES_BREAKER_FIELD_DATA_TRIP_COUNT_TOTAL, "Field data circuit breaker trip count", "count"), - telemetryProvider.getMeterRegistry() - .registerLongCounter(ES_BREAKER_REQUEST_TRIP_COUNT_TOTAL, "Request circuit breaker trip count", "count"), - telemetryProvider.getMeterRegistry() - .registerLongCounter( - ES_BREAKER_IN_FLIGHT_REQUESTS_TRIP_COUNT_TOTAL, - "In-flight requests circuit breaker trip count", - "count" - ), - customTripCounters - ); - } - - public LongCounter getParentTripCountTotal() { - return parentTripCountTotal; - } + public static final CircuitBreakerMetrics NOOP = new CircuitBreakerMetrics(TelemetryProvider.NOOP); + public static final String ES_BREAKER_TRIP_COUNT_TOTAL = "es.breaker.trip.total"; + private final LongCounter tripCount; - public LongCounter getFielddataTripCountTotal() { - return fielddataTripCountTotal; + private CircuitBreakerMetrics(final LongCounter tripCount) { + this.tripCount = tripCount; } - public LongCounter getRequestTripCountTotal() { - return requestTripCountTotal; + public CircuitBreakerMetrics(final TelemetryProvider telemetryProvider) { + this(telemetryProvider.getMeterRegistry().registerLongCounter(ES_BREAKER_TRIP_COUNT_TOTAL, "Circuit breaker trip count", "count")); } - public LongCounter getInFlightRequestsCountTotal() { - return inFlightRequestsCountTotal; - } - - public Map getCustomTripCountsTotal() { - return customTripCountsTotal; - } - - public LongCounter getCustomTripCount(final String name, final LongCounter theDefault) { - return this.customTripCountsTotal.getOrDefault(name, theDefault); - } - - public LongCounter getCustomTripCount(final String name) { - return this.customTripCountsTotal.getOrDefault(name, LongCounter.NOOP); + public LongCounter getTripCount() { + return tripCount; } @Override @@ -120,53 +45,17 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; CircuitBreakerMetrics that = (CircuitBreakerMetrics) o; - return Objects.equals(registry, that.registry) - && Objects.equals(parentTripCountTotal, that.parentTripCountTotal) - && Objects.equals(fielddataTripCountTotal, that.fielddataTripCountTotal) - && Objects.equals(requestTripCountTotal, that.requestTripCountTotal) - && Objects.equals(inFlightRequestsCountTotal, that.inFlightRequestsCountTotal) - && Objects.equals(customTripCountsTotal, that.customTripCountsTotal); + return Objects.equals(tripCount, that.tripCount); } @Override public int hashCode() { - return Objects.hash( - registry, - parentTripCountTotal, - fielddataTripCountTotal, - requestTripCountTotal, - inFlightRequestsCountTotal, - customTripCountsTotal - ); + return Objects.hash(tripCount); } @Override public String toString() { - return "CircuitBreakerMetrics{" - + "registry=" - + registry - + ", parentTripCountTotal=" - + parentTripCountTotal - + ", fielddataTripCountTotal=" - + fielddataTripCountTotal - + ", requestTripCountTotal=" - + requestTripCountTotal - + ", inFlightRequestsCountTotal=" - + inFlightRequestsCountTotal - + ", customTripCountsTotal=" - + customTripCountsTotal - + '}'; - } - - public void addCustomCircuitBreaker(final CircuitBreaker circuitBreaker) { - if (this.customTripCountsTotal.containsKey(circuitBreaker.getName())) { - throw new IllegalArgumentException("A circuit circuitBreaker named [" + circuitBreaker.getName() + " already exists"); - } - final String canonicalName = Strings.format(ES_BREAKER_CUSTOM_TRIP_COUNT_TOTAL_TEMPLATE, 
circuitBreaker.getName()); - this.customTripCountsTotal.put( - canonicalName, - registry.registerLongCounter(canonicalName, "A custom circuit circuitBreaker [" + circuitBreaker.getName() + "]", "count") - ); + return "CircuitBreakerMetrics{" + ", tripCount=" + tripCount + '}'; } } diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index 9e995c084a555..5a33af26e4a3f 100644 --- a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.core.Booleans; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.monitor.jvm.GcNames; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -170,7 +171,7 @@ public HierarchyCircuitBreakerService( childCircuitBreakers.put( CircuitBreaker.FIELDDATA, validateAndCreateBreaker( - metrics.getFielddataTripCountTotal(), + metrics.getTripCount(), new BreakerSettings( CircuitBreaker.FIELDDATA, FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(), @@ -183,7 +184,7 @@ public HierarchyCircuitBreakerService( childCircuitBreakers.put( CircuitBreaker.IN_FLIGHT_REQUESTS, validateAndCreateBreaker( - metrics.getInFlightRequestsCountTotal(), + metrics.getTripCount(), new BreakerSettings( CircuitBreaker.IN_FLIGHT_REQUESTS, IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(), @@ -196,7 +197,7 @@ public HierarchyCircuitBreakerService( childCircuitBreakers.put( CircuitBreaker.REQUEST, validateAndCreateBreaker( - metrics.getRequestTripCountTotal(), + metrics.getTripCount(), new BreakerSettings( CircuitBreaker.REQUEST, REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(), @@ -214,10 +215,7 @@ public HierarchyCircuitBreakerService( + "] exists. Circuit breaker names must be unique" ); } - childCircuitBreakers.put( - breakerSettings.getName(), - validateAndCreateBreaker(metrics.getCustomTripCount(breakerSettings.getName()), breakerSettings) - ); + childCircuitBreakers.put(breakerSettings.getName(), validateAndCreateBreaker(metrics.getTripCount(), breakerSettings)); } this.breakers = Map.copyOf(childCircuitBreakers); this.parentSettings = new BreakerSettings( @@ -261,7 +259,7 @@ public HierarchyCircuitBreakerService( this.overLimitStrategyFactory = overLimitStrategyFactory; this.overLimitStrategy = overLimitStrategyFactory.apply(this.trackRealMemoryUsage); - this.parentTripCountTotalMetric = metrics.getParentTripCountTotal(); + this.parentTripCountTotalMetric = metrics.getTripCount(); } private void updateCircuitBreakerSettings(String name, ByteSizeValue newLimit, Double newOverhead) { @@ -509,9 +507,12 @@ static OverLimitStrategy createOverLimitStrategy(boolean trackRealMemoryUsage) { if (trackRealMemoryUsage && jvmInfo.useG1GC().equals("true") // messing with GC is "dangerous" so we apply an escape hatch. Not intended to be used. 
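// (the property below defaults to true, so this escape hatch only takes effect when an operator explicitly sets it to false)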
&& Booleans.parseBoolean(System.getProperty("es.real_memory_circuit_breaker.g1_over_limit_strategy.enabled"), true)) { - TimeValue lockTimeout = TimeValue.timeValueMillis( - Integer.parseInt(System.getProperty("es.real_memory_circuit_breaker.g1_over_limit_strategy.lock_timeout_ms", "500")) + + long lockTimeoutInMillis = Integer.parseInt( + System.getProperty("es.real_memory_circuit_breaker.g1_over_limit_strategy.lock_timeout_ms", "500") ); + TimeValue lockTimeout = TimeValue.timeValueMillis(lockTimeoutInMillis); + TimeValue fullGCLockTimeout = TimeValue.timeValueMillis(lockTimeoutInMillis); // hardcode interval, do not want any tuning of it outside code changes. return new G1OverLimitStrategy( jvmInfo, @@ -519,7 +520,9 @@ static OverLimitStrategy createOverLimitStrategy(boolean trackRealMemoryUsage) { createYoungGcCountSupplier(), System::currentTimeMillis, 500, - lockTimeout + 5000, + lockTimeout, + fullGCLockTimeout ); } else { return memoryUsed -> memoryUsed; @@ -552,10 +555,18 @@ static class G1OverLimitStrategy implements OverLimitStrategy { private final LongSupplier gcCountSupplier; private final LongSupplier timeSupplier; private final TimeValue lockTimeout; + + // The lock acquisition timeout when we are running a full GC + private final TimeValue fullGCLockTimeout; private final long maxHeap; private long lastCheckTime = Long.MIN_VALUE; + private long lastFullGCTime = Long.MIN_VALUE; private final long minimumInterval; + private volatile boolean performingFullGC = false; + + // Minimum interval before triggering another full GC + private final long fullGCMinimumInterval; private long blackHole; private final ReleasableLock lock = new ReleasableLock(new ReentrantLock()); @@ -568,14 +579,18 @@ static class G1OverLimitStrategy implements OverLimitStrategy { LongSupplier gcCountSupplier, LongSupplier timeSupplier, long minimumInterval, - TimeValue lockTimeout + long fullGCMinimumInterval, + TimeValue lockTimeout, + TimeValue fullGCLockTimeout ) { this.lockTimeout = lockTimeout; + this.fullGCLockTimeout = fullGCLockTimeout; assert minimumInterval > 0; this.currentMemoryUsageSupplier = currentMemoryUsageSupplier; this.gcCountSupplier = gcCountSupplier; this.timeSupplier = timeSupplier; this.minimumInterval = minimumInterval; + this.fullGCMinimumInterval = fullGCMinimumInterval; this.maxHeap = jvmInfo.getMem().getHeapMax().getBytes(); long g1RegionSize = jvmInfo.getG1RegionSize(); if (g1RegionSize <= 0) { @@ -602,50 +617,23 @@ static long fallbackRegionSize(JvmInfo jvmInfo) { return regionSize; } + @SuppressForbidden(reason = "Prefer full GC to OOM or CBE") + private static void performFullGC() { + System.gc(); + } + @Override public MemoryUsage overLimit(MemoryUsage memoryUsed) { - boolean leader = false; - int allocationIndex = 0; - long allocationDuration = 0; - long begin = 0; + + TriggerGCResult result = TriggerGCResult.EMPTY; int attemptNoCopy = 0; + try (ReleasableLock locked = lock.tryAcquire(lockTimeout)) { if (locked != null) { attemptNoCopy = ++this.attemptNo; - begin = timeSupplier.getAsLong(); - leader = begin >= lastCheckTime + minimumInterval; - overLimitTriggered(leader); - if (leader) { - long initialCollectionCount = gcCountSupplier.getAsLong(); - logger.info("attempting to trigger G1GC due to high heap usage [{}]", memoryUsed.baseUsage); - long localBlackHole = 0; - // number of allocations, corresponding to (approximately) number of free regions + 1 - int allocationCount = Math.toIntExact((maxHeap - memoryUsed.baseUsage) / g1RegionSize + 1); - // allocations of 
half-region size becomes single humongous alloc, thus taking up a full region. - int allocationSize = (int) (g1RegionSize >> 1); - long maxUsageObserved = memoryUsed.baseUsage; - for (; allocationIndex < allocationCount; ++allocationIndex) { - long current = currentMemoryUsageSupplier.getAsLong(); - if (current >= maxUsageObserved) { - maxUsageObserved = current; - } else { - // we observed a memory drop, so some GC must have occurred - break; - } - if (initialCollectionCount != gcCountSupplier.getAsLong()) { - break; - } - localBlackHole += new byte[allocationSize].hashCode(); - } - - blackHole += localBlackHole; - logger.trace("black hole [{}]", blackHole); - - long now = timeSupplier.getAsLong(); - this.lastCheckTime = now; - allocationDuration = now - begin; - this.attemptNo = 0; - } + result = tryTriggerGC(memoryUsed); + } else { + logger.info("could not acquire lock within {} when attempting to trigger G1GC due to high heap usage", lockTimeout); } } catch (InterruptedException e) { logger.info("could not acquire lock when attempting to trigger G1GC due to high heap usage"); @@ -653,20 +641,45 @@ public MemoryUsage overLimit(MemoryUsage memoryUsed) { // fallthrough } + if (performingFullGC && attemptNoCopy == 0) { + // Another thread is currently performing a full GC, and we were not able to try (lock acquire timeout) + // Since the full GC thread may hold the lock for longer, try again for an additional timeout + logger.info( + "could not acquire lock within {} while another thread was performing a full GC, waiting again for {}", + lockTimeout, + fullGCLockTimeout + ); + try (ReleasableLock locked = lock.tryAcquire(fullGCLockTimeout)) { + if (locked != null) { + attemptNoCopy = ++this.attemptNo; + result = tryTriggerGC(memoryUsed); + } else { + logger.info( + "could not acquire lock within {} when attempting to trigger G1GC due to high heap usage", + fullGCLockTimeout + ); + } + } catch (InterruptedException e) { + logger.info("could not acquire lock when attempting to trigger G1GC due to high heap usage"); + Thread.currentThread().interrupt(); + // fallthrough + } + } + final long current = currentMemoryUsageSupplier.getAsLong(); if (current < memoryUsed.baseUsage) { - if (leader) { + if (result.gcAttempted()) { logger.info( "GC did bring memory usage down, before [{}], after [{}], allocations [{}], duration [{}]", memoryUsed.baseUsage, current, - allocationIndex, - allocationDuration + result.allocationIndex(), + result.allocationDuration() ); } else if (attemptNoCopy < 10 || Long.bitCount(attemptNoCopy) == 1) { logger.info( "memory usage down after [{}], before [{}], after [{}]", - begin - lastCheckTime, + result.timeSinceLastCheck(), memoryUsed.baseUsage, current ); @@ -678,18 +691,18 @@ public MemoryUsage overLimit(MemoryUsage memoryUsed) { memoryUsed.permanentChildUsage ); } else { - if (leader) { + if (result.gcAttempted()) { logger.info( "GC did not bring memory usage down, before [{}], after [{}], allocations [{}], duration [{}]", memoryUsed.baseUsage, current, - allocationIndex, - allocationDuration + result.allocationIndex(), + result.allocationDuration() ); } else if (attemptNoCopy < 10 || Long.bitCount(attemptNoCopy) == 1) { logger.info( "memory usage not down after [{}], before [{}], after [{}]", - begin - lastCheckTime, + result.timeSinceLastCheck(), memoryUsed.baseUsage, current ); @@ -699,6 +712,66 @@ public MemoryUsage overLimit(MemoryUsage memoryUsed) { } } + private TriggerGCResult tryTriggerGC(MemoryUsage memoryUsed) { + long begin = timeSupplier.getAsLong(); + 
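// a young-GC attempt is led only if minimumInterval has elapsed since the last check +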
boolean canPerformGC = begin >= lastCheckTime + minimumInterval; + int allocationIndex = 0; + + overLimitTriggered(canPerformGC); + + if (canPerformGC) { + long initialCollectionCount = gcCountSupplier.getAsLong(); + logger.info("attempting to trigger G1GC due to high heap usage [{}]", memoryUsed.baseUsage); + long localBlackHole = 0; + // number of allocations, corresponding to (approximately) number of free regions + 1 + int allocationCount = Math.toIntExact((maxHeap - memoryUsed.baseUsage) / g1RegionSize + 1); + // allocations of half-region size becomes single humongous alloc, thus taking up a full region. + int allocationSize = (int) (g1RegionSize >> 1); + long maxUsageObserved = memoryUsed.baseUsage; + for (; allocationIndex < allocationCount; ++allocationIndex) { + long current = currentMemoryUsageSupplier.getAsLong(); + if (current >= maxUsageObserved) { + maxUsageObserved = current; + } else { + // we observed a memory drop, so some GC must have occurred + break; + } + if (initialCollectionCount != gcCountSupplier.getAsLong()) { + break; + } + localBlackHole += new byte[allocationSize].hashCode(); + } + + blackHole += localBlackHole; + logger.trace("black hole [{}]", blackHole); + + this.lastCheckTime = timeSupplier.getAsLong(); + this.attemptNo = 0; + } + + long reclaimedMemory = memoryUsed.baseUsage - currentMemoryUsageSupplier.getAsLong(); + // TODO: use a threshold? Relative to % of memory? + if (reclaimedMemory <= 0) { + long now = timeSupplier.getAsLong(); + boolean canPerformFullGC = now >= lastFullGCTime + fullGCMinimumInterval; + if (canPerformFullGC) { + // Enough time passed between 2 full GC fallbacks + performingFullGC = true; + logger.info("attempt to trigger young GC failed to bring memory down, triggering full GC"); + performFullGC(); + performingFullGC = false; + this.lastFullGCTime = timeSupplier.getAsLong(); + } + } + + long allocationDuration = timeSupplier.getAsLong() - begin; + return new TriggerGCResult(canPerformGC, allocationIndex, allocationDuration, begin - lastCheckTime); + } + + private record TriggerGCResult(boolean gcAttempted, int allocationIndex, long allocationDuration, long timeSinceLastCheck) { + private static final TriggerGCResult EMPTY = new TriggerGCResult(false, 0, 0, 0); + } + void overLimitTriggered(boolean leader) { // for tests to override. 
} diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 84385ee04c2dd..618bc847e3a7f 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -565,19 +565,17 @@ void phase1(IndexCommit snapshot, long startingSeqNo, IntSupplier translogOps, A // but we must still create a retention lease .newForked(leaseListener -> createRetentionLease(startingSeqNo, leaseListener)) // and then compute the result of sending no files - .andThen((l, ignored) -> { + .andThenApply(ignored -> { final TimeValue took = stopWatch.totalTime(); logger.trace("recovery [phase1]: took [{}]", took); - l.onResponse( - new SendFileResult( - Collections.emptyList(), - Collections.emptyList(), - 0L, - Collections.emptyList(), - Collections.emptyList(), - 0L, - took - ) + return new SendFileResult( + Collections.emptyList(), + Collections.emptyList(), + 0L, + Collections.emptyList(), + Collections.emptyList(), + 0L, + took ); }) // and finally respond @@ -751,19 +749,17 @@ void run(ActionListener listener) { cleanFiles(store, recoverySourceMetadata, () -> translogOps, lastKnownGlobalCheckpoint, finalRecoveryPlanListener); }) // compute the result - .andThen((resultListener, ignored) -> { + .andThenApply(ignored -> { final TimeValue took = stopWatch.totalTime(); logger.trace("recovery [phase1]: took [{}]", took); - resultListener.onResponse( - new SendFileResult( - shardRecoveryPlan.getFilesToRecoverNames(), - shardRecoveryPlan.getFilesToRecoverSizes(), - shardRecoveryPlan.getTotalSize(), - shardRecoveryPlan.getFilesPresentInTargetNames(), - shardRecoveryPlan.getFilesPresentInTargetSizes(), - shardRecoveryPlan.getExistingSize(), - took - ) + return new SendFileResult( + shardRecoveryPlan.getFilesToRecoverNames(), + shardRecoveryPlan.getFilesToRecoverSizes(), + shardRecoveryPlan.getTotalSize(), + shardRecoveryPlan.getFilesPresentInTargetNames(), + shardRecoveryPlan.getFilesPresentInTargetSizes(), + shardRecoveryPlan.getExistingSize(), + took ); }) // and finally respond diff --git a/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java b/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java index df5104e9164ab..366e7c9149cc5 100644 --- a/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java +++ b/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java @@ -14,19 +14,18 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptType; import org.elasticsearch.script.TemplateScript; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -640,9 +639,11 @@ private static Script extractConditional(Map config) throws IOEx if (scriptSource != null) { 
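// the conditional is rebuilt as JSON x-content below so that Script.parse always sees a normalized, map-based script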
try ( XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent).map(normalizeScript(scriptSource)); - InputStream stream = BytesReference.bytes(builder).streamInput(); - XContentParser parser = XContentType.JSON.xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + BytesReference.bytes(builder), + XContentType.JSON + ) ) { return Script.parse(parser); } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java index 1b2d8056ac437..31d947d548ccf 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java @@ -19,6 +19,7 @@ import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.script.CtxMap; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.TemplateScript; import java.time.ZoneOffset; @@ -189,18 +190,6 @@ public T getFieldValue(String path, Class clazz, boolean ignoreMissing) { return cast(path, context, clazz); } - /** - * Returns the value contained in the document with the provided templated path - * @param pathTemplate The path within the document in dot-notation - * @param clazz The expected class of the field value - * @return the value for the provided path if existing, null otherwise - * @throws IllegalArgumentException if the pathTemplate is null, empty, invalid, if the field doesn't exist, - * or if the field that is found at the provided path is not of the expected type. - */ - public T getFieldValue(TemplateScript.Factory pathTemplate, Class clazz) { - return getFieldValue(renderTemplate(pathTemplate), clazz); - } - /** * Returns the value contained in the document for the provided path as a byte array. * If the path value is a string, a base64 decode operation will happen. @@ -239,16 +228,6 @@ public byte[] getFieldValueAsBytes(String path, boolean ignoreMissing) { } } - /** - * Checks whether the document contains a value for the provided templated path - * @param fieldPathTemplate the template for the path within the document in dot-notation - * @return true if the document contains a value for the field, false otherwise - * @throws IllegalArgumentException if the path is null, empty or invalid - */ - public boolean hasField(TemplateScript.Factory fieldPathTemplate) { - return hasField(renderTemplate(fieldPathTemplate)); - } - /** * Checks whether the document contains a value for the provided path * @param path The path within the document in dot-notation @@ -329,15 +308,6 @@ public boolean hasField(String path, boolean failOutOfRange) { return false; } - /** - * Removes the field identified by the provided path. - * @param fieldPathTemplate Resolves to the path with dot-notation within the document - * @throws IllegalArgumentException if the path is null, empty, invalid or if the field doesn't exist. - */ - public void removeField(TemplateScript.Factory fieldPathTemplate) { - removeField(renderTemplate(fieldPathTemplate)); - } - /** * Removes the field identified by the provided path. * @param path the path of the field to be removed @@ -468,17 +438,13 @@ public void appendFieldValue(String path, Object value, boolean allowDuplicates) * the provided value will be added to the newly created list. 
* Supports multiple values too provided in forms of list, in that case all the values will be appended to the * existing (or newly created) list. - * @param fieldPathTemplate Resolves to the path with dot-notation within the document + * @param path The path within the document in dot-notation * @param valueSource The value source that will produce the value or values to append to the existing ones * @param allowDuplicates When false, any values that already exist in the field will not be added * @throws IllegalArgumentException if the path is null, empty or invalid. */ - public void appendFieldValue(TemplateScript.Factory fieldPathTemplate, ValueSource valueSource, boolean allowDuplicates) { - appendFieldValue( - fieldPathTemplate.newInstance(templateModel).execute(), - valueSource.copyAndResolve(templateModel), - allowDuplicates - ); + public void appendFieldValue(String path, ValueSource valueSource, boolean allowDuplicates) { + appendFieldValue(path, valueSource.copyAndResolve(templateModel), allowDuplicates); } /** @@ -499,26 +465,26 @@ public void setFieldValue(String path, Object value) { * Sets the provided value to the provided path in the document. * Any non existing path element will be created. If the last element is a list, * the value will replace the existing list. - * @param fieldPathTemplate Resolves to the path with dot-notation within the document + * @param path The path within the document in dot-notation * @param valueSource The value source that will produce the value to put in for the path key * @throws IllegalArgumentException if the path is null, empty, invalid or if the value cannot be set to the * item identified by the provided path. */ - public void setFieldValue(TemplateScript.Factory fieldPathTemplate, ValueSource valueSource) { - setFieldValue(fieldPathTemplate.newInstance(templateModel).execute(), valueSource.copyAndResolve(templateModel)); + public void setFieldValue(String path, ValueSource valueSource) { + setFieldValue(path, valueSource.copyAndResolve(templateModel)); } /** * Sets the provided value to the provided path in the document. * Any non existing path element will be created. If the last element is a list, * the value will replace the existing list. - * @param fieldPathTemplate Resolves to the path with dot-notation within the document + * @param path The path within the document in dot-notation * @param valueSource The value source that will produce the value to put in for the path key * @param ignoreEmptyValue The flag to determine whether to exit quietly when the value produced by TemplatedValue is null or empty * @throws IllegalArgumentException if the path is null, empty, invalid or if the value cannot be set to the * item identified by the provided path. */ - public void setFieldValue(TemplateScript.Factory fieldPathTemplate, ValueSource valueSource, boolean ignoreEmptyValue) { + public void setFieldValue(String path, ValueSource valueSource, boolean ignoreEmptyValue) { Object value = valueSource.copyAndResolve(templateModel); if (ignoreEmptyValue && valueSource instanceof ValueSource.TemplatedValue) { if (value == null) { @@ -530,20 +496,20 @@ public void setFieldValue(TemplateScript.Factory fieldPathTemplate, ValueSource } } - setFieldValue(fieldPathTemplate.newInstance(templateModel).execute(), value); + setFieldValue(path, value); } /** * Sets the provided value to the provided path in the document. * Any non existing path element will be created. If the last element is a list, * the value will replace the existing list. 
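+ * (Callers that previously passed a TemplateScript.Factory here should render it once via renderTemplate(...) and pass the
+ * resulting literal path; see the renderTemplate javadoc below.)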
- * @param fieldPathTemplate Resolves to the path with dot-notation within the document + * @param path The path within the document in dot-notation * @param value The value to put in for the path key * @param ignoreEmptyValue The flag to determine whether to exit quietly when the value produced by TemplatedValue is null or empty * @throws IllegalArgumentException if the path is null, empty, invalid or if the value cannot be set to the * item identified by the provided path. */ - public void setFieldValue(TemplateScript.Factory fieldPathTemplate, Object value, boolean ignoreEmptyValue) { + public void setFieldValue(String path, Object value, boolean ignoreEmptyValue) { if (ignoreEmptyValue) { if (value == null) { return; @@ -555,7 +521,7 @@ public void setFieldValue(TemplateScript.Factory fieldPathTemplate, Object value } } - setFieldValue(fieldPathTemplate.newInstance(templateModel).execute(), value); + setFieldValue(path, value); } private void setFieldValue(String path, Object value, boolean append, boolean allowDuplicates) { @@ -724,6 +690,21 @@ private static T cast(String path, Object object, Class clazz) { ); } + /** + * Renders a template into a string. This allows field access via both literal fields like {@code "foo.bar.baz"} and dynamic fields + * like {@code "{{other_field}}"} (that is, look up the value of the 'other_field' in the document and then use the resulting string as + * the field to operate on). + *
<p>
+ * See {@link ConfigurationUtils#compileTemplate(String, String, String, String, ScriptService)} and associated methods, which + * create these {@link TemplateScript.Factory} instances. + *
<p>
+ * Note: for clarity and efficiency reasons, it is advisable to invoke this method outside IngestDocument itself -- fields should be + * rendered by a caller (once), and then passed to an ingest document repeatedly. There are enough methods on IngestDocument that + * operate on String paths already, we don't want to mirror all of them with twin methods that accept a template. + * + * @param template the template or literal string to evaluate + * @return a literal string field path + */ public String renderTemplate(TemplateScript.Factory template) { return template.newInstance(templateModel).execute(); } diff --git a/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java b/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java index 16b0b9a10d914..476ad516aab80 100644 --- a/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java +++ b/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java @@ -62,7 +62,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { this.stats = new NodeStatsCache(TimeValue.timeValueMinutes(1)); metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.get.total", + "es.indices.get.total", "Total number of get operations", "operation", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getGet().getCount()) @@ -71,7 +71,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.get.time", + "es.indices.get.time", "Time in milliseconds spent performing get operations.", "milliseconds", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getGet().getTimeInMillis()) @@ -80,7 +80,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.search.fetch.total", + "es.indices.search.fetch.total", "Total number of fetch operations.", "operation", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getSearch().getTotal().getFetchCount()) @@ -89,7 +89,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.search.fetch.time", + "es.indices.search.fetch.time", "Time in milliseconds spent performing fetch operations.", "milliseconds", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getSearch().getTotal().getFetchTimeInMillis()) @@ -98,7 +98,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.merge.total", + "es.indices.merge.total", "Total number of merge operations.", "operation", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getMerge().getTotal()) @@ -107,7 +107,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.merge.time", + "es.indices.merge.time", "Time in milliseconds spent performing merge operations.", "milliseconds", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getMerge().getTotalTimeInMillis()) @@ -116,7 +116,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.translog.operations", + "es.translog.operations.total", "Number of transaction log operations.", "operation", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getTranslog().estimatedNumberOfOperations()) @@ -125,7 +125,7 @@ private void 
registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.translog.size", + "es.translog.size", "Size, in bytes, of the transaction log.", "bytes", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getTranslog().getTranslogSizeInBytes()) @@ -134,7 +134,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.translog.uncommitted_operations", + "es.translog.uncommitted_operations.total", "Number of uncommitted transaction log operations.", "operations", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getTranslog().getUncommittedOperations()) @@ -143,7 +143,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.translog.uncommitted_size", + "es.translog.uncommitted_operations.size", "Size, in bytes, of uncommitted transaction log operations.", "bytes", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getTranslog().getUncommittedSizeInBytes()) @@ -152,7 +152,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.translog.earliest_last_modified_age", + "es.translog.earliest_last_modified.time", "Earliest last modified age for the transaction log.", "time", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getTranslog().getEarliestLastModifiedAge()) @@ -161,7 +161,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.transport.rx_size", + "es.transport.rx.size", "Size, in bytes, of RX packets received by the node during internal cluster communication.", "bytes", () -> new LongWithAttributes(stats.getOrRefresh().getTransport().getRxSize().getBytes()) @@ -170,7 +170,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.transport.tx_size", + "es.transport.tx.size", "Size, in bytes, of TX packets sent by the node during internal cluster communication.", "bytes", () -> new LongWithAttributes(stats.getOrRefresh().getTransport().getTxSize().getBytes()) @@ -179,7 +179,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.jvm.mem.pools.young.used", + "es.jvm.mem.pools.young.size", "Memory, in bytes, used by the young generation heap.", "bytes", () -> new LongWithAttributes(bytesUsedByGCGen(stats.getOrRefresh().getJvm().getMem(), GcNames.YOUNG)) @@ -188,7 +188,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.jvm.mem.pools.survivor.used", + "es.jvm.mem.pools.survivor.size", "Memory, in bytes, used by the survivor space.", "bytes", () -> new LongWithAttributes(bytesUsedByGCGen(stats.getOrRefresh().getJvm().getMem(), GcNames.SURVIVOR)) @@ -197,7 +197,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.jvm.mem.pools.old.used", + "es.jvm.mem.pools.old.size", "Memory, in bytes, used by the old generation heap.", "bytes", () -> new LongWithAttributes(bytesUsedByGCGen(stats.getOrRefresh().getJvm().getMem(), GcNames.OLD)) @@ -206,7 +206,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.fs.io_stats.io_time.total", + 
"es.fs.io_stats.time.total", "The total time in millis spent performing I/O operations across all devices used by Elasticsearch.", "milliseconds", () -> new LongWithAttributes(stats.getOrRefresh().getFs().getIoStats().getTotalIOTimeMillis()) @@ -215,7 +215,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.indexing.docs.total", + "es.indexing.docs.total", "Total number of indexed documents", "documents", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getIndexCount()) @@ -224,7 +224,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.indexing.docs.current", + "es.indexing.docs.total", "Current number of indexing documents", "documents", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getIndexCurrent()) @@ -233,7 +233,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.indexing.failed.total", + "es.indices.indexing.failed.total", "Total number of failed indexing operations", "operations", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getIndexFailedCount()) @@ -242,7 +242,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.deletion.docs.total", + "es.indices.deletion.docs.total", "Total number of deleted documents", "documents", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getDeleteCount()) @@ -251,7 +251,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.deletion.docs.current", + "es.indices.deletion.docs.total", "Current number of deleting documents", "documents", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getDeleteCurrent()) @@ -260,7 +260,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.indexing.time", + "es.indices.indexing.time", "Total indices indexing time", "milliseconds", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getIndexTime().millis()) @@ -269,7 +269,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.deletion.time", + "es.indices.deletion.time", "Total indices deletion time", "milliseconds", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getDeleteTime().millis()) @@ -278,7 +278,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.throttle.time", + "es.indices.throttle.time", "Total indices throttle time", "milliseconds", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getThrottleTime().millis()) @@ -287,7 +287,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.noop.total", + "es.indices.noop.total", "Total number of noop shard operations", "operations", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getNoopUpdateCount()) @@ -296,7 +296,7 @@ private void 
registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.indexing.coordinating_operations.memory.size.total", + "es.indexing.coordinating_operations.size", "Total number of memory bytes consumed by coordinating operations", "bytes", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getTotalCoordinatingBytes()) @@ -305,7 +305,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.indexing.coordinating_operations.count.total", + "es.indexing.coordinating_operations.total", "Total number of coordinating operations", "operations", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getTotalCoordinatingOps()) @@ -314,7 +314,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.indexing.coordinating_operations.memory.size.current", + "es.indexing.coordinating_operations.size", "Current number of memory bytes consumed by coordinating operations", "bytes", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getCurrentCoordinatingBytes()) @@ -323,7 +323,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.indexing.coordinating_operations.count.current", + "es.indexing.coordinating_operations.total", "Current number of coordinating operations", "operations", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getCurrentCoordinatingOps()) @@ -332,7 +332,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.indexing.coordinating_operations.rejections.total", + "es.indexing.coordinating_operations.rejections.total", "Total number of coordinating operations rejections", "operations", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getCoordinatingRejections()) @@ -341,7 +341,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.indexing.primary_operations.memory.size.total", + "es.indexing.primary_operations.size", "Total number of memory bytes consumed by primary operations", "bytes", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getTotalPrimaryBytes()) @@ -350,7 +350,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.indexing.primary_operations.count.total", + "es.indexing.primary_operations.total", "Total number of primary operations", "operations", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getTotalPrimaryOps()) @@ -359,7 +359,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.indexing.primary_operations.memory.size.current", + "es.indexing.primary_operations.size", "Current number of memory bytes consumed by primary operations", "bytes", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getCurrentPrimaryBytes()) @@ -368,7 +368,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.indexing.primary_operations.count.current", + "es.indexing.primary_operations.total", "Current number 
of primary operations", "operations", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getCurrentPrimaryOps()) @@ -377,7 +377,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.indexing.primary_operations.rejections.total", + "es.indexing.primary_operations.rejections.total", "Total number of primary operations rejections", "operations", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getPrimaryRejections()) @@ -386,7 +386,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.indexing.memory.limit.current", + "es.indexing.memory.limit.size", "Current memory limit for primary and coordinating operations", "bytes", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getMemoryLimit()) @@ -441,7 +441,7 @@ private NodeStats getNodeStats() { false, false, false, - false, + true, false ); } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 8f493977484a8..6da1dbc3e5c52 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.Version; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.client.internal.Client; @@ -40,6 +41,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.core.Assertions; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.PathUtils; @@ -75,6 +77,7 @@ import org.elasticsearch.snapshots.SnapshotShardsService; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.tasks.TaskCancellationService; +import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.tasks.TaskResultsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterPortSettings; @@ -97,14 +100,16 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.FutureTask; import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Function; import javax.net.ssl.SNIHostName; +import static org.elasticsearch.core.Strings.format; + /** * A node represent a node within a cluster ({@code cluster.name}). The {@link #client()} can be used * in order to use a {@link Client} to perform actions/operations against the cluster. 
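The Node hunks below replace the single "http-server-transport-stop" thread with a set of named shutdown tasks that each run on their own thread and are awaited together under the new `node.maximum_shutdown_grace_period` setting. A minimal, self-contained sketch of that pattern using only JDK types (the task names and durations here are illustrative stand-ins, not the actual Elasticsearch fields):

```java
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class GracefulShutdownSketch {
    public static void main(String[] args) throws Exception {
        // named shutdown tasks; the real change registers e.g. "http-server-transport-stop" and "async-search-stop"
        Map<String, Runnable> stoppers = new LinkedHashMap<>();
        stoppers.put("fast-stop", () -> sleepQuietly(100));
        stoppers.put("slow-stop", () -> sleepQuietly(5_000));

        // run each task on its own thread; always complete the future, even if the task throws
        Map<String, CompletableFuture<Void>> futures = new LinkedHashMap<>();
        for (Map.Entry<String, Runnable> entry : stoppers.entrySet()) {
            CompletableFuture<Void> future = new CompletableFuture<>();
            new Thread(() -> {
                try {
                    entry.getValue().run();
                } finally {
                    future.complete(null);
                }
            }, entry.getKey()).start();
            futures.put(entry.getKey(), future);
        }

        try {
            // wait for all tasks under a grace period; the diff treats a zero grace period as "wait indefinitely"
            CompletableFuture.allOf(futures.values().toArray(CompletableFuture[]::new)).get(1, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            var unfinished = futures.entrySet().stream().filter(en -> en.getValue().isDone() == false).map(Map.Entry::getKey).toList();
            System.err.println("timed out while waiting for graceful shutdown tasks: " + unfinished);
        }
    }

    private static void sleepQuietly(long millis) {
        try {
            Thread.sleep(millis);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
}
```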
@@ -148,6 +153,12 @@ public class Node implements Closeable { Property.NodeScope ); + public static final Setting<TimeValue> MAXIMUM_SHUTDOWN_TIMEOUT_SETTING = Setting.positiveTimeSetting( + "node.maximum_shutdown_grace_period", + TimeValue.timeValueMillis(0), + Setting.Property.NodeScope + ); + private final Lifecycle lifecycle = new Lifecycle(); /** @@ -580,17 +591,92 @@ public synchronized void close() throws IOException { */ public void prepareForClose() { HttpServerTransport httpServerTransport = injector.getInstance(HttpServerTransport.class); - FutureTask<Void> stopper = new FutureTask<>(httpServerTransport::close, null); - new Thread(stopper, "http-server-transport-stop").start(); - + Map<String, Runnable> stoppers = new HashMap<>(); + TimeValue maxTimeout = MAXIMUM_SHUTDOWN_TIMEOUT_SETTING.get(this.settings()); + stoppers.put("http-server-transport-stop", httpServerTransport::close); + stoppers.put("async-search-stop", () -> this.awaitSearchTasksComplete(maxTimeout)); if (terminationHandler != null) { - terminationHandler.handleTermination(); + stoppers.put("termination-handler-stop", terminationHandler::handleTermination); + } + + Map<String, CompletableFuture<Void>> futures = new HashMap<>(stoppers.size()); + for (var stopperEntry : stoppers.entrySet()) { + var future = new CompletableFuture<Void>(); + new Thread(() -> { + try { + stopperEntry.getValue().run(); + } catch (Exception ex) { + logger.warn("unexpected exception in shutdown task [" + stopperEntry.getKey() + "]", ex); + } finally { + future.complete(null); + } + }, stopperEntry.getKey()).start(); + futures.put(stopperEntry.getKey(), future); } + @SuppressWarnings(value = "rawtypes") // Can't make an array of parameterized types, but it complains if you leave the type out + CompletableFuture<Void> allStoppers = CompletableFuture.allOf(futures.values().toArray(new CompletableFuture[stoppers.size()])); + try { - stopper.get(); - } catch (Exception e) { - logger.warn("unexpected exception while waiting for http server to close", e); + if (maxTimeout.millis() == 0) { + FutureUtils.get(allStoppers); + } else { + FutureUtils.get(allStoppers, maxTimeout.millis(), TimeUnit.MILLISECONDS); + } + + } catch (ElasticsearchTimeoutException t) { + var unfinishedTasks = futures.entrySet() + .stream() + .filter(entry -> entry.getValue().isDone() == false) + .map(Map.Entry::getKey) + .toList(); + logger.warn("timed out while waiting for graceful shutdown tasks: " + unfinishedTasks); + } + } + + private void awaitSearchTasksComplete(TimeValue asyncSearchTimeout) { + TaskManager taskManager = injector.getInstance(TransportService.class).getTaskManager(); + long millisWaited = 0; + while (true) { + long searchTasksRemaining = taskManager.getTasks() + .values() + .stream() + .filter(task -> TransportSearchAction.TYPE.name().equals(task.getAction())) + .count(); + if (searchTasksRemaining == 0) { + logger.debug("all search tasks complete"); + return; + } else { + // Let the system work on those searches for a while. We're on a dedicated thread to manage app shutdown, so we + // literally just want to wait and not take up resources on this thread for now. Poll period chosen to allow short + // response times, but checking the tasks list is relatively expensive, and we don't want to waste CPU time we could + // be spending on finishing those searches.
+ final TimeValue pollPeriod = TimeValue.timeValueMillis(500); + millisWaited += pollPeriod.millis(); + if (millisWaited >= asyncSearchTimeout.millis()) { + logger.warn( + format( + "timed out after waiting [%s] for [%d] search tasks to finish", + asyncSearchTimeout.toString(), + searchTasksRemaining + ) + ); + return; + } + logger.debug(format("waiting for [%s] search tasks to finish, next poll in [%s]", searchTasksRemaining, pollPeriod)); + try { + Thread.sleep(pollPeriod.millis()); + } catch (InterruptedException ex) { + logger.warn( + format( + "interrupted while waiting [%s] for [%d] search tasks to finish", + asyncSearchTimeout.toString(), + searchTasksRemaining + ) + ); + return; + } + } } } diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 10d3d89d617fa..018abebdb7709 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.ingest.ReservedPipelineAction; import org.elasticsearch.action.search.SearchExecutionStatsCollector; import org.elasticsearch.action.search.SearchPhaseController; +import org.elasticsearch.action.search.SearchTransportAPMMetrics; import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.update.UpdateHelper; @@ -182,7 +183,6 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.telemetry.TelemetryProvider; -import org.elasticsearch.telemetry.metric.LongCounter; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; @@ -201,11 +201,11 @@ import java.util.Collection; import java.util.LinkedHashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.Set; -import java.util.TreeMap; import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.function.Supplier; @@ -383,6 +383,7 @@ private Settings createEnvironment(Environment initialEnvironment, NodeServicePr ); logger.info("JVM home [{}], using bundled JDK [{}]", System.getProperty("java.home"), jvmInfo.getUsingBundledJdk()); logger.info("JVM arguments {}", Arrays.toString(jvmInfo.getInputArguments())); + logger.info("Default Locale [{}]", Locale.getDefault()); if (Build.current().isProductionRelease() == false) { logger.warn( "version [{}] is a pre-release version of Elasticsearch and is not suitable for production", @@ -662,9 +663,8 @@ private void construct( IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class).toList()); modules.add(indicesModule); - final Map<String, LongCounter> customTripCounters = new TreeMap<>(); CircuitBreakerService circuitBreakerService = createCircuitBreakerService( - new CircuitBreakerMetrics(telemetryProvider, customTripCounters), + new CircuitBreakerMetrics(telemetryProvider), settingsModule.getSettings(), settingsModule.getClusterSettings() ); @@ -870,6 +870,7 @@ record PluginServiceInstances( telemetryProvider.getTracer() ); final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService); + final SearchTransportAPMMetrics searchTransportAPMMetrics = new SearchTransportAPMMetrics(telemetryProvider.getMeterRegistry());
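The `prepareForClose()` rewrite above runs each named shutdown task on its own thread, joins them through `CompletableFuture.allOf`, and bounds the wait with `node.maximum_shutdown_grace_period` (zero meaning an unbounded wait), while `awaitSearchTasksComplete` polls the task manager every 500ms. A self-contained, JDK-only sketch of the same orchestration pattern; the task names and durations here are invented for illustration:

```java
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public final class GracefulShutdownSketch {
    public static void main(String[] args) throws Exception {
        // Named shutdown tasks, each run on its own thread, as in prepareForClose().
        Map<String, Runnable> stoppers = new LinkedHashMap<>();
        stoppers.put("http-server-transport-stop", () -> sleepQuietly(100));
        stoppers.put("async-search-stop", () -> sleepQuietly(5_000)); // deliberately too slow

        Map<String, CompletableFuture<Void>> futures = new LinkedHashMap<>();
        stoppers.forEach((name, task) -> futures.put(name, CompletableFuture.runAsync(task)));

        try {
            // Bounded wait, standing in for the node.maximum_shutdown_grace_period setting.
            CompletableFuture.allOf(futures.values().toArray(new CompletableFuture<?>[0])).get(1, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            // Report the stragglers, mirroring the "timed out while waiting" log above.
            futures.forEach((name, future) -> {
                if (future.isDone() == false) {
                    System.out.println("timed out waiting for shutdown task [" + name + "]");
                }
            });
        }
    }

    private static void sleepQuietly(long millis) {
        try {
            Thread.sleep(millis);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
}
```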
final SearchTransportService searchTransportService = new SearchTransportService( transportService, client, @@ -1046,6 +1047,7 @@ record PluginServiceInstances( b.bind(MetadataCreateIndexService.class).toInstance(metadataCreateIndexService); b.bind(MetadataUpdateSettingsService.class).toInstance(metadataUpdateSettingsService); b.bind(SearchService.class).toInstance(searchService); + b.bind(SearchTransportAPMMetrics.class).toInstance(searchTransportAPMMetrics); b.bind(SearchTransportService.class).toInstance(searchTransportService); b.bind(SearchPhaseController.class).toInstance(new SearchPhaseController(searchService::aggReduceContextBuilder)); b.bind(Transport.class).toInstance(transport); @@ -1295,7 +1297,6 @@ private CircuitBreakerService createCircuitBreakerService( pluginBreakers.forEach(t -> { final CircuitBreaker circuitBreaker = circuitBreakerService.getBreaker(t.v2().getName()); t.v1().setCircuitBreaker(circuitBreaker); - metrics.addCustomCircuitBreaker(circuitBreaker); }); return circuitBreakerService; diff --git a/server/src/main/java/org/elasticsearch/plugins/ExtensionLoader.java b/server/src/main/java/org/elasticsearch/plugins/ExtensionLoader.java index 7dfb64c989ea2..5cf5f1b92e472 100644 --- a/server/src/main/java/org/elasticsearch/plugins/ExtensionLoader.java +++ b/server/src/main/java/org/elasticsearch/plugins/ExtensionLoader.java @@ -8,9 +8,10 @@ package org.elasticsearch.plugins; -import java.util.Locale; +import org.elasticsearch.core.Strings; + +import java.util.Optional; import java.util.ServiceLoader; -import java.util.function.Supplier; /** * A utility for loading SPI extensions. @@ -20,8 +21,7 @@ public class ExtensionLoader { /** * Loads a single SPI extension. * - * There should be no more than one extension found. If no service providers - * are found, the supplied fallback is used. + * There should be no more than one extension found. * * Note: A ServiceLoader is needed rather than the service class because ServiceLoaders * must be loaded by a module with the {@code uses} declaration. Since this * service classes it may load. Thus, the caller must load the ServiceLoader. * * @param loader a service loader instance to find the singleton extension in - * @param fallback a supplier for an instance if no extensions are found * @return an instance of the extension * @param <T> the SPI extension type */ - public static <T> T loadSingleton(ServiceLoader<T> loader, Supplier<T> fallback) { - var extensions = loader.stream().toList(); - if (extensions.size() > 1) { + public static <T> Optional<T> loadSingleton(ServiceLoader<T> loader) { + var extensions = loader.iterator(); + if (extensions.hasNext() == false) { + return Optional.empty(); + } + var ext = extensions.next(); + if (extensions.hasNext()) { // It would be really nice to give the actual extension class here directly, but that would require passing it // in effectively twice in the call site, once to ServiceLoader, and then to this method directly as well. // It's annoying that ServiceLoader hangs onto the service class, but does not expose it.
It does at least + print the service class from its toString, which is better than nothing - throw new IllegalStateException(String.format(Locale.ROOT, "More than one extension found for %s", loader)); - } else if (extensions.isEmpty()) { - return fallback.get(); + throw new IllegalStateException(Strings.format("More than one extension found for %s", loader)); } - return extensions.get(0).get(); + return Optional.of(ext); } } diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java index 8442cf8c4a341..b4d79d89ec4c6 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java @@ -25,22 +25,22 @@ public record RepositoriesMetrics( public static RepositoriesMetrics NOOP = new RepositoriesMetrics(MeterRegistry.NOOP); - public static final String METRIC_REQUESTS_COUNT = "es.repositories.requests.count"; - public static final String METRIC_EXCEPTIONS_COUNT = "es.repositories.exceptions.count"; - public static final String METRIC_THROTTLES_COUNT = "es.repositories.throttles.count"; - public static final String METRIC_OPERATIONS_COUNT = "es.repositories.operations.count"; - public static final String METRIC_UNSUCCESSFUL_OPERATIONS_COUNT = "es.repositories.operations.unsuccessful.count"; + public static final String METRIC_REQUESTS_TOTAL = "es.repositories.requests.total"; + public static final String METRIC_EXCEPTIONS_TOTAL = "es.repositories.exceptions.total"; + public static final String METRIC_THROTTLES_TOTAL = "es.repositories.throttles.total"; + public static final String METRIC_OPERATIONS_TOTAL = "es.repositories.operations.total"; + public static final String METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL = "es.repositories.operations.unsuccessful.total"; public static final String METRIC_EXCEPTIONS_HISTOGRAM = "es.repositories.exceptions.histogram"; public static final String METRIC_THROTTLES_HISTOGRAM = "es.repositories.throttles.histogram"; public static final String HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM = "es.repositories.requests.http_request_time.histogram"; public RepositoriesMetrics(MeterRegistry meterRegistry) { this( - meterRegistry.registerLongCounter(METRIC_REQUESTS_COUNT, "repository request counter", "unit"), - meterRegistry.registerLongCounter(METRIC_EXCEPTIONS_COUNT, "repository request exception counter", "unit"), - meterRegistry.registerLongCounter(METRIC_THROTTLES_COUNT, "repository request throttle counter", "unit"), - meterRegistry.registerLongCounter(METRIC_OPERATIONS_COUNT, "repository operation counter", "unit"), - meterRegistry.registerLongCounter(METRIC_UNSUCCESSFUL_OPERATIONS_COUNT, "repository unsuccessful operation counter", "unit"), + meterRegistry.registerLongCounter(METRIC_REQUESTS_TOTAL, "repository request counter", "unit"), + meterRegistry.registerLongCounter(METRIC_EXCEPTIONS_TOTAL, "repository request exception counter", "unit"), + meterRegistry.registerLongCounter(METRIC_THROTTLES_TOTAL, "repository request throttle counter", "unit"), + meterRegistry.registerLongCounter(METRIC_OPERATIONS_TOTAL, "repository operation counter", "unit"), + meterRegistry.registerLongCounter(METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL, "repository unsuccessful operation counter", "unit"), meterRegistry.registerLongHistogram(METRIC_EXCEPTIONS_HISTOGRAM, "repository request exception histogram", "unit"), meterRegistry.registerLongHistogram(METRIC_THROTTLES_HISTOGRAM,
"repository request throttle histogram", "unit"), meterRegistry.registerLongHistogram( diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoryCleanupResult.java b/server/src/main/java/org/elasticsearch/repositories/RepositoryCleanupResult.java index 664b6d1cb078c..a8b01e10e51d1 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoryCleanupResult.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoryCleanupResult.java @@ -12,8 +12,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.xcontent.ObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -21,28 +19,13 @@ public final class RepositoryCleanupResult implements Writeable, ToXContentObject { - public static final ObjectParser PARSER = new ObjectParser<>( - RepositoryCleanupResult.class.getName(), - true, - RepositoryCleanupResult::new - ); - private static final String DELETED_BLOBS = "deleted_blobs"; private static final String DELETED_BYTES = "deleted_bytes"; - static { - PARSER.declareLong((result, bytes) -> result.bytes = bytes, new ParseField(DELETED_BYTES)); - PARSER.declareLong((result, blobs) -> result.blobs = blobs, new ParseField(DELETED_BLOBS)); - } - - private long bytes; + private final long bytes; - private long blobs; - - private RepositoryCleanupResult() { - this(DeleteResult.ZERO); - } + private final long blobs; public RepositoryCleanupResult(DeleteResult result) { this.blobs = result.blobsDeleted(); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index c45a048480383..48caafc6bfab8 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -114,6 +114,7 @@ import org.elasticsearch.repositories.ShardSnapshotResult; import org.elasticsearch.repositories.SnapshotShardContext; import org.elasticsearch.snapshots.AbortedSnapshotException; +import org.elasticsearch.snapshots.PausedSnapshotException; import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotException; import org.elasticsearch.snapshots.SnapshotId; @@ -177,9 +178,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp protected final ThreadPool threadPool; - public static final String STATELESS_SHARD_THREAD_NAME = "stateless_shard"; + public static final String STATELESS_SHARD_READ_THREAD_NAME = "stateless_shard_read"; public static final String STATELESS_TRANSLOG_THREAD_NAME = "stateless_translog"; - public static final String STATELESS_UPLOAD_THREAD_NAME = "stateless_upload"; + public static final String STATELESS_SHARD_WRITE_THREAD_NAME = "stateless_shard_write"; public static final String SNAPSHOT_PREFIX = "snap-"; @@ -1984,9 +1985,9 @@ protected void assertSnapshotOrGenericThread() { ThreadPool.Names.SNAPSHOT, ThreadPool.Names.SNAPSHOT_META, ThreadPool.Names.GENERIC, - STATELESS_SHARD_THREAD_NAME, + STATELESS_SHARD_READ_THREAD_NAME, STATELESS_TRANSLOG_THREAD_NAME, - STATELESS_UPLOAD_THREAD_NAME + STATELESS_SHARD_WRITE_THREAD_NAME ); } @@ -3254,7 +3255,7 @@ private static void ensureNotAborted(ShardId 
shardId, SnapshotId snapshotId, Ind snapshotStatus.ensureNotAborted(); } catch (Exception e) { logger.debug("[{}] [{}] {} on the file [{}], exiting", shardId, snapshotId, e.getMessage(), fileName); - assert e instanceof AbortedSnapshotException : e; + assert e instanceof AbortedSnapshotException || e instanceof PausedSnapshotException : e; throw e; } } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java index ca3ff799436c2..e7ea234eae310 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.gateway.CorruptStateException; @@ -33,6 +34,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import java.io.FilterInputStream; @@ -144,15 +146,23 @@ public T deserialize(String repoName, NamedXContentRegistry namedXContentRegistr BytesReference bytesReference = Streams.readFully(wrappedStream); deserializeMetaBlobInputStream.verifyFooter(); try ( - XContentParser parser = XContentType.SMILE.xContent() - .createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, bytesReference.streamInput()) + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY.withRegistry(namedXContentRegistry) + .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), + bytesReference, + XContentType.SMILE + ) ) { result = reader.apply(repoName, parser); XContentParserUtils.ensureExpectedToken(null, parser.nextToken(), parser); } catch (Exception e) { try ( - XContentParser parser = XContentType.SMILE.xContent() - .createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, bytesReference.streamInput()) + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY.withRegistry(namedXContentRegistry) + .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), + bytesReference, + XContentType.SMILE + ) ) { result = fallbackReader.apply(repoName, parser); XContentParserUtils.ensureExpectedToken(null, parser.nextToken(), parser); diff --git a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index 77cb51d821843..5ea80ac608b8f 100644 --- a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -14,6 +14,8 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasable; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.core.Tuple; import org.elasticsearch.plugins.ActionPlugin; 
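In the `ChecksumBlobStoreFormat` changes above, both parser-creation sites in `deserialize` now go through `XContentHelper.createParserNotCompressed`, which parses the already-read `BytesReference` directly rather than wrapping it in a stream. Since the two call sites build identical configurations, a further (hypothetical, not part of this change) tidy-up could hoist the configuration once; a sketch using only the calls visible in that hunk:

```java
// Sketch: build the parser configuration once and reuse it for both the primary
// and the fallback read. "parserConfig" is an illustrative local, not from the diff.
XContentParserConfiguration parserConfig = XContentParserConfiguration.EMPTY.withRegistry(namedXContentRegistry)
    .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE);
try (XContentParser parser = XContentHelper.createParserNotCompressed(parserConfig, bytesReference, XContentType.SMILE)) {
    result = reader.apply(repoName, parser);
    XContentParserUtils.ensureExpectedToken(null, parser.nextToken(), parser);
}
```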
@@ -77,30 +79,33 @@ public final long getUsageCount() { @Override public final void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { // prepare the request for execution; has the side effect of touching the request parameters - final RestChannelConsumer action = prepareRequest(request, client); - - // validate unconsumed params, but we must exclude params used to format the response - // use a sorted set so the unconsumed parameters appear in a reliable sorted order - final SortedSet<String> unconsumedParams = request.unconsumedParams() - .stream() - .filter(p -> responseParams(request.getRestApiVersion()).contains(p) == false) - .collect(Collectors.toCollection(TreeSet::new)); - - // validate the non-response params - if (unconsumedParams.isEmpty() == false) { - final Set<String> candidateParams = new HashSet<>(); - candidateParams.addAll(request.consumedParams()); - candidateParams.addAll(responseParams(request.getRestApiVersion())); - throw new IllegalArgumentException(unrecognized(request, unconsumedParams, candidateParams, "parameter")); - } + try (var action = prepareRequest(request, client)) { + + // validate unconsumed params, but we must exclude params used to format the response + // use a sorted set so the unconsumed parameters appear in a reliable sorted order + final SortedSet<String> unconsumedParams = request.unconsumedParams() + .stream() + .filter(p -> responseParams(request.getRestApiVersion()).contains(p) == false) + .collect(Collectors.toCollection(TreeSet::new)); + + // validate the non-response params + if (unconsumedParams.isEmpty() == false) { + final Set<String> candidateParams = new HashSet<>(); + candidateParams.addAll(request.consumedParams()); + candidateParams.addAll(responseParams(request.getRestApiVersion())); + throw new IllegalArgumentException(unrecognized(request, unconsumedParams, candidateParams, "parameter")); + } - if (request.hasContent() && request.isContentConsumed() == false) { - throw new IllegalArgumentException("request [" + request.method() + " " + request.path() + "] does not support having a body"); - } + if (request.hasContent() && request.isContentConsumed() == false) { + throw new IllegalArgumentException( + "request [" + request.method() + " " + request.path() + "] does not support having a body" + ); + } - usageCount.increment(); - // execute the action - action.accept(channel); + usageCount.increment(); + // execute the action + action.accept(channel); + } } protected static String unrecognized( @@ -149,11 +154,18 @@ protected static String unrecognized( } /** - * REST requests are handled by preparing a channel consumer that represents the execution of - * the request against a channel. + * REST requests are handled by preparing a channel consumer that represents the execution of the request against a channel. */ @FunctionalInterface - protected interface RestChannelConsumer extends CheckedConsumer<RestChannel, Exception> {} + protected interface RestChannelConsumer extends CheckedConsumer<RestChannel, Exception>, Releasable { + /** + * Called just after the execution has started (or failed, if the request was invalid), but typically well before the execution has + * completed. This callback should be used to release (refs to) resources that were acquired when constructing this consumer, for + * instance by calling {@link RefCounted#decRef()} on any newly-created transport requests with nontrivial lifecycles. + */ + @Override + default void close() {} + } /** * Prepare the request for execution.
Implementations should consume all request params before diff --git a/server/src/main/java/org/elasticsearch/rest/RestRequest.java b/server/src/main/java/org/elasticsearch/rest/RestRequest.java index eac13e5ef87a6..3e418d5ca5d4e 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestRequest.java +++ b/server/src/main/java/org/elasticsearch/rest/RestRequest.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; @@ -23,15 +24,14 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.http.HttpChannel; import org.elasticsearch.http.HttpRequest; +import org.elasticsearch.telemetry.tracing.Traceable; import org.elasticsearch.xcontent.ParsedMediaType; import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -46,7 +46,7 @@ import static org.elasticsearch.common.unit.ByteSizeValue.parseBytesSizeValue; import static org.elasticsearch.core.TimeValue.parseTimeValue; -public class RestRequest implements ToXContent.Params { +public class RestRequest implements ToXContent.Params, Traceable { public static final String RESPONSE_RESTRICTED = "responseRestricted"; // tchar pattern as defined by RFC7230 section 3.2.6 @@ -499,8 +499,7 @@ public XContentParserConfiguration contentParserConfig() { */ public final XContentParser contentParser() throws IOException { BytesReference content = requiredContent(); // will throw exception if body or content type missing - XContent xContent = xContentType.get().xContent(); - return xContent.createParser(parserConfig, content.streamInput()); + return XContentHelper.createParserNotCompressed(parserConfig, content, xContentType.get()); } @@ -530,7 +529,7 @@ public final boolean hasContentOrSourceParam() { */ public final XContentParser contentOrSourceParamParser() throws IOException { Tuple<XContentType, BytesReference> tuple = contentOrSourceParam(); - return tuple.v1().xContent().createParser(parserConfig, tuple.v2().streamInput()); + return XContentHelper.createParserNotCompressed(parserConfig, tuple.v2(), tuple.v1().xContent().type()); } /** @@ -541,12 +540,7 @@ public final XContentParser contentOrSourceParamParser() throws IOException { public final void withContentOrSourceParamParserOrNull(CheckedConsumer<XContentParser, IOException> withParser) throws IOException { if (hasContentOrSourceParam()) { Tuple<XContentType, BytesReference> tuple = contentOrSourceParam(); - BytesReference content = tuple.v2(); - XContentType xContentType = tuple.v1(); - try ( - InputStream stream = content.streamInput(); - XContentParser parser = xContentType.xContent().createParser(parserConfig, stream) - ) { + try (XContentParser parser = XContentHelper.createParserNotCompressed(parserConfig, tuple.v2(), tuple.v1())) { withParser.accept(parser); } } else { @@ -631,6 +625,11 @@ public void markResponseRestricted(String restriction) { consumedParams.add(RESPONSE_RESTRICTED); } + @Override + public String getSpanId() { + return "rest-" + getRequestId(); + } + public static class MediaTypeHeaderException extends
RuntimeException { private final String message; diff --git a/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java index eae8af0601557..e62fdf33db456 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java @@ -72,7 +72,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC } return channel -> { RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); - cancelClient.fieldCaps(fieldRequest, new RestChunkedToXContentListener<>(channel)); + cancelClient.fieldCaps(fieldRequest, new RestRefCountedChunkedToXContentListener<>(channel)); }; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java index c6790e7de21e6..7785680a3ca8d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java @@ -43,15 +43,17 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - final Map<String, Object> source = request.contentParser().map(); - final CloneSnapshotRequest cloneSnapshotRequest = new CloneSnapshotRequest( - request.param("repository"), - request.param("snapshot"), - request.param("target_snapshot"), - XContentMapValues.nodeStringArrayValue(source.getOrDefault("indices", Collections.emptyList())) - ); - cloneSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", cloneSnapshotRequest.masterNodeTimeout())); - cloneSnapshotRequest.indicesOptions(IndicesOptions.fromMap(source, cloneSnapshotRequest.indicesOptions())); - return channel -> client.admin().cluster().cloneSnapshot(cloneSnapshotRequest, new RestToXContentListener<>(channel)); + try (var parser = request.contentParser()) { + final Map<String, Object> source = parser.map(); + final CloneSnapshotRequest cloneSnapshotRequest = new CloneSnapshotRequest( + request.param("repository"), + request.param("snapshot"), + request.param("target_snapshot"), + XContentMapValues.nodeStringArrayValue(source.getOrDefault("indices", Collections.emptyList())) + ); + cloneSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", cloneSnapshotRequest.masterNodeTimeout())); + cloneSnapshotRequest.indicesOptions(IndicesOptions.fromMap(source, cloneSnapshotRequest.indicesOptions())); + return channel -> client.admin().cluster().cloneSnapshot(cloneSnapshotRequest, new RestToXContentListener<>(channel)); + } } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java index 896c341953e73..607ae3f554fe8 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java @@ -14,7 +14,7 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import
org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -58,6 +58,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC req.includeYesDecisions(request.paramAsBoolean("include_yes_decisions", false)); req.includeDiskInfo(request.paramAsBoolean("include_disk_info", false)); - return channel -> client.admin().cluster().allocationExplain(req, new RestChunkedToXContentListener<>(channel)); + return channel -> client.admin().cluster().allocationExplain(req, new RestRefCountedChunkedToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java index 9058df2336cc5..468cf30c8de54 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java @@ -19,7 +19,7 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ObjectParser.ValueType; import org.elasticsearch.xcontent.ParseField; @@ -84,7 +84,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC if (metric == null) { request.params().put("metric", DEFAULT_METRICS); } - return channel -> client.admin().cluster().reroute(clusterRerouteRequest, new RestChunkedToXContentListener<>(channel)); + return channel -> client.admin().cluster().reroute(clusterRerouteRequest, new RestRefCountedChunkedToXContentListener<>(channel)); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredBalanceAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredBalanceAction.java index 312ce353b6d42..a93c1e3d04fd6 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredBalanceAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredBalanceAction.java @@ -13,7 +13,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -35,7 +35,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli return restChannel -> client.execute( TransportGetDesiredBalanceAction.TYPE, new DesiredBalanceRequest(), - new RestChunkedToXContentListener<>(restChannel) + new RestRefCountedChunkedToXContentListener<>(restChannel) ); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java index ae1cfcd7371fb..5cc77d3d50a01 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java +++ 
b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java @@ -16,7 +16,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.search.sort.SortOrder; import java.io.IOException; @@ -82,6 +82,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC getSnapshotsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSnapshotsRequest.masterNodeTimeout())); return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() .cluster() - .getSnapshots(getSnapshotsRequest, new RestChunkedToXContentListener<>(channel)); + .getSnapshots(getSnapshotsRequest, new RestRefCountedChunkedToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesHotThreadsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesHotThreadsAction.java index 095abcd14d355..2942e59aa1bfd 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesHotThreadsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesHotThreadsAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; +import org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotThreadsAction; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.core.RestApiVersion; @@ -112,10 +113,11 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC nodesHotThreadsRequest.interval(TimeValue.parseTimeValue(request.param("interval"), nodesHotThreadsRequest.interval(), "interval")); nodesHotThreadsRequest.snapshots(request.paramAsInt("snapshots", nodesHotThreadsRequest.snapshots())); nodesHotThreadsRequest.timeout(request.param("timeout")); - return channel -> client.admin().cluster().nodesHotThreads(nodesHotThreadsRequest, new RestResponseListener<>(channel) { + return channel -> client.execute(TransportNodesHotThreadsAction.TYPE, nodesHotThreadsRequest, new RestResponseListener<>(channel) { @Override public RestResponse buildResponse(NodesHotThreadsResponse response) { - return RestResponse.chunked(RestStatus.OK, fromTextChunks(TEXT_CONTENT_TYPE, response.getTextChunks(), null)); + response.mustIncRef(); + return RestResponse.chunked(RestStatus.OK, fromTextChunks(TEXT_CONTENT_TYPE, response.getTextChunks(), response::decRef)); } }); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java index fef7dc0cbdd37..d311f39f42f7a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java @@ -22,7 +22,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import 
org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.Collections; @@ -189,7 +189,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() .cluster() - .nodesStats(nodesStatsRequest, new RestChunkedToXContentListener<>(channel)); + .nodesStats(nodesStatsRequest, new RestRefCountedChunkedToXContentListener<>(channel)); } private final Set<String> RESPONSE_PARAMS = Collections.singleton("level"); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java index 8442507c36b1c..e5745ec89533c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java @@ -9,12 +9,13 @@ package org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest; +import org.elasticsearch.action.admin.cluster.tasks.TransportPendingClusterTasksAction; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -39,8 +40,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC PendingClusterTasksRequest pendingClusterTasksRequest = new PendingClusterTasksRequest(); pendingClusterTasksRequest.masterNodeTimeout(request.paramAsTime("master_timeout", pendingClusterTasksRequest.masterNodeTimeout())); pendingClusterTasksRequest.local(request.paramAsBoolean("local", pendingClusterTasksRequest.local())); - return channel -> client.admin() - .cluster() - .pendingClusterTasks(pendingClusterTasksRequest, new RestChunkedToXContentListener<>(channel)); + return channel -> client.execute( + TransportPendingClusterTasksAction.TYPE, + pendingClusterTasksRequest, + new RestRefCountedChunkedToXContentListener<>(channel) + ); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java index 86ac7088642d1..fce50eec6fc01 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java @@ -9,12 +9,14 @@ package org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequest; -import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; +import org.elasticsearch.action.admin.cluster.node.reload.TransportNodesReloadSecureSettingsAction; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; +import
org.elasticsearch.core.Nullable; import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestRequestFilter; import org.elasticsearch.rest.RestResponse; @@ -33,14 +35,16 @@ public final class RestReloadSecureSettingsAction extends BaseRestHandler implements RestRequestFilter { - static final ObjectParser<NodesReloadSecureSettingsRequest, Void> PARSER = new ObjectParser<>( - "reload_secure_settings", - NodesReloadSecureSettingsRequest::new - ); + static final class ParsedRequestBody { + @Nullable + SecureString secureSettingsPassword; + } + + static final ObjectParser<ParsedRequestBody, Void> PARSER = new ObjectParser<>("reload_secure_settings", ParsedRequestBody::new); static { PARSER.declareString( - (request, value) -> request.setSecureStorePassword(new SecureString(value.toCharArray())), + (parsedRequestBody, value) -> parsedRequestBody.secureSettingsPassword = new SecureString(value.toCharArray()), new ParseField("secure_settings_password") ); } @@ -57,31 +61,42 @@ public List<Route> routes() { @Override public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - final String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); - final NodesReloadSecureSettingsRequestBuilder nodesRequestBuilder = client.admin() - .cluster() - .prepareReloadSecureSettings() - .setTimeout(request.param("timeout")) - .setNodesIds(nodesIds); + final NodesReloadSecureSettingsRequest reloadSecureSettingsRequest = new NodesReloadSecureSettingsRequest(); + reloadSecureSettingsRequest.nodesIds(Strings.splitStringByCommaToArray(request.param("nodeId"))); + reloadSecureSettingsRequest.timeout(request.param("timeout")); request.withContentOrSourceParamParserOrNull(parser -> { if (parser != null) { - final NodesReloadSecureSettingsRequest nodesRequest = PARSER.parse(parser, null); - nodesRequestBuilder.setSecureStorePassword(nodesRequest.getSecureSettingsPassword()); + final ParsedRequestBody parsedRequestBody = PARSER.parse(parser, null); + reloadSecureSettingsRequest.setSecureStorePassword(parsedRequestBody.secureSettingsPassword); } }); - return channel -> nodesRequestBuilder.execute(new RestBuilderListener<NodesReloadSecureSettingsResponse>(channel) { + return new RestChannelConsumer() { @Override - public RestResponse buildResponse(NodesReloadSecureSettingsResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - RestActions.buildNodesHeader(builder, channel.request(), response); - builder.field("cluster_name", response.getClusterName().value()); - response.toXContent(builder, channel.request()); - builder.endObject(); - nodesRequestBuilder.request().close(); - return new RestResponse(RestStatus.OK, builder); + public void accept(RestChannel channel) { + client.execute( + TransportNodesReloadSecureSettingsAction.TYPE, + reloadSecureSettingsRequest, + new RestBuilderListener<>(channel) { + @Override + public RestResponse buildResponse(NodesReloadSecureSettingsResponse response, XContentBuilder builder) + throws Exception { + builder.startObject(); + RestActions.buildNodesHeader(builder, channel.request(), response); + builder.field("cluster_name", response.getClusterName().value()); + response.toXContent(builder, channel.request()); + builder.endObject(); + return new RestResponse(RestStatus.OK, builder); + } + } + ); } - }); + + @Override + public void close() { + reloadSecureSettingsRequest.decRef(); + } + }; } @Override diff --git
a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java index b5e25c32824ed..3baebb25c4dc2 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java @@ -16,7 +16,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -56,6 +56,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC snapshotsStatusRequest.masterNodeTimeout(request.paramAsTime("master_timeout", snapshotsStatusRequest.masterNodeTimeout())); return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() .cluster() - .snapshotsStatus(snapshotsStatusRequest, new RestChunkedToXContentListener<>(channel)); + .snapshotsStatus(snapshotsStatusRequest, new RestRefCountedChunkedToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java index 4213f42549cd7..b8a7179f8cfb7 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.rest.action.admin.cluster.dangling; import org.elasticsearch.action.admin.indices.dangling.delete.DeleteDanglingIndexRequest; +import org.elasticsearch.action.admin.indices.dangling.delete.TransportDeleteDanglingIndexAction; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -42,6 +43,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, NodeClient deleteRequest.timeout(request.paramAsTime("timeout", deleteRequest.timeout())); deleteRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRequest.masterNodeTimeout())); - return channel -> client.admin().cluster().deleteDanglingIndex(deleteRequest, new RestToXContentListener<>(channel, r -> ACCEPTED)); + return channel -> client.execute( + TransportDeleteDanglingIndexAction.TYPE, + deleteRequest, + new RestToXContentListener<>(channel, r -> ACCEPTED) + ); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java index 7f481c16118bd..9fa46fd9b0a3c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.rest.action.admin.cluster.dangling; import org.elasticsearch.action.admin.indices.dangling.import_index.ImportDanglingIndexRequest; +import 
org.elasticsearch.action.admin.indices.dangling.import_index.TransportImportDanglingIndexAction; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -41,6 +42,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, NodeClient importRequest.timeout(request.paramAsTime("timeout", importRequest.timeout())); importRequest.masterNodeTimeout(request.paramAsTime("master_timeout", importRequest.masterNodeTimeout())); - return channel -> client.admin().cluster().importDanglingIndex(importRequest, new RestToXContentListener<>(channel, r -> ACCEPTED)); + return channel -> client.execute( + TransportImportDanglingIndexAction.TYPE, + importRequest, + new RestToXContentListener<>(channel, r -> ACCEPTED) + ); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestListDanglingIndicesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestListDanglingIndicesAction.java index 8be7b68624bb4..8f7d9893019a4 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestListDanglingIndicesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestListDanglingIndicesAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.rest.action.admin.cluster.dangling; import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesRequest; +import org.elasticsearch.action.admin.indices.dangling.list.TransportListDanglingIndicesAction; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -32,9 +33,10 @@ public String getName() { @Override public BaseRestHandler.RestChannelConsumer prepareRequest(final RestRequest request, NodeClient client) throws IOException { - final ListDanglingIndicesRequest danglingIndicesRequest = new ListDanglingIndicesRequest(); - return channel -> client.admin() - .cluster() - .listDanglingIndices(danglingIndicesRequest, new RestActions.NodesResponseRestListener<>(channel)); + return channel -> client.execute( + TransportListDanglingIndicesAction.TYPE, + new ListDanglingIndicesRequest(), + new RestActions.NodesResponseRestListener<>(channel) + ); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComponentTemplateAction.java index 888a65c1a119a..733e35ba7f927 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComponentTemplateAction.java @@ -8,7 +8,7 @@ package org.elasticsearch.rest.action.admin.indices; -import org.elasticsearch.action.admin.indices.template.delete.DeleteComponentTemplateAction; +import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComponentTemplateAction; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; @@ -38,9 +38,9 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String[] names = Strings.splitStringByCommaToArray(request.param("name")); - DeleteComponentTemplateAction.Request deleteReq = new 
DeleteComponentTemplateAction.Request(names); + TransportDeleteComponentTemplateAction.Request deleteReq = new TransportDeleteComponentTemplateAction.Request(names); deleteReq.masterNodeTimeout(request.paramAsTime("master_timeout", deleteReq.masterNodeTimeout())); - return channel -> client.execute(DeleteComponentTemplateAction.INSTANCE, deleteReq, new RestToXContentListener<>(channel)); + return channel -> client.execute(TransportDeleteComponentTemplateAction.TYPE, deleteReq, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComposableIndexTemplateAction.java index 65c95b6408b53..8c84fb054718e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComposableIndexTemplateAction.java @@ -8,7 +8,7 @@ package org.elasticsearch.rest.action.admin.indices; -import org.elasticsearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComposableIndexTemplateAction; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; @@ -39,9 +39,13 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String[] names = Strings.splitStringByCommaToArray(request.param("name")); - DeleteComposableIndexTemplateAction.Request deleteReq = new DeleteComposableIndexTemplateAction.Request(names); + TransportDeleteComposableIndexTemplateAction.Request deleteReq = new TransportDeleteComposableIndexTemplateAction.Request(names); deleteReq.masterNodeTimeout(request.paramAsTime("master_timeout", deleteReq.masterNodeTimeout())); - return channel -> client.execute(DeleteComposableIndexTemplateAction.INSTANCE, deleteReq, new RestToXContentListener<>(channel)); + return channel -> client.execute( + TransportDeleteComposableIndexTemplateAction.TYPE, + deleteReq, + new RestToXContentListener<>(channel) + ); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFieldUsageStatsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFieldUsageStatsAction.java index ea2a867d35e7c..02785c8ab43eb 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFieldUsageStatsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFieldUsageStatsAction.java @@ -19,7 +19,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -45,7 +45,7 @@ public BaseRestHandler.RestChannelConsumer prepareRequest(final RestRequest requ fusRequest.fields(request.paramAsStringArray("fields", fusRequest.fields())); return channel -> { final RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); - cancelClient.execute(FieldUsageStatsAction.INSTANCE, fusRequest, new 
RestChunkedToXContentListener<>(channel)); + cancelClient.execute(FieldUsageStatsAction.INSTANCE, fusRequest, new RestRefCountedChunkedToXContentListener<>(channel)); }; } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java index 3550a7151ce43..db10bdd985d59 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java @@ -20,7 +20,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -72,7 +72,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final var httpChannel = request.getHttpChannel(); return channel -> new RestCancellableNodeClient(client, httpChannel).admin() .indices() - .getIndex(getIndexRequest, new RestChunkedToXContentListener<>(channel)); + .getIndex(getIndexRequest, new RestRefCountedChunkedToXContentListener<>(channel)); } /** diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java index 0a8ae5ae90c66..065399076c12a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java @@ -21,7 +21,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -90,6 +90,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final HttpChannel httpChannel = request.getHttpChannel(); return channel -> new RestCancellableNodeClient(client, httpChannel).admin() .indices() - .getMappings(getMappingsRequest, new RestChunkedToXContentListener<>(channel)); + .getMappings(getMappingsRequest, new RestRefCountedChunkedToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java index 4512b84de6af5..af72e66f6127d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java @@ -17,7 +17,7 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -56,6 +56,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC .names(names); getSettingsRequest.local(request.paramAsBoolean("local", 
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java
index 4512b84de6af5..af72e66f6127d 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java
@@ -17,7 +17,7 @@
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.Scope;
 import org.elasticsearch.rest.ServerlessScope;
-import org.elasticsearch.rest.action.RestChunkedToXContentListener;
+import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener;
 import java.io.IOException;
 import java.util.List;
@@ -56,6 +56,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
             .names(names);
         getSettingsRequest.local(request.paramAsBoolean("local", getSettingsRequest.local()));
         getSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSettingsRequest.masterNodeTimeout()));
-        return channel -> client.admin().indices().getSettings(getSettingsRequest, new RestChunkedToXContentListener<>(channel));
+        return channel -> client.admin().indices().getSettings(getSettingsRequest, new RestRefCountedChunkedToXContentListener<>(channel));
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java
index e293cf86d455a..21982c113ac3b 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java
@@ -19,7 +19,7 @@
 import org.elasticsearch.rest.Scope;
 import org.elasticsearch.rest.ServerlessScope;
 import org.elasticsearch.rest.action.RestCancellableNodeClient;
-import org.elasticsearch.rest.action.RestChunkedToXContentListener;
+import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener;
 import java.io.IOException;
 import java.util.List;
@@ -58,6 +58,6 @@
         indicesSegmentsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesSegmentsRequest.indicesOptions()));
         return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin()
             .indices()
-            .segments(indicesSegmentsRequest, new RestChunkedToXContentListener<>(channel));
+            .segments(indicesSegmentsRequest, new RestRefCountedChunkedToXContentListener<>(channel));
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesShardStoresAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesShardStoresAction.java
index 11831fdae80fb..854ac937113d8 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesShardStoresAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesShardStoresAction.java
@@ -8,15 +8,15 @@
 package org.elasticsearch.rest.action.admin.indices;
-import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction;
 import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
+import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestCancellableNodeClient;
-import org.elasticsearch.rest.action.RestChunkedToXContentListener;
+import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener;
 import java.io.IOException;
 import java.util.List;
@@ -24,7 +24,7 @@
 import static org.elasticsearch.rest.RestRequest.Method.GET;
 /**
- * Rest action for {@link IndicesShardStoresAction}
+ * Rest action for {@link TransportIndicesShardStoresAction}
  */
 public class RestIndicesShardStoresAction extends BaseRestHandler {
@@ -55,8 +55,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
             request.paramAsInt("max_concurrent_shard_requests", indicesShardStoresRequest.maxConcurrentShardRequests())
         );
         indicesShardStoresRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesShardStoresRequest.indicesOptions()));
-        return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin()
-            .indices()
-            .shardStores(indicesShardStoresRequest, new RestChunkedToXContentListener<>(channel));
+        return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).execute(
+            TransportIndicesShardStoresAction.TYPE,
+            indicesShardStoresRequest,
+            new RestRefCountedChunkedToXContentListener<>(channel)
+        );
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java
index 90aa366d4ecdf..f0aa614d73677 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java
@@ -22,7 +22,7 @@
 import org.elasticsearch.rest.Scope;
 import org.elasticsearch.rest.ServerlessScope;
 import org.elasticsearch.rest.action.RestCancellableNodeClient;
-import org.elasticsearch.rest.action.RestChunkedToXContentListener;
+import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener;
 import org.elasticsearch.rest.action.document.RestMultiTermVectorsAction;
 import java.io.IOException;
@@ -146,7 +146,7 @@
         return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin()
             .indices()
-            .stats(indicesStatsRequest, new RestChunkedToXContentListener<>(channel));
+            .stats(indicesStatsRequest, new RestRefCountedChunkedToXContentListener<>(channel));
     }
     @Override
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComponentTemplateAction.java
index 337283ebf1958..fd6f529d876a2 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComponentTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComponentTemplateAction.java
@@ -43,7 +43,9 @@
         putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout()));
         putRequest.create(request.paramAsBoolean("create", false));
         putRequest.cause(request.param("cause", "api"));
-        putRequest.componentTemplate(ComponentTemplate.parse(request.contentParser()));
+        try (var parser = request.contentParser()) {
+            putRequest.componentTemplate(ComponentTemplate.parse(parser));
+        }
         return channel -> client.execute(PutComponentTemplateAction.INSTANCE, putRequest, new RestToXContentListener<>(channel));
     }
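Several of the following hunks wrap `request.contentParser()` in try-with-resources instead of passing it inline. A minimal sketch of the leak this avoids, using a plain `Reader` as a stand-in for the parser (hypothetical names, not ES source):

```java
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;

final class ParserCloseDemo {
    static void parseBody(String body) throws IOException {
        // Stand-in for request.contentParser(): the parser wraps a stream and must be
        // closed even if parsing throws, otherwise its underlying resources leak.
        try (Reader parser = new StringReader(body)) {
            while (parser.read() != -1) {
                // ... tokenize and build the template request ...
            }
        }
    }
}
```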
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java
index afc291bc6dc26..3171c18bc9e28 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java
@@ -8,7 +8,7 @@
 package org.elasticsearch.rest.action.admin.indices;
-import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
 import org.elasticsearch.rest.BaseRestHandler;
@@ -39,12 +39,16 @@ public String getName() {
     @Override
     public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
-        PutComposableIndexTemplateAction.Request putRequest = new PutComposableIndexTemplateAction.Request(request.param("name"));
+        TransportPutComposableIndexTemplateAction.Request putRequest = new TransportPutComposableIndexTemplateAction.Request(
+            request.param("name")
+        );
         putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout()));
         putRequest.create(request.paramAsBoolean("create", false));
         putRequest.cause(request.param("cause", "api"));
-        putRequest.indexTemplate(ComposableIndexTemplate.parse(request.contentParser()));
+        try (var parser = request.contentParser()) {
+            putRequest.indexTemplate(ComposableIndexTemplate.parse(parser));
+        }
-        return channel -> client.execute(PutComposableIndexTemplateAction.INSTANCE, putRequest, new RestToXContentListener<>(channel));
+        return channel -> client.execute(TransportPutComposableIndexTemplateAction.TYPE, putRequest, new RestToXContentListener<>(channel));
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java
index 036d91c000ffe..8db8366f0b9f8 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java
@@ -17,7 +17,7 @@
 import org.elasticsearch.rest.Scope;
 import org.elasticsearch.rest.ServerlessScope;
 import org.elasticsearch.rest.action.RestCancellableNodeClient;
-import org.elasticsearch.rest.action.RestChunkedToXContentListener;
+import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener;
 import java.io.IOException;
 import java.util.List;
@@ -54,6 +54,6 @@
         recoveryRequest.indicesOptions(IndicesOptions.fromRequest(request, recoveryRequest.indicesOptions()));
         return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin()
             .indices()
-            .recoveries(recoveryRequest, new RestChunkedToXContentListener<>(channel));
+            .recoveries(recoveryRequest, new RestRefCountedChunkedToXContentListener<>(channel));
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java
index b2c4e5e7d0fbd..b22c79230ef3c 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java
@@ -48,6 +48,7 @@
         RolloverRequest rolloverIndexRequest = new RolloverRequest(request.param("index"), request.param("new_index"));
         request.applyContentParser(parser -> rolloverIndexRequest.fromXContent(includeTypeName, parser));
         rolloverIndexRequest.dryRun(request.paramAsBoolean("dry_run", false));
+        rolloverIndexRequest.lazy(request.paramAsBoolean("lazy", false));
         rolloverIndexRequest.timeout(request.paramAsTime("timeout", rolloverIndexRequest.timeout()));
         rolloverIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", rolloverIndexRequest.masterNodeTimeout()));
         rolloverIndexRequest.getCreateIndexRequest()
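The rollover hunk above wires a new `lazy` query parameter into the request. A hedged sketch of how such a boolean URL parameter typically flows into the request object (hypothetical helper, not ES source):

```java
import java.util.Map;

final class RolloverParamsDemo {
    static boolean paramAsBoolean(Map<String, String> params, String key, boolean defaultValue) {
        String raw = params.get(key);
        return raw == null ? defaultValue : Boolean.parseBoolean(raw);
    }

    public static void main(String[] args) {
        // e.g. POST /my-data-stream/_rollover?lazy=true
        boolean lazy = paramAsBoolean(Map.of("lazy", "true"), "lazy", false);
        System.out.println(lazy); // true
    }
}
```

As I understand the feature, a lazy rollover records the intent to roll over and defers the actual rollover until the next write, rather than creating the new backing index immediately.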
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java
index 420d7a8d70f58..7eab7168cd100 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java
@@ -10,7 +10,7 @@
 import org.elasticsearch.action.admin.indices.template.post.SimulateIndexTemplateAction;
 import org.elasticsearch.action.admin.indices.template.post.SimulateIndexTemplateRequest;
-import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
 import org.elasticsearch.rest.BaseRestHandler;
@@ -45,10 +45,12 @@
         );
         simulateIndexTemplateRequest.includeDefaults(request.paramAsBoolean("include_defaults", false));
         if (request.hasContent()) {
-            PutComposableIndexTemplateAction.Request indexTemplateRequest = new PutComposableIndexTemplateAction.Request(
+            TransportPutComposableIndexTemplateAction.Request indexTemplateRequest = new TransportPutComposableIndexTemplateAction.Request(
                 "simulating_template"
             );
-            indexTemplateRequest.indexTemplate(ComposableIndexTemplate.parse(request.contentParser()));
+            try (var parser = request.contentParser()) {
+                indexTemplateRequest.indexTemplate(ComposableIndexTemplate.parse(parser));
+            }
             indexTemplateRequest.create(request.paramAsBoolean("create", false));
             indexTemplateRequest.cause(request.param("cause", "api"));
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java
index d458c309933a8..bc38d549926af 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java
@@ -9,7 +9,7 @@
 package org.elasticsearch.rest.action.admin.indices;
 import org.elasticsearch.action.admin.indices.template.post.SimulateTemplateAction;
-import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
 import org.elasticsearch.rest.BaseRestHandler;
@@ -41,10 +41,12 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli
         simulateRequest.templateName(request.param("name"));
         simulateRequest.includeDefaults(request.paramAsBoolean("include_defaults", false));
         if (request.hasContent()) {
-            PutComposableIndexTemplateAction.Request indexTemplateRequest = new PutComposableIndexTemplateAction.Request(
+            TransportPutComposableIndexTemplateAction.Request indexTemplateRequest = new TransportPutComposableIndexTemplateAction.Request(
                 "simulating_template"
             );
-            indexTemplateRequest.indexTemplate(ComposableIndexTemplate.parse(request.contentParser()));
+            try (var parser = request.contentParser()) {
+                indexTemplateRequest.indexTemplate(ComposableIndexTemplate.parse(parser));
+            }
             indexTemplateRequest.create(request.paramAsBoolean("create", false));
             indexTemplateRequest.cause(request.param("cause", "api"));
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java
index 74eddca033398..779cb229ca48b 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java
@@ -47,7 +47,9 @@
         updateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", updateSettingsRequest.masterNodeTimeout()));
         updateSettingsRequest.indicesOptions(IndicesOptions.fromRequest(request, updateSettingsRequest.indicesOptions()));
         updateSettingsRequest.reopen(request.paramAsBoolean("reopen", false));
-        updateSettingsRequest.fromXContent(request.contentParser());
+        try (var parser = request.contentParser()) {
+            updateSettingsRequest.fromXContent(parser);
+        }
         return channel -> client.admin().indices().updateSettings(updateSettingsRequest, new RestToXContentListener<>(channel));
     }
diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java
index 5e9b2c8452579..068c809554631 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java
@@ -130,7 +130,7 @@ private Table buildTable(RestRequest request, final ClusterStateResponse state,
         table.startRow();
         table.addCell(shardCount);
-        table.addCell(nodeStats.getIndices().getStore().getSize());
+        table.addCell(nodeStats.getIndices().getStore().size());
         table.addCell(used < 0 ? null : ByteSizeValue.ofBytes(used));
         table.addCell(avail.getBytes() < 0 ? null : avail);
         table.addCell(total.getBytes() < 0 ? null : total);
diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java
index b761c7e3ca054..4a238451bcc69 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java
@@ -127,24 +127,27 @@ private static int countMappingInTemplate(Template template) throws Exception {
         }
         int count = 0;
         XContentType xContentType = XContentType.JSON;
-        XContentParser parser = xContentType.xContent()
-            .createParser(XContentParserConfiguration.EMPTY, template.mappings().uncompressed().array());
-        XContentParser.Token token = parser.nextToken();
-        String currentFieldName = null;
-        while (token != XContentParser.Token.END_OBJECT) {
-            if (token == XContentParser.Token.FIELD_NAME) {
-                currentFieldName = parser.currentName();
-            } else if (token == XContentParser.Token.START_OBJECT) {
-                if ("_doc".equals(currentFieldName)) {
-                    List<Object> list = parser.mapOrdered().values().stream().toList();
-                    for (Object mapping : list) {
-                        count = count + countSubAttributes(mapping);
+        try (
+            XContentParser parser = xContentType.xContent()
+                .createParser(XContentParserConfiguration.EMPTY, template.mappings().uncompressed().array())
+        ) {
+            XContentParser.Token token = parser.nextToken();
+            String currentFieldName = null;
+            while (token != XContentParser.Token.END_OBJECT) {
+                if (token == XContentParser.Token.FIELD_NAME) {
+                    currentFieldName = parser.currentName();
+                } else if (token == XContentParser.Token.START_OBJECT) {
+                    if ("_doc".equals(currentFieldName)) {
+                        List<Object> list = parser.mapOrdered().values().stream().toList();
+                        for (Object mapping : list) {
+                            count = count + countSubAttributes(mapping);
+                        }
                     }
+                } else {
+                    parser.skipChildren();
                 }
-            } else {
-                parser.skipChildren();
+                token = parser.nextToken();
             }
-            token = parser.nextToken();
         }
         return count;
     }
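The cat hunk above mainly re-indents the token walk into a try-with-resources block; the counting itself works on the map materialized from the `_doc` object. A simplified stand-in for what `countSubAttributes` (not shown in this hunk) plausibly does, assuming it counts nested attributes:

```java
import java.util.Map;

final class MappingCountDemo {
    static int countLeaves(Object node) {
        if (node instanceof Map<?, ?> map) {
            int count = 0;
            for (Object child : map.values()) {
                count += countLeaves(child);
            }
            return count;
        }
        return 1; // scalar setting such as "type": "text"
    }

    public static void main(String[] args) {
        Map<String, Object> doc = Map.of("properties", Map.of("title", Map.of("type", "text")));
        System.out.println(countLeaves(doc)); // 1
    }
}
```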
diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java
index 7408bf3ab229e..19ebbd2f19df4 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java
@@ -10,6 +10,7 @@
 import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest;
 import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
+import org.elasticsearch.action.admin.cluster.tasks.TransportPendingClusterTasksAction;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.cluster.service.PendingClusterTask;
 import org.elasticsearch.common.Table;
@@ -46,15 +47,17 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli
         PendingClusterTasksRequest pendingClusterTasksRequest = new PendingClusterTasksRequest();
         pendingClusterTasksRequest.masterNodeTimeout(request.paramAsTime("master_timeout", pendingClusterTasksRequest.masterNodeTimeout()));
         pendingClusterTasksRequest.local(request.paramAsBoolean("local", pendingClusterTasksRequest.local()));
-        return channel -> client.admin()
-            .cluster()
-            .pendingClusterTasks(pendingClusterTasksRequest, new RestResponseListener<PendingClusterTasksResponse>(channel) {
+        return channel -> client.execute(
+            TransportPendingClusterTasksAction.TYPE,
+            pendingClusterTasksRequest,
+            new RestResponseListener<>(channel) {
                 @Override
                 public RestResponse buildResponse(PendingClusterTasksResponse pendingClusterTasks) throws Exception {
                     Table tab = buildTable(request, pendingClusterTasks);
                     return RestTable.buildResponse(tab, channel);
                 }
-            });
+            }
+        );
     }
     @Override
diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java
index 94fffd6582155..a57d45e07fd15 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java
@@ -293,8 +293,8 @@ Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsRe
             }
             table.addCell(shard.state());
             table.addCell(getOrNull(commonStats, CommonStats::getDocs, DocsStats::getCount));
-            table.addCell(getOrNull(commonStats, CommonStats::getStore, StoreStats::getSize));
-            table.addCell(getOrNull(commonStats, CommonStats::getStore, StoreStats::getTotalDataSetSize));
+            table.addCell(getOrNull(commonStats, CommonStats::getStore, StoreStats::size));
+            table.addCell(getOrNull(commonStats, CommonStats::getStore, StoreStats::totalDataSetSize));
             if (shard.assignedToNode()) {
                 String ip = state.getState().nodes().get(shard.currentNodeId()).getHostAddress();
                 String nodeId = shard.currentNodeId();
diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java
index 681474403eb14..3dd8269126552 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java
@@ -14,7 +14,7 @@
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestCancellableNodeClient;
-import org.elasticsearch.rest.action.RestChunkedToXContentListener;
+import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener;
 import org.elasticsearch.search.vectors.KnnSearchRequestParser;
 import java.io.IOException;
@@ -55,6 +55,6 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient
         SearchRequestBuilder searchRequestBuilder = cancellableNodeClient.prepareSearch();
         parser.toSearchRequest(searchRequestBuilder);
-        return channel -> searchRequestBuilder.execute(new RestChunkedToXContentListener<>(channel));
+        return channel -> searchRequestBuilder.execute(new RestRefCountedChunkedToXContentListener<>(channel));
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java
index c232e1a30c553..a881b2497b26c 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java
@@ -142,7 +142,7 @@ public static MultiSearchRequest parseRequest(
             searchRequest.source(new SearchSourceBuilder().parseXContent(parser, false, searchUsageHolder));
             RestSearchAction.validateSearchRequest(restRequest, searchRequest);
             if (searchRequest.pointInTimeBuilder() != null) {
-                RestSearchAction.preparePointInTime(searchRequest, restRequest, namedWriteableRegistry);
+                RestSearchAction.preparePointInTime(searchRequest, restRequest);
             } else {
                 searchRequest.setCcsMinimizeRoundtrips(
                     restRequest.paramAsBoolean("ccs_minimize_roundtrips", searchRequest.isCcsMinimizeRoundtrips())
diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
index 41102a3568e30..711aec182525e 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
@@ -11,7 +11,6 @@
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequestValidationException;
-import org.elasticsearch.action.search.SearchContextId;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.TransportSearchAction;
 import org.elasticsearch.action.support.IndicesOptions;
@@ -50,7 +49,6 @@
 import java.util.function.IntConsumer;
 import static org.elasticsearch.action.ValidateActions.addValidationError;
-import static org.elasticsearch.action.search.SearchRequest.DEFAULT_INDICES_OPTIONS;
 import static org.elasticsearch.core.TimeValue.parseTimeValue;
 import static org.elasticsearch.rest.RestRequest.Method.GET;
 import static org.elasticsearch.rest.RestRequest.Method.POST;
@@ -220,7 +218,7 @@ public static void parseSearchRequest(
         validateSearchRequest(request, searchRequest);
         if (searchRequest.pointInTimeBuilder() != null) {
-            preparePointInTime(searchRequest, request, namedWriteableRegistry);
+            preparePointInTime(searchRequest, request);
         } else {
             searchRequest.setCcsMinimizeRoundtrips(
                 request.paramAsBoolean("ccs_minimize_roundtrips", searchRequest.isCcsMinimizeRoundtrips())
@@ -373,44 +371,14 @@ static SuggestBuilder parseSuggestUrlParameters(RestRequest request) {
         return null;
     }
-    static void preparePointInTime(SearchRequest request, RestRequest restRequest, NamedWriteableRegistry namedWriteableRegistry) {
+    static void preparePointInTime(SearchRequest request, RestRequest restRequest) {
         assert request.pointInTimeBuilder() != null;
         ActionRequestValidationException validationException = null;
-        if (request.indices().length > 0) {
-            validationException = addValidationError(
-                "[indices] cannot be used with point in time. Do not specify any index with point in time.",
-                validationException
-            );
-        }
-        if (request.indicesOptions().equals(DEFAULT_INDICES_OPTIONS) == false) {
-            validationException = addValidationError("[indicesOptions] cannot be used with point in time", validationException);
-        }
-        if (request.routing() != null) {
-            validationException = addValidationError("[routing] cannot be used with point in time", validationException);
-        }
-        if (request.preference() != null) {
-            validationException = addValidationError("[preference] cannot be used with point in time", validationException);
-        }
         if (restRequest.paramAsBoolean("ccs_minimize_roundtrips", false)) {
             validationException = addValidationError("[ccs_minimize_roundtrips] cannot be used with point in time", validationException);
             request.setCcsMinimizeRoundtrips(false);
         }
         ExceptionsHelper.reThrowIfNotNull(validationException);
-
-        final IndicesOptions indicesOptions = request.indicesOptions();
-        final IndicesOptions stricterIndicesOptions = IndicesOptions.fromOptions(
-            indicesOptions.ignoreUnavailable(),
-            indicesOptions.allowNoIndices(),
-            false,
-            false,
-            false,
-            true,
-            true,
-            indicesOptions.ignoreThrottled()
-        );
-        request.indicesOptions(stricterIndicesOptions);
-        final SearchContextId searchContextId = request.pointInTimeBuilder().getSearchContextId(namedWriteableRegistry);
-        request.indices(searchContextId.getActualIndices());
     }
     /**
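The `preparePointInTime` hunk strips most REST-layer validation for point-in-time searches; only the `ccs_minimize_roundtrips` check remains. A hedged sketch of the invariant that used to be enforced here (simplified, not ES source):

```java
// A request carrying a point-in-time id must not also name concrete indices,
// since the PIT already pins the shards being read.
final class PitValidationSketch {
    static void validate(String[] indices, String pitId) {
        if (pitId != null && indices.length > 0) {
            throw new IllegalArgumentException("[indices] cannot be used with point in time");
        }
    }
}
```

Presumably the removed checks and the rewriting of `request.indices()` from the PIT's search context id now happen deeper in the search layer rather than at REST parse time; this hunk alone does not show where.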
diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java
index de2101f94de2a..acf2818dd8902 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java
@@ -14,7 +14,7 @@
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.Scope;
 import org.elasticsearch.rest.ServerlessScope;
-import org.elasticsearch.rest.action.RestChunkedToXContentListener;
+import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener;
 import org.elasticsearch.search.Scroll;
 import org.elasticsearch.xcontent.XContentParseException;
@@ -66,7 +66,7 @@
                 }
             }
         });
-        return channel -> client.searchScroll(searchScrollRequest, new RestChunkedToXContentListener<>(channel));
+        return channel -> client.searchScroll(searchScrollRequest, new RestRefCountedChunkedToXContentListener<>(channel));
     }
     @Override
diff --git a/server/src/main/java/org/elasticsearch/script/Script.java b/server/src/main/java/org/elasticsearch/script/Script.java
index d21cdc50e00b5..ced4b0158f62d 100644
--- a/server/src/main/java/org/elasticsearch/script/Script.java
+++ b/server/src/main/java/org/elasticsearch/script/Script.java
@@ -17,6 +17,7 @@
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
+import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.xcontent.AbstractObjectParser;
 import org.elasticsearch.xcontent.ObjectParser;
 import org.elasticsearch.xcontent.ObjectParser.ValueType;
@@ -27,7 +28,6 @@
 import org.elasticsearch.xcontent.XContentFactory;
 import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.xcontent.XContentParser.Token;
-import org.elasticsearch.xcontent.XContentParserConfiguration;
 import org.elasticsearch.xcontent.XContentType;
 import org.elasticsearch.xcontent.json.JsonXContent;
@@ -301,10 +301,10 @@ public static Script parse(Settings settings) {
             settings.toXContent(builder, ToXContent.EMPTY_PARAMS);
             builder.endObject();
             try (
-                InputStream stream = BytesReference.bytes(builder).streamInput();
-                XContentParser parser = JsonXContent.jsonXContent.createParser(
-                    XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE),
-                    stream
+                XContentParser parser = XContentHelper.createParserNotCompressed(
+                    LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG,
+                    BytesReference.bytes(builder),
+                    XContentType.JSON
                 )
             ) {
                 return parse(parser);
@@ -556,9 +556,9 @@ public Script(StreamInput in) throws IOException {
         this.lang = in.readOptionalString();
         this.idOrCode = in.readString();
         @SuppressWarnings("unchecked")
-        Map<String, String> options = (Map<String, String>) (Map<String, ?>) in.readMap();
+        Map<String, String> options = (Map<String, String>) (Map<String, ?>) in.readGenericMap();
         this.options = options;
-        this.params = in.readMap();
+        this.params = in.readGenericMap();
     }
     @Override
diff --git a/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java b/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java
index 9e1ab0efb543b..e5747e4ccff40 100644
--- a/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java
+++ b/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java
@@ -18,6 +18,7 @@
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
+import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.xcontent.ObjectParser;
 import org.elasticsearch.xcontent.ObjectParser.ValueType;
 import org.elasticsearch.xcontent.ParseField;
@@ -26,11 +27,9 @@
 import org.elasticsearch.xcontent.XContentFactory;
 import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.xcontent.XContentParser.Token;
-import org.elasticsearch.xcontent.XContentParserConfiguration;
 import org.elasticsearch.xcontent.XContentType;
 import java.io.IOException;
-import java.io.InputStream;
 import java.io.UncheckedIOException;
 import java.util.Collections;
 import java.util.HashMap;
@@ -186,9 +185,11 @@ private StoredScriptSource build(boolean ignoreEmpty) {
      */
     public static StoredScriptSource parse(BytesReference content, XContentType xContentType) {
         try (
-            InputStream stream = content.streamInput();
-            XContentParser parser = xContentType.xContent()
-                .createParser(XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), stream)
+            XContentParser parser = XContentHelper.createParserNotCompressed(
+                LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG,
+                content,
+                xContentType
+            )
         ) {
             Token token = parser.nextToken();
@@ -288,7 +289,7 @@ public StoredScriptSource(StreamInput in) throws IOException {
         this.lang = in.readString();
         this.source = in.readString();
         @SuppressWarnings("unchecked")
-        Map<String, String> options = (Map<String, String>) (Map<String, ?>) in.readMap();
+        Map<String, String> options = (Map<String, String>) (Map<String, ?>) in.readGenericMap();
         this.options = options;
     }
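Several hunks in this patch rename `in.readMap()` to `in.readGenericMap()`. A sketch of the distinction as I read it, using plain `java.io` stand-ins rather than the ES `StreamInput` API:

```java
// A "generic" map round-trips arbitrarily typed values via runtime type information
// in the stream, as opposed to readMap(keyReader, valueReader) variants that read
// strongly typed entries.
import java.io.IOException;
import java.io.ObjectInputStream;
import java.util.HashMap;
import java.util.Map;

final class GenericMapDemo {
    static Map<String, Object> readGenericMap(ObjectInputStream in) throws IOException, ClassNotFoundException {
        int size = in.readInt();
        Map<String, Object> map = new HashMap<>();
        for (int i = 0; i < size; i++) {
            map.put(in.readUTF(), in.readObject()); // value type recovered from the stream
        }
        return map;
    }
}
```

The rename appears to be purely mechanical here; the wire format of the affected constructors is unchanged.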
diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java
index a8721503c7454..773934615e051 100644
--- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java
+++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java
@@ -32,7 +32,6 @@
 import org.elasticsearch.index.fielddata.FieldDataContext;
 import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;
-import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.IdLoader;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.NestedLookup;
@@ -167,20 +166,13 @@ final class DefaultSearchContext extends SearchContext {
         this.indexShard = readerContext.indexShard();
         Engine.Searcher engineSearcher = readerContext.acquireSearcher("search");
-        int maximumNumberOfSlices;
-        if (hasSyntheticSource(indexService)) {
-            // accessing synthetic source is not thread safe
-            maximumNumberOfSlices = 1;
-        } else {
-            maximumNumberOfSlices = determineMaximumNumberOfSlices(
-                executor,
-                request,
-                resultsType,
-                enableQueryPhaseParallelCollection,
-                field -> getFieldCardinality(field, readerContext.indexService(), engineSearcher.getDirectoryReader())
-            );
-
-        }
+        int maximumNumberOfSlices = determineMaximumNumberOfSlices(
+            executor,
+            request,
+            resultsType,
+            enableQueryPhaseParallelCollection,
+            field -> getFieldCardinality(field, readerContext.indexService(), engineSearcher.getDirectoryReader())
+        );
         if (executor == null) {
             this.searcher = new ContextIndexSearcher(
                 engineSearcher.getIndexReader(),
@@ -222,14 +214,6 @@ final class DefaultSearchContext extends SearchContext {
         }
     }
-    private static boolean hasSyntheticSource(IndexService indexService) {
-        DocumentMapper documentMapper = indexService.mapperService().documentMapper();
-        if (documentMapper != null) {
-            return documentMapper.sourceMapper().isSynthetic();
-        }
-        return false;
-    }
     static long getFieldCardinality(String field, IndexService indexService, DirectoryReader directoryReader) {
         MappedFieldType mappedFieldType = indexService.mapperService().fieldType(field);
         if (mappedFieldType == null) {
diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java
index 7e1699307c5ee..d2fc20ab83269 100644
--- a/server/src/main/java/org/elasticsearch/search/SearchHit.java
+++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java
@@ -57,7 +57,6 @@
 import java.util.Objects;
 import static java.util.Collections.emptyMap;
-import static java.util.Collections.singletonMap;
 import static java.util.Collections.unmodifiableMap;
 import static org.elasticsearch.common.lucene.Lucene.readExplanation;
 import static org.elasticsearch.common.lucene.Lucene.writeExplanation;
@@ -76,29 +75,29 @@ public final class SearchHit implements Writeable, ToXContentObject {
     private final transient int docId;
     private static final float DEFAULT_SCORE = Float.NaN;
-    private float score = DEFAULT_SCORE;
+    private float score;
     private static final int NO_RANK = -1;
-    private int rank = NO_RANK;
+    private int rank;
     private final Text id;
     private final NestedIdentity nestedIdentity;
-    private long version = -1;
-    private long seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
-    private long primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
+    private long version;
+    private long seqNo;
+    private long primaryTerm;
     private BytesReference source;
-    private final Map<String, DocumentField> documentFields = new HashMap<>();
-    private final Map<String, DocumentField> metaFields = new HashMap<>();
+    private final Map<String, DocumentField> documentFields;
+    private final Map<String, DocumentField> metaFields;
-    private Map<String, HighlightField> highlightFields = null;
+    private Map<String, HighlightField> highlightFields;
-    private SearchSortValues sortValues = SearchSortValues.EMPTY;
+    private SearchSortValues sortValues;
-    private Map<String, Float> matchedQueries = Collections.emptyMap();
+    private Map<String, Float> matchedQueries;
     private Explanation explanation;
@@ -125,79 +124,152 @@ public SearchHit(int docId, String id) {
     }
     public SearchHit(int nestedTopDocId, String id, NestedIdentity nestedIdentity) {
-        this.docId = nestedTopDocId;
-        if (id != null) {
-            this.id = new Text(id);
-        } else {
-            this.id = null;
-        }
+        this(
+            nestedTopDocId,
+            DEFAULT_SCORE,
+            NO_RANK,
+            id == null ? null : new Text(id),
+            nestedIdentity,
+            -1,
+            SequenceNumbers.UNASSIGNED_SEQ_NO,
+            SequenceNumbers.UNASSIGNED_PRIMARY_TERM,
+            null,
+            null,
+            SearchSortValues.EMPTY,
+            Collections.emptyMap(),
+            null,
+            null,
+            null,
+            null,
+            null,
+            new HashMap<>(),
+            new HashMap<>()
+        );
+    }
+
+    public SearchHit(
+        int docId,
+        float score,
+        int rank,
+        Text id,
+        NestedIdentity nestedIdentity,
+        long version,
+        long seqNo,
+        long primaryTerm,
+        BytesReference source,
+        Map<String, HighlightField> highlightFields,
+        SearchSortValues sortValues,
+        Map<String, Float> matchedQueries,
+        Explanation explanation,
+        SearchShardTarget shard,
+        String index,
+        String clusterAlias,
+        Map<String, SearchHits> innerHits,
+        Map<String, DocumentField> documentFields,
+        Map<String, DocumentField> metaFields
+    ) {
+        this.docId = docId;
+        this.score = score;
+        this.rank = rank;
+        this.id = id;
         this.nestedIdentity = nestedIdentity;
+        this.version = version;
+        this.seqNo = seqNo;
+        this.primaryTerm = primaryTerm;
+        this.source = source;
+        this.highlightFields = highlightFields;
+        this.sortValues = sortValues;
+        this.matchedQueries = matchedQueries;
+        this.explanation = explanation;
+        this.shard = shard;
+        this.index = index;
+        this.clusterAlias = clusterAlias;
+        this.innerHits = innerHits;
+        this.documentFields = documentFields;
+        this.metaFields = metaFields;
     }
-    public SearchHit(StreamInput in) throws IOException {
-        docId = -1;
-        score = in.readFloat();
+    public static SearchHit readFrom(StreamInput in) throws IOException {
+        final float score = in.readFloat();
+        final int rank;
         if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
             rank = in.readVInt();
+        } else {
+            rank = NO_RANK;
         }
-        id = in.readOptionalText();
+        final Text id = in.readOptionalText();
         if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) {
             in.readOptionalText();
         }
-        nestedIdentity = in.readOptionalWriteable(NestedIdentity::new);
-        version = in.readLong();
-        seqNo = in.readZLong();
-        primaryTerm = in.readVLong();
-        source = in.readBytesReference();
+        final NestedIdentity nestedIdentity = in.readOptionalWriteable(NestedIdentity::new);
+        final long version = in.readLong();
+        final long seqNo = in.readZLong();
+        final long primaryTerm = in.readVLong();
+        BytesReference source = in.readBytesReference();
         if (source.length() == 0) {
             source = null;
         }
+        Explanation explanation = null;
         if (in.readBoolean()) {
             explanation = readExplanation(in);
         }
-        documentFields.putAll(in.readMap(DocumentField::new));
-        metaFields.putAll(in.readMap(DocumentField::new));
-
-        int size = in.readVInt();
-        if (size == 0) {
-            highlightFields = emptyMap();
-        } else if (size == 1) {
-            HighlightField field = new HighlightField(in);
-            highlightFields = singletonMap(field.name(), field);
-        } else {
-            Map<String, HighlightField> highlightFields = new HashMap<>();
-            for (int i = 0; i < size; i++) {
-                HighlightField field = new HighlightField(in);
-                highlightFields.put(field.name(), field);
-            }
-            this.highlightFields = unmodifiableMap(highlightFields);
-        }
-
-        sortValues = new SearchSortValues(in);
+        final Map<String, DocumentField> documentFields = in.readMap(DocumentField::new);
+        final Map<String, DocumentField> metaFields = in.readMap(DocumentField::new);
+        final Map<String, HighlightField> highlightFields = in.readMapValues(HighlightField::new, HighlightField::name);
+        final SearchSortValues sortValues = new SearchSortValues(in);
+        final Map<String, Float> matchedQueries;
         if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
             matchedQueries = in.readOrderedMap(StreamInput::readString, StreamInput::readFloat);
         } else {
-            size = in.readVInt();
-            matchedQueries = new LinkedHashMap<>(size);
+            int size = in.readVInt();
+            matchedQueries = Maps.newLinkedHashMapWithExpectedSize(size);
             for (int i = 0; i < size; i++) {
                 matchedQueries.put(in.readString(), Float.NaN);
             }
         }
-        // we call the setter here because that also sets the local index parameter
-        shard(in.readOptionalWriteable(SearchShardTarget::new));
-        size = in.readVInt();
+        final SearchShardTarget shardTarget = in.readOptionalWriteable(SearchShardTarget::new);
+        final String index;
+        final String clusterAlias;
+        if (shardTarget == null) {
+            index = null;
+            clusterAlias = null;
+        } else {
+            index = shardTarget.getIndex();
+            clusterAlias = shardTarget.getClusterAlias();
+        }
+        final Map<String, SearchHits> innerHits;
+        int size = in.readVInt();
         if (size > 0) {
             innerHits = Maps.newMapWithExpectedSize(size);
             for (int i = 0; i < size; i++) {
-                String key = in.readString();
-                SearchHits value = new SearchHits(in);
-                innerHits.put(key, value);
+                innerHits.put(in.readString(), new SearchHits(in));
             }
         } else {
             innerHits = null;
         }
+        return new SearchHit(
+            -1,
+            score,
+            rank,
+            id,
+            nestedIdentity,
+            version,
+            seqNo,
+            primaryTerm,
+            source,
+            unmodifiableMap(highlightFields),
+            sortValues,
+            matchedQueries,
+            explanation,
+            shardTarget,
+            index,
+            clusterAlias,
+            innerHits,
+            documentFields,
+            metaFields
+        );
     }
     private static final Text SINGLE_MAPPING_TYPE = new Text(MapperService.SINGLE_MAPPING_NAME);
@@ -863,12 +935,6 @@ public static void declareInnerHitsParseFields(ObjectParser<Map<String, Object>, Void> parser) {
     public static SearchHit createFromMap(Map<String, Object> values) {
         String id = get(Fields._ID, values, null);
-        NestedIdentity nestedIdentity = get(NestedIdentity._NESTED, values, null);
-        Map<String, DocumentField> metaFields = get(METADATA_FIELDS, values, Collections.emptyMap());
-        Map<String, DocumentField> documentFields = get(DOCUMENT_FIELDS, values, Collections.emptyMap());
-
-        SearchHit searchHit = new SearchHit(-1, id, nestedIdentity);
-        searchHit.addDocumentFields(documentFields, metaFields);
         String index = get(Fields._INDEX, values, null);
         String clusterAlias = null;
         if (index != null) {
@@ -880,27 +946,36 @@ public static SearchHit createFromMap(Map<String, Object> values) {
         }
         ShardId shardId = get(Fields._SHARD, values, null);
         String nodeId = get(Fields._NODE, values, null);
+        final SearchShardTarget shardTarget;
         if (shardId != null && nodeId != null) {
             assert shardId.getIndexName().equals(index);
-            searchHit.shard(new SearchShardTarget(nodeId, shardId, clusterAlias));
+            shardTarget = new SearchShardTarget(nodeId, shardId, clusterAlias);
+            index = shardTarget.getIndex();
+            clusterAlias = shardTarget.getClusterAlias();
         } else {
-            // these fields get set anyways when setting the shard target,
-            // but we set them explicitly when we don't have enough info to rebuild the shard target
-            searchHit.index = index;
-            searchHit.clusterAlias = clusterAlias;
-        }
-        searchHit.score(get(Fields._SCORE, values, DEFAULT_SCORE));
-        searchHit.setRank(get(Fields._RANK, values, NO_RANK));
-        searchHit.version(get(Fields._VERSION, values, -1L));
-        searchHit.setSeqNo(get(Fields._SEQ_NO, values, SequenceNumbers.UNASSIGNED_SEQ_NO));
-        searchHit.setPrimaryTerm(get(Fields._PRIMARY_TERM, values, SequenceNumbers.UNASSIGNED_PRIMARY_TERM));
-        searchHit.sortValues(get(Fields.SORT, values, SearchSortValues.EMPTY));
-        searchHit.highlightFields(get(Fields.HIGHLIGHT, values, null));
-        searchHit.sourceRef(get(SourceFieldMapper.NAME, values, null));
-        searchHit.explanation(get(Fields._EXPLANATION, values, null));
-        searchHit.setInnerHits(get(Fields.INNER_HITS, values, null));
-        searchHit.matchedQueries(get(Fields.MATCHED_QUERIES, values, null));
-        return searchHit;
+            shardTarget = null;
+        }
+        return new SearchHit(
+            -1,
+            get(Fields._SCORE, values, DEFAULT_SCORE),
+            get(Fields._RANK, values, NO_RANK),
+            id == null ? null : new Text(id),
+            get(NestedIdentity._NESTED, values, null),
+            get(Fields._VERSION, values, -1L),
+            get(Fields._SEQ_NO, values, SequenceNumbers.UNASSIGNED_SEQ_NO),
+            get(Fields._PRIMARY_TERM, values, SequenceNumbers.UNASSIGNED_PRIMARY_TERM),
+            get(SourceFieldMapper.NAME, values, null),
+            get(Fields.HIGHLIGHT, values, null),
+            get(Fields.SORT, values, SearchSortValues.EMPTY),
+            get(Fields.MATCHED_QUERIES, values, null),
+            get(Fields._EXPLANATION, values, null),
+            shardTarget,
+            index,
+            clusterAlias,
+            get(Fields.INNER_HITS, values, null),
+            get(DOCUMENT_FIELDS, values, Collections.emptyMap()),
+            get(METADATA_FIELDS, values, Collections.emptyMap())
+        );
     }
     @SuppressWarnings("unchecked")
diff --git a/server/src/main/java/org/elasticsearch/search/SearchHits.java b/server/src/main/java/org/elasticsearch/search/SearchHits.java
index 13fc9214f07bf..c689f928954d2 100644
--- a/server/src/main/java/org/elasticsearch/search/SearchHits.java
+++ b/server/src/main/java/org/elasticsearch/search/SearchHits.java
@@ -35,8 +35,8 @@ public final class SearchHits implements Writeable, ChunkedToXContent, Iterable<SearchHit> {
     public static final SearchHit[] EMPTY = new SearchHit[0];
-    public static final SearchHits EMPTY_WITH_TOTAL_HITS = new SearchHits(EMPTY, new TotalHits(0, Relation.EQUAL_TO), 0);
-    public static final SearchHits EMPTY_WITHOUT_TOTAL_HITS = new SearchHits(EMPTY, null, 0);
+    public static final SearchHits EMPTY_WITH_TOTAL_HITS = SearchHits.empty(new TotalHits(0, Relation.EQUAL_TO), 0);
+    public static final SearchHits EMPTY_WITHOUT_TOTAL_HITS = SearchHits.empty(null, 0);
     private final SearchHit[] hits;
     private final TotalHits totalHits;
@@ -48,6 +48,10 @@ public final class SearchHits implements Writeable, ChunkedToXContent, Iterable<SearchHit> {
     @Nullable
     private final Object[] collapseValues;
+    public static SearchHits empty(@Nullable TotalHits totalHits, float maxScore) {
+        return new SearchHits(EMPTY, totalHits, maxScore);
+    }
+
     public SearchHits(SearchHit[] hits, @Nullable TotalHits totalHits, float maxScore) {
         this(hits, totalHits, maxScore, null, null, null);
     }
@@ -82,7 +86,7 @@ public SearchHits(StreamInput in) throws IOException {
         } else {
             hits = new SearchHit[size];
             for (int i = 0; i < hits.length; i++) {
-                hits[i] = new SearchHit(in);
+                hits[i] = SearchHit.readFrom(in);
             }
         }
         sortFields = in.readOptionalArray(Lucene::readSortField, SortField[]::new);
@@ -235,7 +239,7 @@ public static SearchHits fromXContent(XContentParser parser) throws IOException
             }
         }
-        return new SearchHits(hits.toArray(new SearchHit[0]), totalHits, maxScore);
+        return new SearchHits(hits.toArray(SearchHits.EMPTY), totalHits, maxScore);
     }
     @Override
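The `SearchHit` change replaces a deserializing constructor that mutated half-initialized state with a `readFrom` static factory that reads every component into locals and then calls one complete constructor. A generic sketch of that shape (not ES source; `DataInput` stands in for `StreamInput`):

```java
import java.io.DataInput;
import java.io.IOException;

record HitSketch(float score, String id) {
    static HitSketch readFrom(DataInput in) throws IOException {
        final float score = in.readFloat(); // read all components first
        final String id = in.readUTF();
        return new HitSketch(score, id);    // construct once, fully initialized
    }
}
```

The payoff is that fields which previously had to stay mutable for the stream constructor can now be assigned exactly once, which is also why the field initializers in the first hunk move into the constructors.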
org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.search.suggest.completion.CompletionSuggestion; import org.elasticsearch.tasks.TaskCancelledException; -import org.elasticsearch.telemetry.tracing.SpanId; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.Scheduler.Cancellable; @@ -493,7 +492,7 @@ public void executeDfsPhase(ShardSearchRequest request, SearchShardTask task, Ac private DfsSearchResult executeDfsPhase(ShardSearchRequest request, SearchShardTask task) throws IOException { ReaderContext readerContext = createOrGetReaderContext(request); try (@SuppressWarnings("unused") // withScope call is necessary to instrument search execution - Releasable scope = tracer.withScope(SpanId.forTask(task)); + Releasable scope = tracer.withScope(task); Releasable ignored = readerContext.markAsUsed(getKeepAlive(request)); SearchContext context = createContext(readerContext, request, task, ResultsType.DFS, false) ) { @@ -665,9 +664,8 @@ private static void runAsync( */ private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchShardTask task) throws Exception { final ReaderContext readerContext = createOrGetReaderContext(request); - SpanId spanId = SpanId.forTask(task); try ( - Releasable scope = tracer.withScope(spanId); + Releasable scope = tracer.withScope(task); Releasable ignored = readerContext.markAsUsed(getKeepAlive(request)); SearchContext context = createContext(readerContext, request, task, ResultsType.QUERY, true) ) { @@ -680,7 +678,7 @@ private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchSh } afterQueryTime = executor.success(); } finally { - tracer.stopTrace(spanId); + tracer.stopTrace(task); } if (request.numberOfShards() == 1 && (request.source() == null || request.source().rankBuilder() == null)) { // we already have query results, but we can run fetch at the same time @@ -711,7 +709,7 @@ private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchSh private QueryFetchSearchResult executeFetchPhase(ReaderContext reader, SearchContext context, long afterQueryTime) { try ( - Releasable scope = tracer.withScope(SpanId.forTask(context.getTask())); + Releasable scope = tracer.withScope(context.getTask()); SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context, true, afterQueryTime) ) { fetchPhase.execute(context, shortcutDocIdsToLoad(context)); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AbstractAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/AbstractAggregationBuilder.java index f22b71c86dd11..494436e5c2052 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AbstractAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AbstractAggregationBuilder.java @@ -48,7 +48,7 @@ protected AbstractAggregationBuilder( protected AbstractAggregationBuilder(StreamInput in) throws IOException { super(in.readString()); factoriesBuilder = new AggregatorFactories.Builder(in); - metadata = in.readMap(); + metadata = in.readGenericMap(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java index 52f3dba1ccdd5..3def930f9e296 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java @@ -50,7 +50,7 @@ protected InternalAggregation(String name, Map metadata) { */ protected InternalAggregation(StreamInput in) throws IOException { name = in.readString(); - metadata = in.readMap(); + metadata = in.readGenericMap(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java index 59fec0dd1540a..b078b62c4b82d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java @@ -108,7 +108,7 @@ public CompositeAggregationBuilder(StreamInput in) throws IOException { } this.size = in.readVInt(); if (in.readBoolean()) { - this.after = in.readMap(); + this.after = in.readGenericMap(); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java index ab188eaaf6257..8fbb5d076a723 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java @@ -26,7 +26,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import org.elasticsearch.xcontent.ObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -46,15 +45,6 @@ public class HistogramAggregationBuilder extends ValuesSourceAggregationBuilder< HistogramAggregatorSupplier.class ); - private static final ObjectParser EXTENDED_BOUNDS_PARSER = new ObjectParser<>( - Histogram.EXTENDED_BOUNDS_FIELD.getPreferredName(), - () -> new double[] { Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY } - ); - static { - EXTENDED_BOUNDS_PARSER.declareDouble((bounds, d) -> bounds[0] = d, new ParseField("min")); - EXTENDED_BOUNDS_PARSER.declareDouble((bounds, d) -> bounds[1] = d, new ParseField("max")); - } - public static final ObjectParser PARSER = ObjectParser.fromBuilder( NAME, HistogramAggregationBuilder::new diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java index 487cc2bd11bd3..337afe3fbeebd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java @@ -94,7 +94,7 @@ public ScriptedMetricAggregationBuilder(StreamInput in) throws IOException { combineScript = in.readOptionalWriteable(Script::new); reduceScript = in.readOptionalWriteable(Script::new); if (in.readBoolean()) { - params = in.readMap(); + params = in.readGenericMap(); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AbstractPipelineAggregationBuilder.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AbstractPipelineAggregationBuilder.java index b3335dcbd5be5..0d901eaa1dc97 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AbstractPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AbstractPipelineAggregationBuilder.java @@ -45,7 +45,7 @@ protected AbstractPipelineAggregationBuilder(String name, String type, String[] */ protected AbstractPipelineAggregationBuilder(StreamInput in, String type) throws IOException { this(in.readString(), type, in.readStringArray()); - metadata = in.readMap(); + metadata = in.readGenericMap(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index c7077e4c867b0..0211e43933ec3 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -260,7 +260,7 @@ public SearchSourceBuilder(StreamInput in) throws IOException { fetchFields = in.readCollectionAsList(FieldAndFormat::new); } pointInTimeBuilder = in.readOptionalWriteable(PointInTimeBuilder::new); - runtimeMappings = in.readMap(); + runtimeMappings = in.readGenericMap(); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { if (in.getTransportVersion().before(TransportVersions.V_8_7_0)) { KnnSearchBuilder searchBuilder = in.readOptionalWriteable(KnnSearchBuilder::new); diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 5c98808c9c169..91e4fb791f62d 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -67,8 +67,8 @@ public void execute(SearchContext context, int[] docIdsToLoad) { if (docIdsToLoad == null || docIdsToLoad.length == 0) { // no individual hits to process, so we shortcut - SearchHits hits = new SearchHits(new SearchHit[0], context.queryResult().getTotalHits(), context.queryResult().getMaxScore()); - context.fetchResult().shardResult(hits, null); + context.fetchResult() + .shardResult(SearchHits.empty(context.queryResult().getTotalHits(), context.queryResult().getMaxScore()), null); return; } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java index 5a26191b8eeec..d91b6e8b4e4a3 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java @@ -158,7 +158,7 @@ protected AbstractHighlighterBuilder(StreamInput in) throws IOException { noMatchSize(in.readOptionalVInt()); phraseLimit(in.readOptionalVInt()); if (in.readBoolean()) { - options(in.readMap()); + options(in.readGenericMap()); } requireFieldMatch(in.readOptionalBoolean()); maxAnalyzedOffset(in.readOptionalInt()); diff --git a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java index c47f53ec503b9..d6a3334dd035b 100644 --- 
a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -22,6 +22,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; @@ -196,9 +197,11 @@ public Query rewrite(Query original) throws IOException { if (profiler != null) { profiler.startRewriteTime(); } - try { return super.rewrite(original); + } catch (TimeExceededException e) { + timeExceeded = true; + return new MatchNoDocsQuery("rewrite timed out"); } finally { if (profiler != null) { profiler.stopAndAddRewriteTime(); @@ -297,10 +300,10 @@ private static LeafSlice[] computeSlices(List leaves, int min @Override public T search(Query query, CollectorManager collectorManager) throws IOException { final C firstCollector = collectorManager.newCollector(); + // Take advantage of the few extra rewrite rules of ConstantScoreQuery when scores are not needed. + query = firstCollector.scoreMode().needsScores() ? rewrite(query) : rewrite(new ConstantScoreQuery(query)); final Weight weight; try { - // Take advantage of the few extra rewrite rules of ConstantScoreQuery when score are not needed. - query = firstCollector.scoreMode().needsScores() ? rewrite(query) : rewrite(new ConstantScoreQuery(query)); weight = createWeight(query, firstCollector.scoreMode(), 1); } catch (@SuppressWarnings("unused") TimeExceededException e) { timeExceeded = true; diff --git a/server/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java b/server/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java deleted file mode 100644 index 392f60ba36cd0..0000000000000 --- a/server/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.search.internal; - -import org.elasticsearch.action.search.SearchResponseSections; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.profile.SearchProfileResults; -import org.elasticsearch.search.suggest.Suggest; - -import java.io.IOException; - -/** - * {@link SearchResponseSections} subclass that can be serialized over the wire.
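The `ContextIndexSearcher` hunk above extends the searcher's existing timeout handling to the rewrite phase: when the time budget is exhausted mid-rewrite, it records the timeout and degrades to a query that matches nothing, so the search returns partial results with `timed_out` set rather than failing outright. A standalone sketch of that catch-and-degrade idea, with the hypothetical `TimeBudgetExceededException` standing in for the searcher's private `TimeExceededException`:

```java
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;

import java.io.IOException;

// Stand-in for the searcher's private TimeExceededException.
class TimeBudgetExceededException extends RuntimeException {}

// Sketch: if the time budget runs out while rewriting, record the partial-result
// condition and return a query matching nothing, so the search completes with
// timed_out=true instead of propagating the exception.
class TimeoutAwareRewriter {
    private final IndexSearcher searcher;
    private boolean timeExceeded;

    TimeoutAwareRewriter(IndexSearcher searcher) {
        this.searcher = searcher;
    }

    Query rewriteWithTimeout(Query original) throws IOException {
        try {
            return searcher.rewrite(original);
        } catch (TimeBudgetExceededException e) {
            timeExceeded = true; // later surfaced as the response's timed_out flag
            return new MatchNoDocsQuery("rewrite timed out");
        }
    }

    boolean timeExceeded() {
        return timeExceeded;
    }
}
```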
- */ -public class InternalSearchResponse extends SearchResponseSections implements Writeable { - public static final InternalSearchResponse EMPTY_WITH_TOTAL_HITS = new InternalSearchResponse( - SearchHits.EMPTY_WITH_TOTAL_HITS, - null, - null, - null, - false, - null, - 1 - ); - - public static final InternalSearchResponse EMPTY_WITHOUT_TOTAL_HITS = new InternalSearchResponse( - SearchHits.EMPTY_WITHOUT_TOTAL_HITS, - null, - null, - null, - false, - null, - 1 - ); - - public InternalSearchResponse( - SearchHits hits, - InternalAggregations aggregations, - Suggest suggest, - SearchProfileResults profileResults, - boolean timedOut, - Boolean terminatedEarly, - int numReducePhases - ) { - super(hits, aggregations, suggest, timedOut, terminatedEarly, profileResults, numReducePhases); - } - - public InternalSearchResponse(StreamInput in) throws IOException { - super( - new SearchHits(in), - in.readBoolean() ? InternalAggregations.readFrom(in) : null, - in.readBoolean() ? new Suggest(in) : null, - in.readBoolean(), - in.readOptionalBoolean(), - in.readOptionalWriteable(SearchProfileResults::new), - in.readVInt() - ); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - hits.writeTo(out); - out.writeOptionalWriteable((InternalAggregations) aggregations); - out.writeOptionalWriteable(suggest); - out.writeBoolean(timedOut); - out.writeOptionalBoolean(terminatedEarly); - out.writeOptionalWriteable(profileResults); - out.writeVInt(numReducePhases); - } -} diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 01988003f4dd0..18ae708d8fec3 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -182,6 +182,10 @@ public static long computeWaitForCheckpoint(Map indexToWaitForCh } public ShardSearchRequest(ShardId shardId, long nowInMillis, AliasFilter aliasFilter) { + this(shardId, nowInMillis, aliasFilter, null); + } + + public ShardSearchRequest(ShardId shardId, long nowInMillis, AliasFilter aliasFilter, String clusterAlias) { this( OriginalIndices.NONE, shardId, @@ -195,7 +199,7 @@ public ShardSearchRequest(ShardId shardId, long nowInMillis, AliasFilter aliasFi true, null, nowInMillis, - null, + clusterAlias, null, null, SequenceNumbers.UNASSIGNED_SEQ_NO, diff --git a/server/src/main/java/org/elasticsearch/search/lookup/SourceFilter.java b/server/src/main/java/org/elasticsearch/search/lookup/SourceFilter.java index a58e7fa7d4a2b..3bf32159c1676 100644 --- a/server/src/main/java/org/elasticsearch/search/lookup/SourceFilter.java +++ b/server/src/main/java/org/elasticsearch/search/lookup/SourceFilter.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -94,12 +95,15 @@ private Function buildBytesFilter() { BytesStreamOutput streamOutput = new BytesStreamOutput(1024); XContent xContent = in.sourceContentType().xContent(); XContentBuilder builder = new XContentBuilder(xContent, streamOutput); - XContentParser parser = xContent.createParser(parserConfig, 
in.internalSourceRef().streamInput()); - if ((parser.currentToken() == null) && (parser.nextToken() == null)) { - return Source.empty(in.sourceContentType()); + try ( + XContentParser parser = XContentHelper.createParserNotCompressed(parserConfig, in.internalSourceRef(), xContent.type()) + ) { + if ((parser.currentToken() == null) && (parser.nextToken() == null)) { + return Source.empty(in.sourceContentType()); + } + builder.copyCurrentStructure(parser); + return Source.fromBytes(BytesReference.bytes(builder)); } - builder.copyCurrentStructure(parser); - return Source.fromBytes(BytesReference.bytes(builder)); } catch (IOException e) { throw new UncheckedIOException(e); } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java index e088948b18e03..40ff9c6eaf6ee 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java @@ -271,9 +271,7 @@ public Option(int docID, Text text, float score, Map> contex public Option(StreamInput in) throws IOException { super(in); this.doc = Lucene.readScoreDoc(in); - if (in.readBoolean()) { - this.hit = new SearchHit(in); - } + this.hit = in.readOptionalWriteable(SearchHit::readFrom); int contextSize = in.readInt(); this.contexts = Maps.newLinkedHashMapWithExpectedSize(contextSize); for (int i = 0; i < contextSize; i++) { diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java index 10525d7dc89d9..fe1502f53d9b6 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java @@ -127,7 +127,7 @@ public PhraseSuggestionBuilder(StreamInput in) throws IOException { if (in.readBoolean()) { collateQuery = new Script(in); } - collateParams = in.readMap(); + collateParams = in.readGenericMap(); collatePrune = in.readOptionalBoolean(); int generatorsEntries = in.readVInt(); for (int i = 0; i < generatorsEntries; i++) { diff --git a/server/src/main/java/org/elasticsearch/snapshots/PausedSnapshotException.java b/server/src/main/java/org/elasticsearch/snapshots/PausedSnapshotException.java new file mode 100644 index 0000000000000..8a268a5d52078 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/snapshots/PausedSnapshotException.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
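Alongside the `readGenericMap` migrations, the `CompletionSuggestion.Option` hunk above replaces a hand-rolled presence check (`if (in.readBoolean()) { hit = new SearchHit(in); }`) with `readOptionalWriteable`, which encapsulates the same boolean-then-payload encoding behind one call on each side of the wire. A minimal sketch of the idiom with a hypothetical `Payload` type:

```java
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

// Hypothetical holder demonstrating the optional-writeable idiom: a presence
// boolean followed by the payload, symmetric on both sides of the wire.
public class OptionalPayloadHolder implements Writeable {

    public record Payload(String value) implements Writeable {
        public Payload(StreamInput in) throws IOException {
            this(in.readString());
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(value);
        }
    }

    private final Payload payload; // null when absent

    public OptionalPayloadHolder(StreamInput in) throws IOException {
        this.payload = in.readOptionalWriteable(Payload::new);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeOptionalWriteable(payload); // writes false, or true followed by the payload
    }
}
```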
+ */ + +package org.elasticsearch.snapshots; + +public final class PausedSnapshotException extends RuntimeException { + public PausedSnapshotException() { + super("paused"); + } +} diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java index 6414e0d2f1779..c09719ec48039 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java @@ -512,7 +512,7 @@ public static SnapshotInfo readFrom(final StreamInput in) throws IOException { final List shardFailures = in.readCollectionAsImmutableList(SnapshotShardFailure::new); final IndexVersion version = in.readBoolean() ? IndexVersion.readVersion(in) : null; final Boolean includeGlobalState = in.readOptionalBoolean(); - final Map userMetadata = in.readMap(); + final Map userMetadata = in.readGenericMap(); final List dataStreams = in.readStringCollectionAsImmutableList(); final List featureStates = in.readCollectionAsImmutableList(SnapshotFeatureInfo::new); final Map indexSnapshotDetails = in.readImmutableMap(IndexSnapshotDetails::new); diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 0f7c4f71a089c..7b3a83dfc9bb3 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -136,7 +136,11 @@ public void clusterChanged(ClusterChangedEvent event) { cancelRemoved(currentSnapshots); for (final var oneRepoSnapshotsInProgress : currentSnapshots.entriesByRepo()) { for (final var snapshotsInProgressEntry : oneRepoSnapshotsInProgress) { - handleUpdatedSnapshotsInProgressEntry(localNodeId, snapshotsInProgressEntry); + handleUpdatedSnapshotsInProgressEntry( + localNodeId, + currentSnapshots.isNodeIdForRemoval(localNodeId), + snapshotsInProgressEntry + ); } } } @@ -223,7 +227,7 @@ private void cancelRemoved(SnapshotsInProgress snapshotsInProgress) { } } - private void handleUpdatedSnapshotsInProgressEntry(String localNodeId, SnapshotsInProgress.Entry entry) { + private void handleUpdatedSnapshotsInProgressEntry(String localNodeId, boolean removingLocalNode, SnapshotsInProgress.Entry entry) { if (entry.isClone()) { // This is a snapshot clone, it will be executed on the current master return; @@ -236,7 +240,11 @@ private void handleUpdatedSnapshotsInProgressEntry(String localNodeId, Snapshots return; } - startNewShardSnapshots(localNodeId, entry); + if (removingLocalNode) { + pauseShardSnapshots(localNodeId, entry); + } else { + startNewShardSnapshots(localNodeId, entry); + } } case ABORTED -> { // Abort all running shards for this snapshot @@ -249,7 +257,13 @@ private void handleUpdatedSnapshotsInProgressEntry(String localNodeId, Snapshots // due to CS batching we might have missed the INIT state and straight went into ABORTED // notify master that abort has completed by moving to FAILED if (shard.getValue().state() == ShardState.ABORTED && localNodeId.equals(shard.getValue().nodeId())) { - notifyUnsuccessfulSnapshotShard(snapshot, sid, shard.getValue().reason(), shard.getValue().generation()); + notifyUnsuccessfulSnapshotShard( + snapshot, + sid, + ShardState.FAILED, + shard.getValue().reason(), + shard.getValue().generation() + ); } } else { snapshotStatus.abortIfNotCompleted("snapshot has been aborted", notifyOnAbortTaskRunner::enqueueTask); @@ 
-263,19 +277,20 @@ private void handleUpdatedSnapshotsInProgressEntry(String localNodeId, Snapshots private void startNewShardSnapshots(String localNodeId, SnapshotsInProgress.Entry entry) { Map shardsToStart = null; final Snapshot snapshot = entry.snapshot(); - final var runningShardsForSnapshot = shardSnapshots.getOrDefault(snapshot, emptyMap()).keySet(); + final var runningShardsForSnapshot = shardSnapshots.getOrDefault(snapshot, emptyMap()); for (var scheduledShard : entry.shards().entrySet()) { // Add all new shards to start processing on final var shardId = scheduledShard.getKey(); final var shardSnapshotStatus = scheduledShard.getValue(); - if (shardSnapshotStatus.state() == ShardState.INIT - && localNodeId.equals(shardSnapshotStatus.nodeId()) - && runningShardsForSnapshot.contains(shardId) == false) { - logger.trace("[{}] adding shard to the queue", shardId); - if (shardsToStart == null) { - shardsToStart = new HashMap<>(); + if (shardSnapshotStatus.state() == ShardState.INIT && localNodeId.equals(shardSnapshotStatus.nodeId())) { + final var runningShard = runningShardsForSnapshot.get(shardId); + if (runningShard == null || runningShard.isPaused()) { + logger.trace("[{}] adding [{}] shard to the queue", shardId, runningShard == null ? "new" : "paused"); + if (shardsToStart == null) { + shardsToStart = new HashMap<>(); + } + shardsToStart.put(shardId, shardSnapshotStatus.generation()); } - shardsToStart.put(shardId, shardSnapshotStatus.generation()); } } if (shardsToStart == null) { @@ -303,6 +318,40 @@ private void startNewShardSnapshots(String localNodeId, SnapshotsInProgress.Entr threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> shardSnapshotTasks.forEach(Runnable::run)); } + private void pauseShardSnapshots(String localNodeId, SnapshotsInProgress.Entry entry) { + final var localShardSnapshots = shardSnapshots.getOrDefault(entry.snapshot(), Map.of()); + + for (final Map.Entry shardEntry : entry.shards().entrySet()) { + final ShardId shardId = shardEntry.getKey(); + final ShardSnapshotStatus masterShardSnapshotStatus = shardEntry.getValue(); + + if (masterShardSnapshotStatus.state() != ShardState.INIT) { + // shard snapshot not currently scheduled by master + continue; + } + + if (localNodeId.equals(masterShardSnapshotStatus.nodeId()) == false) { + // shard snapshot scheduled on a different node + continue; + } + + final var localShardSnapshotStatus = localShardSnapshots.get(shardId); + if (localShardSnapshotStatus == null) { + // shard snapshot scheduled but not currently running, pause immediately without starting + notifyUnsuccessfulSnapshotShard( + entry.snapshot(), + shardId, + ShardState.PAUSED_FOR_NODE_REMOVAL, + "paused", + masterShardSnapshotStatus.generation() + ); + } else { + // shard snapshot currently running, mark for pause + localShardSnapshotStatus.pauseIfNotCompleted(notifyOnAbortTaskRunner::enqueueTask); + } + } + } + private Runnable newShardSnapshotTask( final ShardId shardId, final Snapshot snapshot, @@ -335,15 +384,22 @@ public void onResponse(ShardSnapshotResult shardSnapshotResult) { @Override public void onFailure(Exception e) { final String failure; + final Stage nextStage; if (e instanceof AbortedSnapshotException) { + nextStage = Stage.FAILURE; failure = "aborted"; logger.debug(() -> format("[%s][%s] aborted shard snapshot", shardId, snapshot), e); + } else if (e instanceof PausedSnapshotException) { + nextStage = Stage.PAUSED; + failure = "paused for removal of node holding primary"; + logger.debug(() -> format("[%s][%s] pausing shard 
snapshot", shardId, snapshot), e); } else { + nextStage = Stage.FAILURE; failure = summarizeFailure(e); logger.warn(() -> format("[%s][%s] failed to snapshot shard", shardId, snapshot), e); } - snapshotStatus.moveToFailed(threadPool.absoluteTimeInMillis(), failure); - notifyUnsuccessfulSnapshotShard(snapshot, shardId, failure, snapshotStatus.generation()); + final var shardState = snapshotStatus.moveToUnsuccessful(nextStage, failure, threadPool.absoluteTimeInMillis()); + notifyUnsuccessfulSnapshotShard(snapshot, shardId, shardState, failure, snapshotStatus.generation()); } }); } @@ -543,6 +599,19 @@ private void syncShardStatsOnNewMaster(List entries) notifyUnsuccessfulSnapshotShard( snapshot.snapshot(), shardId, + ShardState.FAILED, + indexShardSnapshotStatus.getFailure(), + localShard.getValue().generation() + ); + } else if (stage == Stage.PAUSED) { + // but we think the shard has paused - we need to make new master know that + logger.debug(""" + [{}] new master thinks the shard [{}] is still running but the shard paused locally, updating status on \ + master""", snapshot.snapshot(), shardId); + notifyUnsuccessfulSnapshotShard( + snapshot.snapshot(), + shardId, + ShardState.PAUSED_FOR_NODE_REMOVAL, indexShardSnapshotStatus.getFailure(), localShard.getValue().generation() ); @@ -569,13 +638,15 @@ private void notifySuccessfulSnapshotShard(final Snapshot snapshot, final ShardI private void notifyUnsuccessfulSnapshotShard( final Snapshot snapshot, final ShardId shardId, + final ShardState shardState, final String failure, final ShardGeneration generation ) { + assert shardState == ShardState.FAILED || shardState == ShardState.PAUSED_FOR_NODE_REMOVAL : shardState; sendSnapshotShardUpdate( snapshot, shardId, - new ShardSnapshotStatus(clusterService.localNode().getId(), ShardState.FAILED, generation, failure) + new ShardSnapshotStatus(clusterService.localNode().getId(), shardState, generation, failure) ); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index f62061f5d5b4b..f973d456a6b79 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -238,6 +238,11 @@ public SnapshotsService( this.systemIndices = systemIndices; this.masterServiceTaskQueue = clusterService.createTaskQueue("snapshots-service", Priority.NORMAL, new SnapshotTaskExecutor()); + this.updateNodeIdsToRemoveQueue = clusterService.createTaskQueue( + "snapshots-service-node-ids", + Priority.NORMAL, + UpdateNodeIdsForRemovalTask::executeBatch + ); } /** @@ -829,8 +834,19 @@ public void applyClusterState(ClusterChangedEvent event) { final boolean newMaster = event.previousState().nodes().isLocalNodeElectedMaster() == false; processExternalChanges( newMaster || removedNodesCleanupNeeded(snapshotsInProgress, event.nodesDelta().removedNodes()), - event.routingTableChanged() && waitingShardsStartedOrUnassigned(snapshotsInProgress, event) + snapshotsInProgress.nodeIdsForRemovalChanged(SnapshotsInProgress.get(event.previousState())) + || (event.routingTableChanged() && waitingShardsStartedOrUnassigned(snapshotsInProgress, event)) ); + + if (newMaster + || event.state().metadata().nodeShutdowns().equals(event.previousState().metadata().nodeShutdowns()) == false + || supportsNodeRemovalTracking(event.state()) != supportsNodeRemovalTracking(event.previousState())) { + updateNodeIdsToRemoveQueue.submitTask( + 
"SnapshotsService#updateNodeIdsToRemove", + new UpdateNodeIdsForRemovalTask(), + null + ); + } } else { final List readyToResolveListeners = new ArrayList<>(); // line-up mutating concurrent operations which can be in form of clusterApplierService and masterService tasks @@ -1046,6 +1062,7 @@ public ClusterState execute(ClusterState currentState) { snapshot, routingTable, nodes, + snapshots::isNodeIdForRemoval, knownFailures ); if (shards != null) { @@ -1130,6 +1147,7 @@ private static ImmutableOpenMap processWaitingShar SnapshotsInProgress.Entry entry, RoutingTable routingTable, DiscoveryNodes nodes, + Predicate nodeIdRemovalPredicate, Map knownFailures ) { assert entry.isClone() == false : "clones take a different path"; @@ -1160,19 +1178,30 @@ private static ImmutableOpenMap processWaitingShar snapshotChanged = true; shards.put(shardId, knownFailure); } - } else if (shardStatus.state() == ShardState.WAITING) { + } else if (shardStatus.state() == ShardState.WAITING || shardStatus.state() == ShardState.PAUSED_FOR_NODE_REMOVAL) { IndexRoutingTable indexShardRoutingTable = routingTable.index(shardId.getIndex()); if (indexShardRoutingTable != null) { IndexShardRoutingTable shardRouting = indexShardRoutingTable.shard(shardId.id()); if (shardRouting != null && shardRouting.primaryShard() != null) { - if (shardRouting.primaryShard().started()) { + final var primaryNodeId = shardRouting.primaryShard().currentNodeId(); + if (nodeIdRemovalPredicate.test(primaryNodeId)) { + if (shardStatus.state() == ShardState.PAUSED_FOR_NODE_REMOVAL) { + // Shard that we are waiting for is on a node marked for removal, keep it as PAUSED_FOR_REMOVAL + shards.put(shardId, shardStatus); + } else { + // Shard that we are waiting for is on a node marked for removal, move it to PAUSED_FOR_REMOVAL + snapshotChanged = true; + shards.put( + shardId, + new ShardSnapshotStatus(primaryNodeId, ShardState.PAUSED_FOR_NODE_REMOVAL, shardStatus.generation()) + ); + } + continue; + } else if (shardRouting.primaryShard().started()) { // Shard that we were waiting for has started on a node, let's process it snapshotChanged = true; logger.trace("starting shard that we were waiting for [{}] on node [{}]", shardId, shardStatus.nodeId()); - shards.put( - shardId, - new ShardSnapshotStatus(shardRouting.primaryShard().currentNodeId(), shardStatus.generation()) - ); + shards.put(shardId, new ShardSnapshotStatus(primaryNodeId, shardStatus.generation())); continue; } else if (shardRouting.primaryShard().initializing() || shardRouting.primaryShard().relocating()) { // Shard that we were waiting for hasn't started yet or still relocating - will continue to wait @@ -1225,7 +1254,7 @@ private static boolean waitingShardsStartedOrUnassigned(SnapshotsInProgress snap if (entry.state() == State.STARTED && entry.isClone() == false) { for (Map.Entry shardStatus : entry.shardsByRepoShardId().entrySet()) { final ShardState state = shardStatus.getValue().state(); - if (state != ShardState.WAITING && state != ShardState.QUEUED) { + if (state != ShardState.WAITING && state != ShardState.QUEUED && state != ShardState.PAUSED_FOR_NODE_REMOVAL) { continue; } final RepositoryShardId shardId = shardStatus.getKey(); @@ -1234,11 +1263,13 @@ private static boolean waitingShardsStartedOrUnassigned(SnapshotsInProgress snap .getRoutingTable() .index(entry.indexByName(shardId.indexName())); if (indexShardRoutingTable == null) { - // index got removed concurrently and we have to fail WAITING or QUEUED state shards + // index got removed concurrently and we have to fail 
WAITING, QUEUED and PAUSED_FOR_REMOVAL state shards return true; } ShardRouting shardRouting = indexShardRoutingTable.shard(shardId.shardId()).primaryShard(); - if (shardRouting != null && (shardRouting.started() || shardRouting.unassigned())) { + if (shardRouting != null + && (shardRouting.started() && snapshotsInProgress.isNodeIdForRemoval(shardRouting.currentNodeId()) == false + || shardRouting.unassigned())) { return true; } } @@ -2870,7 +2901,11 @@ private static ImmutableOpenMap nodeIdRemovalPredicate + ) { ShardSnapshotStatus shardSnapshotStatus; if (primary == null || primary.assignedToNode() == false) { shardSnapshotStatus = new ShardSnapshotStatus(null, ShardState.MISSING, shardRepoGeneration, "primary shard is not allocated"); } else if (primary.relocating() || primary.initializing()) { shardSnapshotStatus = new ShardSnapshotStatus(primary.currentNodeId(), ShardState.WAITING, shardRepoGeneration); + } else if (nodeIdRemovalPredicate.test(primary.currentNodeId())) { + shardSnapshotStatus = new ShardSnapshotStatus(primary.currentNodeId(), ShardState.PAUSED_FOR_NODE_REMOVAL, shardRepoGeneration); } else if (primary.started() == false) { shardSnapshotStatus = new ShardSnapshotStatus( primary.currentNodeId(), @@ -3044,6 +3086,9 @@ static final class SnapshotShardsUpdateContext { // initial cluster state for update computation private final ClusterState initialState; + // tests whether node IDs are currently marked for removal + private final Predicate nodeIdRemovalPredicate; + // updates outstanding to be applied to existing snapshot entries private final Map> updatesByRepo; @@ -3059,6 +3104,7 @@ static final class SnapshotShardsUpdateContext { ) { this.batchExecutionContext = batchExecutionContext; this.initialState = batchExecutionContext.initialState(); + this.nodeIdRemovalPredicate = SnapshotsInProgress.get(initialState)::isNodeIdForRemoval; this.rerouteRunnable = new RunOnce(rerouteRunnable); // RunOnce to avoid enqueueing O(#shards) listeners this.updatesByRepo = new HashMap<>(); for (final var taskContext : batchExecutionContext.taskContexts()) { @@ -3090,7 +3136,7 @@ SnapshotsInProgress computeUpdatedState() { changedCount, startedCount ); - return updated; + return supportsNodeRemovalTracking(initialState) ? 
updated.withUpdatedNodeIdsForRemoval(initialState) : updated; } return existing; } @@ -3237,14 +3283,24 @@ private void executeShardSnapshotUpdate( return; } - logger.trace( - "[{}] Updating shard [{}] with status [{}]", - updateSnapshotState.snapshot, - updatedShard, - updateSnapshotState.updatedState.state() - ); + final ShardSnapshotStatus updatedState; + if (existing.state() == ShardState.ABORTED + && updateSnapshotState.updatedState.state() == ShardState.PAUSED_FOR_NODE_REMOVAL) { + // concurrently pausing the shard snapshot due to node shutdown and aborting the snapshot - this shard is no longer + // actively snapshotting but we don't want it to resume, so mark it as FAILED since it didn't complete + updatedState = new ShardSnapshotStatus( + updateSnapshotState.updatedState.nodeId(), + ShardState.FAILED, + updateSnapshotState.updatedState.generation(), + "snapshot aborted" + ); + } else { + updatedState = updateSnapshotState.updatedState; + } + + logger.trace("[{}] Updating shard [{}] with status [{}]", updateSnapshotState.snapshot, updatedShard, updatedState.state()); changedCount++; - newStates.get().put(updatedShard, updateSnapshotState.updatedState); + newStates.get().put(updatedShard, updatedState); executedUpdates.add(updateSnapshotState); } @@ -3308,7 +3364,7 @@ private void startShardSnapshot(RepositoryShardId repoShardId, ShardGeneration g } else { shardRouting = indexRouting.shard(repoShardId.shardId()).primaryShard(); } - final ShardSnapshotStatus shardSnapshotStatus = initShardSnapshotStatus(generation, shardRouting); + final ShardSnapshotStatus shardSnapshotStatus = initShardSnapshotStatus(generation, shardRouting, nodeIdRemovalPredicate); final ShardId routingShardId = shardRouting != null ? shardRouting.shardId() : new ShardId(index, repoShardId.shardId()); if (shardSnapshotStatus.isActive()) { startShardOperation(shardsBuilder(), routingShardId, shardSnapshotStatus); @@ -3924,4 +3980,36 @@ private SnapshotsInProgress createSnapshot( return res; } } + + private record UpdateNodeIdsForRemovalTask() implements ClusterStateTaskListener { + @Override + public void onFailure(Exception e) { + // must be a master failover, and the new master will retry so nbd + assert MasterService.isPublishFailureException(e) : e; + } + + static ClusterState executeBatch( + ClusterStateTaskExecutor.BatchExecutionContext batchExecutionContext + ) { + for (ClusterStateTaskExecutor.TaskContext taskContext : batchExecutionContext.taskContexts()) { + taskContext.success(() -> {}); + } + + final var clusterState = batchExecutionContext.initialState(); + if (supportsNodeRemovalTracking(clusterState)) { + final var snapshotsInProgress = SnapshotsInProgress.get(clusterState); + final var newSnapshotsInProgress = snapshotsInProgress.withUpdatedNodeIdsForRemoval(clusterState); + if (newSnapshotsInProgress != snapshotsInProgress) { + return ClusterState.builder(clusterState).putCustom(SnapshotsInProgress.TYPE, newSnapshotsInProgress).build(); + } + } + return clusterState; + } + } + + private static boolean supportsNodeRemovalTracking(ClusterState clusterState) { + return clusterState.getMinTransportVersion().onOrAfter(TransportVersions.SNAPSHOTS_IN_PROGRESS_TRACKING_REMOVING_NODES_ADDED); + } + + private final MasterServiceTaskQueue updateNodeIdsToRemoveQueue; } diff --git a/server/src/main/java/org/elasticsearch/tasks/Task.java b/server/src/main/java/org/elasticsearch/tasks/Task.java index 3726ba265e433..83ee08574df4e 100644 --- a/server/src/main/java/org/elasticsearch/tasks/Task.java +++ 
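Taken together, the snapshot hunks above introduce a pause path alongside abort: shard snapshots on a node that is marked for removal move to `PAUSED_FOR_NODE_REMOVAL` rather than `FAILED`, and can be re-queued once the node is usable again or the primary relocates. A condensed, hypothetical sketch of the transitions these changes encode (the real logic is spread across `SnapshotShardsService` and `SnapshotsService` and also consults routing state):

```java
// Hypothetical condensation of the pause transitions; ShardState mirrors the
// real enum values referenced in the hunks above.
enum ShardState { INIT, WAITING, PAUSED_FOR_NODE_REMOVAL, ABORTED, FAILED }

final class PauseTransitions {
    static ShardState next(ShardState current, boolean nodeMarkedForRemoval) {
        if (current == ShardState.ABORTED && nodeMarkedForRemoval) {
            // an aborted shard must never resume, so a concurrent pause collapses to FAILED
            return ShardState.FAILED;
        }
        if ((current == ShardState.INIT || current == ShardState.WAITING) && nodeMarkedForRemoval) {
            // pause instead of snapshotting on a node that is about to leave
            return ShardState.PAUSED_FOR_NODE_REMOVAL;
        }
        if (current == ShardState.PAUSED_FOR_NODE_REMOVAL && nodeMarkedForRemoval == false) {
            // the primary is usable again (relocated, or the removal was cancelled): re-queue
            return ShardState.INIT;
        }
        return current;
    }
}
```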
b/server/src/main/java/org/elasticsearch/tasks/Task.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.telemetry.tracing.Traceable; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; @@ -21,7 +22,7 @@ /** * Current task information */ -public class Task { +public class Task implements Traceable { /** * The request header to mark tasks with specific ids @@ -265,4 +266,9 @@ public TaskResult result(DiscoveryNode node, ActionResponse response) throws IOE throw new IllegalStateException("response has to implement ToXContent to be able to store the results"); } } + + @Override + public String getSpanId() { + return "task-" + getId(); + } } diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java index e0ef4feb0ae35..377c7b3847b0b 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java @@ -67,7 +67,7 @@ public static TaskInfo from(StreamInput in) throws IOException { return new TaskInfo( taskId, in.readString(), - in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_055) ? in.readString() : taskId.getNodeId(), + in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061) ? in.readString() : taskId.getNodeId(), in.readString(), in.readOptionalString(), in.readOptionalNamedWriteable(Task.Status.class), @@ -84,7 +84,7 @@ public static TaskInfo from(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { taskId.writeTo(out); out.writeString(type); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_055)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeString(node); } out.writeString(action); diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java index 9b4c2a6b026e9..b6b327ded9b09 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java @@ -505,7 +505,7 @@ public List setBan(TaskId parentTaskId, String reason, Transpor if (channel instanceof TcpTransportChannel) { startTrackingChannel(((TcpTransportChannel) channel).getChannel(), ban::registerChannel); } else { - assert channel.getChannelType().equals("direct") : "expect direct channel; got [" + channel + "]"; + assert TransportService.isDirectResponseChannel(channel) : "expect direct channel; got [" + channel + "]"; ban.registerChannel(DIRECT_CHANNEL_TRACKER); } } diff --git a/server/src/main/java/org/elasticsearch/telemetry/tracing/SpanId.java b/server/src/main/java/org/elasticsearch/telemetry/tracing/SpanId.java deleted file mode 100644 index 8a22102baadf9..0000000000000 --- a/server/src/main/java/org/elasticsearch/telemetry/tracing/SpanId.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.telemetry.tracing; - -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.tasks.Task; - -import java.util.Objects; - -public class SpanId { - private final String rawId; - - private SpanId(String rawId) { - this.rawId = Objects.requireNonNull(rawId); - } - - public String getRawId() { - return rawId; - } - - @Override - public String toString() { - return "SpanId[" + rawId + "]"; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - SpanId spanId = (SpanId) o; - return rawId.equals(spanId.rawId); - } - - @Override - public int hashCode() { - return Objects.hash(rawId); - } - - public static SpanId forTask(Task task) { - return new SpanId("task-" + task.getId()); - } - - public static SpanId forRestRequest(RestRequest restRequest) { - return new SpanId("rest-" + restRequest.getRequestId()); - } - - public static SpanId forBareString(String rawId) { - return new SpanId(rawId); - } -} diff --git a/server/src/main/java/org/elasticsearch/telemetry/tracing/TraceContext.java b/server/src/main/java/org/elasticsearch/telemetry/tracing/TraceContext.java new file mode 100644 index 0000000000000..197b4f96acd5b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/telemetry/tracing/TraceContext.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.telemetry.tracing; + +/** + * Required methods from ThreadContext for Tracer + */ +public interface TraceContext { + /** + * Returns a transient header object or null if there is no header for the given key + */ + T getTransient(String key); + + /** + * Puts a transient header object into this context + */ + void putTransient(String key, Object value); + + /** + * Returns the header for the given key or null if not present + */ + String getHeader(String key); + + /** + * Puts a header into the context + */ + void putHeader(String key, String value); +} diff --git a/server/src/main/java/org/elasticsearch/telemetry/tracing/Traceable.java b/server/src/main/java/org/elasticsearch/telemetry/tracing/Traceable.java new file mode 100644 index 0000000000000..64c8635d75dd8 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/telemetry/tracing/Traceable.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.telemetry.tracing; + +/** + * A class that can be traced using the telemetry tracing API + */ +public interface Traceable { + /** + * A consistent id for the span. 
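The deleted `SpanId` wrapper gives way to this `Traceable` interface: rather than callers minting IDs through `SpanId.forTask` or `SpanId.forRestRequest`, the traced object now supplies its own span ID, as `Task.getSpanId()` above returns `"task-" + getId()`. A hypothetical implementation following the same convention, analogous to the deleted `SpanId.forRestRequest` (`"rest-" + requestId`):

```java
import org.elasticsearch.telemetry.tracing.Traceable;

// Hypothetical Traceable following the "[short-name]-[unique-id]" convention.
public class TracedRestRequest implements Traceable {
    private final long requestId;

    public TracedRestRequest(long requestId) {
        this.requestId = requestId;
    }

    @Override
    public String getSpanId() {
        return "rest-" + requestId;
    }
}
```

Callers then pass the object itself, for example `tracer.startTrace(traceContext, traced, "rest", Map.of())` followed later by `tracer.stopTrace(traced)`, per the revised `Tracer` signatures below.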
Should be structured "[short-name]-[unique-id]" i.e. "request-abc1234" + */ + String getSpanId(); +} diff --git a/server/src/main/java/org/elasticsearch/telemetry/tracing/Tracer.java b/server/src/main/java/org/elasticsearch/telemetry/tracing/Tracer.java index f54857091b778..6f2c98dda4e2b 100644 --- a/server/src/main/java/org/elasticsearch/telemetry/tracing/Tracer.java +++ b/server/src/main/java/org/elasticsearch/telemetry/tracing/Tracer.java @@ -8,10 +8,7 @@ package org.elasticsearch.telemetry.tracing; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Releasable; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.tasks.Task; import java.util.Map; @@ -37,27 +34,13 @@ public interface Tracer { /** * Called when a span starts. - * @param threadContext the current context. Required for tracing parent/child span activity. - * @param spanId a unique identifier for the activity, and will not be sent to the tracing system. Add the ID - * to the attributes if it is important + * @param traceContext the current context. Required for tracing parent/child span activity. + * @param traceable provides a unique identifier for the activity, and will not be sent to the tracing system. Add the ID + * to the attributes if it is important * @param name the name of the span. Used to filter out spans, but also sent to the tracing system * @param attributes arbitrary key/value data for the span. Sent to the tracing system */ - void startTrace(ThreadContext threadContext, SpanId spanId, String name, Map attributes); - - /** - * @see Tracer#startTrace(ThreadContext, SpanId, String, Map) - */ - default void startTrace(ThreadContext threadContext, Task task, String name, Map attributes) { - startTrace(threadContext, SpanId.forTask(task), name, attributes); - } - - /** - * @see Tracer#startTrace(ThreadContext, SpanId, String, Map) - */ - default void startTrace(ThreadContext threadContext, RestRequest restRequest, String name, Map attributes) { - startTrace(threadContext, SpanId.forRestRequest(restRequest), name, attributes); - } + void startTrace(TraceContext traceContext, Traceable traceable, String name, Map attributes); /** * Called when a span starts. This version of the method relies on context to assign the span a parent. @@ -67,23 +50,9 @@ default void startTrace(ThreadContext threadContext, RestRequest restRequest, St /** * Called when a span ends. - * @param spanId an identifier for the span - */ - void stopTrace(SpanId spanId); - - /** - * @see Tracer#stopTrace(SpanId) - */ - default void stopTrace(Task task) { - stopTrace(SpanId.forTask(task)); - } - - /** - * @see Tracer#stopTrace(SpanId) + * @param traceable provides an identifier for the span */ - default void stopTrace(RestRequest restRequest) { - stopTrace(SpanId.forRestRequest(restRequest)); - } + void stopTrace(Traceable traceable); /** * Called when a span ends. This version of the method relies on context to select the span to stop. @@ -94,58 +63,51 @@ default void stopTrace(RestRequest restRequest) { * Some tracing implementations support the concept of "events" within a span, marking a point in time during the span * when something interesting happened. If the tracing implementation doesn't support events, then nothing will be recorded. * This should only be called when a trace has already been started on the {@code traceable}. - * @param spanId an identifier for the span + * @param traceable provides an identifier for the span * @param eventName the event that happened.
This should be something meaningful to people reviewing the data, for example * "send response", "finished processing", "validated request", etc. */ - void addEvent(SpanId spanId, String eventName); + void addEvent(Traceable traceable, String eventName); /** * If an exception occurs during a span, you can add data about the exception to the span where the exception occurred. * This should only be called when a span has been started, otherwise it has no effect. - * @param spanId an identifier for the span + * @param traceable provides an identifier for the span * @param throwable the exception that occurred. */ - void addError(SpanId spanId, Throwable throwable); - - /** - * @see Tracer#addError(SpanId, Throwable) - */ - default void addError(RestRequest restRequest, Throwable throwable) { - addError(SpanId.forRestRequest(restRequest), throwable); - } + void addError(Traceable traceable, Throwable throwable); /** * Adds a boolean attribute to an active span. These will be sent to the endpoint that collects tracing data. - * @param spanId an identifier for the span + * @param traceable provides an identifier for the span * @param key the attribute key * @param value the attribute value */ - void setAttribute(SpanId spanId, String key, boolean value); + void setAttribute(Traceable traceable, String key, boolean value); /** * Adds a double attribute to an active span. These will be sent to the endpoint that collects tracing data. - * @param spanId an identifier for the span + * @param traceable provides an identifier for the span * @param key the attribute key * @param value the attribute value */ - void setAttribute(SpanId spanId, String key, double value); + void setAttribute(Traceable traceable, String key, double value); /** * Adds a long attribute to an active span. These will be sent to the endpoint that collects tracing data. - * @param spanId an identifier for the span + * @param traceable provides an identifier for the span * @param key the attribute key * @param value the attribute value */ - void setAttribute(SpanId spanId, String key, long value); + void setAttribute(Traceable traceable, String key, long value); /** * Adds a String attribute to an active span. These will be sent to the endpoint that collects tracing data. - * @param spanId an identifier for the span + * @param traceable provides an identifier for the span * @param key the attribute key * @param value the attribute value */ - void setAttribute(SpanId spanId, String key, String value); + void setAttribute(Traceable traceable, String key, String value); /** * Usually you won't need this about scopes when using tracing. However, @@ -172,10 +134,10 @@ default void addError(RestRequest restRequest, Throwable throwable) { *

Nonetheless, it is possible to manually use scope where more detail is needed by * explicitly opening a scope via the `Tracer`. * - * @param spanId an identifier for the span + * @param traceable provides an identifier for the span * @return a scope. You MUST close it when you are finished with it. */ - Releasable withScope(SpanId spanId); + Releasable withScope(Traceable traceable); /** * A Tracer implementation that does nothing. This is used when no tracer is configured, @@ -183,52 +145,37 @@ default void addError(RestRequest restRequest, Throwable throwable) { */ Tracer NOOP = new Tracer() { @Override - public void startTrace(ThreadContext threadContext, SpanId spanId, String name, Map attributes) {} - - @Override - public void startTrace(ThreadContext threadContext, Task task, String name, Map attributes) {} - - @Override - public void startTrace(ThreadContext threadContext, RestRequest restRequest, String name, Map attributes) {} + public void startTrace(TraceContext traceContext, Traceable traceable, String name, Map attributes) {} @Override public void startTrace(String name, Map attributes) {} @Override - public void stopTrace(SpanId spanId) {} - - @Override - public void stopTrace(Task task) {} - - @Override - public void stopTrace(RestRequest restRequest) {} + public void stopTrace(Traceable traceable) {} @Override public void stopTrace() {} @Override - public void addEvent(SpanId spanId, String eventName) {} - - @Override - public void addError(SpanId spanId, Throwable throwable) {} + public void addEvent(Traceable traceable, String eventName) {} @Override - public void addError(RestRequest restRequest, Throwable throwable) {} + public void addError(Traceable traceable, Throwable throwable) {} @Override - public void setAttribute(SpanId spanId, String key, boolean value) {} + public void setAttribute(Traceable traceable, String key, boolean value) {} @Override - public void setAttribute(SpanId spanId, String key, double value) {} + public void setAttribute(Traceable traceable, String key, double value) {} @Override - public void setAttribute(SpanId spanId, String key, long value) {} + public void setAttribute(Traceable traceable, String key, long value) {} @Override - public void setAttribute(SpanId spanId, String key, String value) {} + public void setAttribute(Traceable traceable, String key, String value) {} @Override - public Releasable withScope(SpanId spanId) { + public Releasable withScope(Traceable traceable) { return () -> {}; } }; diff --git a/server/src/main/java/org/elasticsearch/transport/LeakTracker.java b/server/src/main/java/org/elasticsearch/transport/LeakTracker.java index ea12953e7df12..3be22f6fae53a 100644 --- a/server/src/main/java/org/elasticsearch/transport/LeakTracker.java +++ b/server/src/main/java/org/elasticsearch/transport/LeakTracker.java @@ -77,11 +77,28 @@ public static Releasable wrap(Releasable releasable) { return releasable; } var leak = INSTANCE.track(releasable); - return () -> { - try { - releasable.close(); - } finally { - leak.close(releasable); + return new Releasable() { + @Override + public void close() { + try { + releasable.close(); + } finally { + leak.close(releasable); + } + } + + @Override + public int hashCode() { + // It's legitimate to wrap the resource twice, with two different wrap() calls, which would yield different objects + // if and only if assertions are enabled. So we'd better not ever use these things as map keys etc. 
+ throw new AssertionError("almost certainly a mistake to need the hashCode() of a leak-tracking Releasable"); + } + + @Override + public boolean equals(Object obj) { + // It's legitimate to wrap the resource twice, with two different wrap() calls, which would yield different objects + // if and only if assertions are enabled. So we'd better not ever use these things as map keys etc. + throw new AssertionError("almost certainly a mistake to compare a leak-tracking Releasable for equality"); } }; } @@ -118,6 +135,20 @@ public boolean decRef() { public boolean hasReferences() { return refCounted.hasReferences(); } + + @Override + public int hashCode() { + // It's legitimate to wrap the resource twice, with two different wrap() calls, which would yield different objects + // if and only if assertions are enabled. So we'd better not ever use these things as map keys etc. + throw new AssertionError("almost certainly a mistake to need the hashCode() of a leak-tracking RefCounted"); + } + + @Override + public boolean equals(Object obj) { + // It's legitimate to wrap the resource twice, with two different wrap() calls, which would yield different objects + // if and only if assertions are enabled. So we'd better not ever use these things as map keys etc. + throw new AssertionError("almost certainly a mistake to compare a leak-tracking RefCounted for equality"); + } }; } diff --git a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java index 320b9cfdbf7e6..cfb6f872ce748 100644 --- a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java @@ -179,7 +179,7 @@ public class ProxyConnectionStrategy extends RemoteConnectionStrategy { RemoteConnectionManager.wrapConnectionWithRemoteClusterInfo( newConnection, clusterAlias, - actualProfile.getTransportProfile() + connectionManager.getCredentialsManager() ), actualProfile.getHandshakeTimeout(), cn -> true, diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index a055e4122257f..3c74e46851504 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -57,15 +57,28 @@ final class RemoteClusterConnection implements Closeable { * @param settings the nodes settings object * @param clusterAlias the configured alias of the cluster to connect to * @param transportService the local nodes transport service - * @param credentialsProtected Whether the remote cluster is protected by a credentials, i.e. it has a credentials configured - * via secure setting. This means the remote cluster uses the new configurable access RCS model - * (as opposed to the basic model). + * @param credentialsManager object to look up remote cluster credentials by cluster alias. If a cluster is protected by a credential, + * i.e. it has a credential configured via a secure setting, + * the remote cluster uses the advanced RCS model (as opposed to the basic model).
*/ - RemoteClusterConnection(Settings settings, String clusterAlias, TransportService transportService, boolean credentialsProtected) { + RemoteClusterConnection( + Settings settings, + String clusterAlias, + TransportService transportService, + RemoteClusterCredentialsManager credentialsManager + ) { this.transportService = transportService; this.clusterAlias = clusterAlias; - ConnectionProfile profile = RemoteConnectionStrategy.buildConnectionProfile(clusterAlias, settings, credentialsProtected); - this.remoteConnectionManager = new RemoteConnectionManager(clusterAlias, createConnectionManager(profile, transportService)); + ConnectionProfile profile = RemoteConnectionStrategy.buildConnectionProfile( + clusterAlias, + settings, + credentialsManager.hasCredentials(clusterAlias) + ); + this.remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + credentialsManager, + createConnectionManager(profile, transportService) + ); this.connectionStrategy = RemoteConnectionStrategy.buildStrategy(clusterAlias, transportService, remoteConnectionManager, settings); // we register the transport service here as a listener to make sure we notify handlers on disconnect etc. this.remoteConnectionManager.addListener(transportService); diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterCredentialsManager.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterCredentialsManager.java new file mode 100644 index 0000000000000..58e84f5e4ef11 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterCredentialsManager.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; + +import java.util.Map; + +import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_CREDENTIALS; + +public class RemoteClusterCredentialsManager { + + private static final Logger logger = LogManager.getLogger(RemoteClusterCredentialsManager.class); + + private volatile Map clusterCredentials; + + @SuppressWarnings("this-escape") + public RemoteClusterCredentialsManager(Settings settings) { + updateClusterCredentials(settings); + } + + public final void updateClusterCredentials(Settings settings) { + clusterCredentials = REMOTE_CLUSTER_CREDENTIALS.getAsMap(settings); + logger.debug( + () -> Strings.format( + "Updated remote cluster credentials for clusters: [%s]", + Strings.collectionToCommaDelimitedString(clusterCredentials.keySet()) + ) + ); + } + + @Nullable + public SecureString resolveCredentials(String clusterAlias) { + return clusterCredentials.get(clusterAlias); + } + + public boolean hasCredentials(String clusterAlias) { + return clusterCredentials.containsKey(clusterAlias); + } + + public static final RemoteClusterCredentialsManager EMPTY = new RemoteClusterCredentialsManager(Settings.EMPTY); +} diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java index 814b17bac95ef..fd5c39ec5fb1f 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java @@ -39,7 +39,7 @@ */ public class RemoteClusterPortSettings { - public static final TransportVersion TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY = TransportVersions.V_8_500_059; + public static final TransportVersion TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY = TransportVersions.V_8_500_061; public static final String REMOTE_CLUSTER_PROFILE = "_remote_cluster"; public static final String REMOTE_CLUSTER_PREFIX = "remote_cluster."; diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index c38f4b26c665f..6bfbb95cbcfe9 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -147,15 +147,14 @@ public boolean isRemoteClusterServerEnabled() { private final TransportService transportService; private final Map remoteClusters = ConcurrentCollections.newConcurrentMap(); - private final Set credentialsProtectedRemoteClusters; + private final RemoteClusterCredentialsManager remoteClusterCredentialsManager; RemoteClusterService(Settings settings, TransportService transportService) { super(settings); this.enabled = DiscoveryNode.isRemoteClusterClient(settings); this.remoteClusterServerEnabled = REMOTE_CLUSTER_SERVER_ENABLED.get(settings); this.transportService = transportService; - this.credentialsProtectedRemoteClusters = REMOTE_CLUSTER_CREDENTIALS.getAsMap(settings).keySet(); - + this.remoteClusterCredentialsManager = new RemoteClusterCredentialsManager(settings); if (remoteClusterServerEnabled) { 
registerRemoteClusterHandshakeRequestHandler(transportService); } @@ -305,6 +304,14 @@ private synchronized void updateSkipUnavailable(String clusterAlias, Boolean ski } } + public void updateRemoteClusterCredentials(Settings settings) { + remoteClusterCredentialsManager.updateClusterCredentials(settings); + } + + public RemoteClusterCredentialsManager getRemoteClusterCredentialsManager() { + return remoteClusterCredentialsManager; + } + @Override protected void updateRemoteCluster(String clusterAlias, Settings settings) { CountDownLatch latch = new CountDownLatch(1); @@ -363,12 +370,7 @@ synchronized void updateRemoteCluster( if (remote == null) { // this is a new cluster we have to add a new representation Settings finalSettings = Settings.builder().put(this.settings, false).put(newSettings, false).build(); - remote = new RemoteClusterConnection( - finalSettings, - clusterAlias, - transportService, - credentialsProtectedRemoteClusters.contains(clusterAlias) - ); + remote = new RemoteClusterConnection(finalSettings, clusterAlias, transportService, remoteClusterCredentialsManager); remoteClusters.put(clusterAlias, remote); remote.ensureConnected(listener.map(ignored -> RemoteClusterConnectionStatus.CONNECTED)); } else if (remote.shouldRebuildConnection(newSettings)) { @@ -380,12 +382,7 @@ synchronized void updateRemoteCluster( } remoteClusters.remove(clusterAlias); Settings finalSettings = Settings.builder().put(this.settings, false).put(newSettings, false).build(); - remote = new RemoteClusterConnection( - finalSettings, - clusterAlias, - transportService, - credentialsProtectedRemoteClusters.contains(clusterAlias) - ); + remote = new RemoteClusterConnection(finalSettings, clusterAlias, transportService, remoteClusterCredentialsManager); remoteClusters.put(clusterAlias, remote); remote.ensureConnected(listener.map(ignored -> RemoteClusterConnectionStatus.RECONNECTED)); } else { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionManager.java b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionManager.java index b16734b273376..3b531d54fb033 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionManager.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionManager.java @@ -12,6 +12,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; @@ -25,18 +26,19 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicLong; -import static org.elasticsearch.transport.RemoteClusterPortSettings.REMOTE_CLUSTER_PROFILE; import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_HANDSHAKE_ACTION_NAME; public class RemoteConnectionManager implements ConnectionManager { private final String clusterAlias; + private final RemoteClusterCredentialsManager credentialsManager; private final ConnectionManager delegate; private final AtomicLong counter = new AtomicLong(); private volatile List connectedNodes = Collections.emptyList(); - RemoteConnectionManager(String clusterAlias, ConnectionManager delegate) { + RemoteConnectionManager(String clusterAlias, RemoteClusterCredentialsManager credentialsManager, ConnectionManager delegate) { this.clusterAlias = clusterAlias; + this.credentialsManager = credentialsManager; 
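The new manager is consulted at two points in the surrounding changes: `hasCredentials` selects the connection profile when a `RemoteClusterConnection` is built or rebuilt, and `resolveCredentials` attaches the credential to each wrapped connection. A small illustrative consumer (the `my_cluster` alias is made up):

```java
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.transport.RemoteClusterCredentialsManager;

public class CredentialsManagerExample {
    // Illustrative only: "my_cluster" is a hypothetical cluster alias.
    static void describe(RemoteClusterCredentialsManager credentialsManager) {
        String clusterAlias = "my_cluster";
        if (credentialsManager.hasCredentials(clusterAlias)) {
            // advanced RCS model: a credential was configured via secure setting,
            // and would be attached to each wrapped connection
            SecureString credential = credentialsManager.resolveCredentials(clusterAlias);
        } else {
            // basic model: no credential for this alias, resolveCredentials returns null
        }
    }
}
```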
this.delegate = delegate; this.delegate.addListener(new TransportConnectionListener() { @Override @@ -51,6 +53,10 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti }); } + public RemoteClusterCredentialsManager getCredentialsManager() { + return credentialsManager; + } + /** * Remote cluster connections have a different lifecycle from intra-cluster connections. Use {@link #connectToRemoteClusterNode} * instead of this method. */ @@ -95,13 +101,7 @@ public void openConnection(DiscoveryNode node, @Nullable ConnectionProfile profi node, profile, listener.delegateFailureAndWrap( - (l, connection) -> l.onResponse( - new InternalRemoteConnection( - connection, - clusterAlias, - profile != null ? profile.getTransportProfile() : getConnectionProfile().getTransportProfile() - ) - ) + (l, connection) -> l.onResponse(wrapConnectionWithRemoteClusterInfo(connection, clusterAlias, credentialsManager)) ) ); } @@ -182,16 +182,35 @@ public void closeNoBlock() { * @return a cluster alias if the connection targets a node in the remote cluster, otherwise an empty result */ public static Optional<String> resolveRemoteClusterAlias(Transport.Connection connection) { + return resolveRemoteClusterAliasWithCredentials(connection).map(RemoteClusterAliasWithCredentials::clusterAlias); + } + + public record RemoteClusterAliasWithCredentials(String clusterAlias, @Nullable SecureString credentials) { + @Override + public String toString() { + return "RemoteClusterAliasWithCredentials{clusterAlias='" + clusterAlias + "', credentials='::es_redacted::'}"; + } + } + + /** + * This method returns information (alias and credentials) for the remote cluster of the given transport connection. + * Either or both of alias and credentials can be null depending on the connection.
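+ * <p>Editor's note (illustrative, not part of this changeset): a caller that needs to know whether
+ * a request is crossing into a credentials-protected remote cluster could write, for example,
+ * {@code resolveRemoteClusterAliasWithCredentials(connection).filter(info -> info.credentials() != null)}
+ * and, when present, attach the credentials to the outgoing request; the filter and the
+ * attachment step here are assumptions for illustration only.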
+ * + * @param connection the transport connection for which to resolve a remote cluster alias + */ + public static Optional<RemoteClusterAliasWithCredentials> resolveRemoteClusterAliasWithCredentials(Transport.Connection connection) { Transport.Connection unwrapped = TransportService.unwrapConnection(connection); if (unwrapped instanceof InternalRemoteConnection remoteConnection) { - return Optional.of(remoteConnection.getClusterAlias()); + return Optional.of( + new RemoteClusterAliasWithCredentials(remoteConnection.getClusterAlias(), remoteConnection.getClusterCredentials()) + ); } return Optional.empty(); } private Transport.Connection getConnectionInternal(DiscoveryNode node) throws NodeNotConnectedException { Transport.Connection connection = delegate.getConnection(node); - return new InternalRemoteConnection(connection, clusterAlias, getConnectionProfile().getTransportProfile()); + return wrapConnectionWithRemoteClusterInfo(connection, clusterAlias, credentialsManager); } private synchronized void addConnectedNode(DiscoveryNode addedNode) { @@ -297,21 +316,27 @@ private static final class InternalRemoteConnection implements Transport.Connect private static final Logger logger = LogManager.getLogger(InternalRemoteConnection.class); private final Transport.Connection connection; private final String clusterAlias; - private final boolean isRemoteClusterProfile; + @Nullable + private final SecureString clusterCredentials; - InternalRemoteConnection(Transport.Connection connection, String clusterAlias, String transportProfile) { + private InternalRemoteConnection(Transport.Connection connection, String clusterAlias, @Nullable SecureString clusterCredentials) { assert false == connection instanceof InternalRemoteConnection : "should not double wrap"; assert false == connection instanceof ProxyConnection : "proxy connection should wrap internal remote connection, not the other way around"; - this.clusterAlias = Objects.requireNonNull(clusterAlias); this.connection = Objects.requireNonNull(connection); - this.isRemoteClusterProfile = REMOTE_CLUSTER_PROFILE.equals(Objects.requireNonNull(transportProfile)); + this.clusterAlias = Objects.requireNonNull(clusterAlias); + this.clusterCredentials = clusterCredentials; } public String getClusterAlias() { return clusterAlias; } + @Nullable + public SecureString getClusterCredentials() { + return clusterCredentials; + } + @Override public DiscoveryNode getNode() { return connection.getNode(); } @@ -321,7 +346,7 @@ public DiscoveryNode getNode() { public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException { final String effectiveAction; - if (isRemoteClusterProfile && TransportService.HANDSHAKE_ACTION_NAME.equals(action)) { + if (clusterCredentials != null && TransportService.HANDSHAKE_ACTION_NAME.equals(action)) { logger.trace("sending remote cluster specific handshake to node [{}] of remote cluster [{}]", getNode(), clusterAlias); effectiveAction = REMOTE_CLUSTER_HANDSHAKE_ACTION_NAME; } else { @@ -389,8 +414,8 @@ public boolean hasReferences() { static InternalRemoteConnection wrapConnectionWithRemoteClusterInfo( Transport.Connection connection, String clusterAlias, - String transportProfile + RemoteClusterCredentialsManager credentialsManager ) { - return new InternalRemoteConnection(connection, clusterAlias, transportProfile); + return new InternalRemoteConnection(connection, clusterAlias, credentialsManager.resolveCredentials(clusterAlias)); } } diff --git
a/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java b/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java index fbc1dbdf6c8fc..f82284a685b87 100644 --- a/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java +++ b/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java @@ -63,7 +63,7 @@ public Request newRequest(StreamInput in) throws IOException { } public void processMessageReceived(Request request, TransportChannel channel) throws Exception { - final Task task = taskManager.register(channel.getChannelType(), action, request); + final Task task = taskManager.register("transport", action, request); Releasable unregisterTask = () -> taskManager.unregister(task); try { if (channel instanceof TcpTransportChannel tcpTransportChannel && task instanceof CancellableTask cancellableTask) { diff --git a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java index 0dcad9cf6864c..0f68a58faf463 100644 --- a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java @@ -357,7 +357,11 @@ private ConnectionManager.ConnectionValidator getConnectionValidator(DiscoveryNo : "transport profile must be consistent between the connection manager and the actual profile"; transportService.connectionValidator(node) .validate( - RemoteConnectionManager.wrapConnectionWithRemoteClusterInfo(connection, clusterAlias, profile.getTransportProfile()), + RemoteConnectionManager.wrapConnectionWithRemoteClusterInfo( + connection, + clusterAlias, + connectionManager.getCredentialsManager() + ), profile, listener ); diff --git a/server/src/main/java/org/elasticsearch/transport/TaskTransportChannel.java b/server/src/main/java/org/elasticsearch/transport/TaskTransportChannel.java index 502a9a3541243..bb2257de6a135 100644 --- a/server/src/main/java/org/elasticsearch/transport/TaskTransportChannel.java +++ b/server/src/main/java/org/elasticsearch/transport/TaskTransportChannel.java @@ -31,11 +31,6 @@ public String getProfileName() { return channel.getProfileName(); } - @Override - public String getChannelType() { - return channel.getChannelType(); - } - @Override public void sendResponse(TransportResponse response) throws IOException { try { diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java b/server/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java index 958b246a03844..40cca4f0717c7 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java @@ -95,11 +95,6 @@ private void release(boolean isExceptionResponse) { } } - @Override - public String getChannelType() { - return "transport"; - } - @Override public TransportVersion getVersion() { return version; diff --git a/server/src/main/java/org/elasticsearch/transport/TransportChannel.java b/server/src/main/java/org/elasticsearch/transport/TransportChannel.java index d99a97ab796fe..f87e74b43a0a3 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportChannel.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportChannel.java @@ -19,8 +19,6 @@ public interface TransportChannel { String getProfileName(); - String getChannelType(); - void sendResponse(TransportResponse response) throws IOException; void 
sendResponse(Exception exception) throws IOException; diff --git a/server/src/main/java/org/elasticsearch/transport/TransportLogger.java b/server/src/main/java/org/elasticsearch/transport/TransportLogger.java index be9e0070d05ba..485fd85e7ab7c 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportLogger.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportLogger.java @@ -13,7 +13,6 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.IOUtils; @@ -159,7 +158,7 @@ private static String format(TcpChannel channel, InboundMessage message, String private static StreamInput decompressingStream(byte status, StreamInput streamInput) throws IOException { if (TransportStatus.isCompress(status) && streamInput.available() > 0) { try { - return new InputStreamStreamInput(CompressorFactory.COMPRESSOR.threadLocalInputStream(streamInput)); + return CompressorFactory.COMPRESSOR.threadLocalStreamInput(streamInput); } catch (IllegalArgumentException e) { throw new IllegalStateException("stream marked as compressed, but is missing deflate header"); } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 5ce44c74a7a69..3c1907c2115e8 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -1456,6 +1456,10 @@ TransportResponseHandler<T> unwrap() { } } + public static boolean isDirectResponseChannel(TransportChannel transportChannel) { + return transportChannel instanceof DirectResponseChannel; + } + static class DirectResponseChannel implements TransportChannel { final DiscoveryNode localNode; private final String action; @@ -1574,11 +1578,6 @@ protected void processException(final TransportResponseHandler<?> handler, final } } - @Override - public String getChannelType() { - return "direct"; - } - @Override public String toString() { return Strings.format("DirectResponseChannel{req=%d}{%s}", requestId, action); diff --git a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskState.java b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskState.java index 39c60e1224cf9..51e97cc193ec3 100644 --- a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskState.java +++ b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskState.java @@ -62,7 +62,7 @@ public SystemIndexMigrationTaskState(String currentIndex, String currentFeature, public SystemIndexMigrationTaskState(StreamInput in) throws IOException { this.currentIndex = in.readString(); this.currentFeature = in.readString(); - this.featureCallbackMetadata = in.readMap(); + this.featureCallbackMetadata = in.readGenericMap(); } /** diff --git a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat index 119a708832948..bdb1b75be4843 100644 --- a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat +++ b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat @@
-1,2 +1,3 @@ org.elasticsearch.index.codec.bloomfilter.ES85BloomFilterPostingsFormat org.elasticsearch.index.codec.bloomfilter.ES87BloomFilterPostingsFormat +org.elasticsearch.index.codec.postings.ES812PostingsFormat diff --git a/server/src/test/java/org/elasticsearch/action/RequestValidatorsTests.java b/server/src/test/java/org/elasticsearch/action/RequestValidatorsTests.java index 8db6f8b1186bb..45843b553069f 100644 --- a/server/src/test/java/org/elasticsearch/action/RequestValidatorsTests.java +++ b/server/src/test/java/org/elasticsearch/action/RequestValidatorsTests.java @@ -11,13 +11,17 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.common.Randomness; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.hamcrest.OptionalMatchers; -import org.hamcrest.Matchers; import java.util.ArrayList; import java.util.List; import java.util.Optional; +import static org.elasticsearch.test.LambdaMatchers.transformedMatch; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresent; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresentWith; +import static org.hamcrest.Matchers.arrayWithSize; + public class RequestValidatorsTests extends ESTestCase { private final RequestValidators.RequestValidator<PutMappingRequest> EMPTY = (request, state, indices) -> Optional.empty(); @@ -32,17 +36,17 @@ public void testValidates() { validators.add(EMPTY); } final RequestValidators<PutMappingRequest> requestValidators = new RequestValidators<>(validators); - assertThat(requestValidators.validateRequest(null, null, null), OptionalMatchers.isEmpty()); + assertThat(requestValidators.validateRequest(null, null, null), isEmpty()); } public void testFailure() { final RequestValidators<PutMappingRequest> validators = new RequestValidators<>(List.of(FAIL)); - assertThat(validators.validateRequest(null, null, null), OptionalMatchers.isPresent()); + assertThat(validators.validateRequest(null, null, null), isPresent()); } public void testValidatesAfterFailure() { final RequestValidators<PutMappingRequest> validators = new RequestValidators<>(List.of(FAIL, EMPTY)); - assertThat(validators.validateRequest(null, null, null), OptionalMatchers.isPresent()); + assertThat(validators.validateRequest(null, null, null), isPresent()); } public void testMultipleFailures() { @@ -53,9 +57,7 @@ public void testMultipleFailures() { } final RequestValidators<PutMappingRequest> requestValidators = new RequestValidators<>(validators); final Optional<Exception> e = requestValidators.validateRequest(null, null, null); - assertThat(e, OptionalMatchers.isPresent()); - // noinspection OptionalGetWithoutIsPresent - assertThat(e.get().getSuppressed(), Matchers.arrayWithSize(numberOfFailures - 1)); + assertThat(e, isPresentWith(transformedMatch(Exception::getSuppressed, arrayWithSize(numberOfFailures - 1)))); } public void testRandom() { @@ -74,11 +76,9 @@ public void testRandom() { final RequestValidators<PutMappingRequest> requestValidators = new RequestValidators<>(validators); final Optional<Exception> e = requestValidators.validateRequest(null, null, null); if (numberOfFailures == 0) { - assertThat(e, OptionalMatchers.isEmpty()); + assertThat(e, isEmpty()); } else { - assertThat(e, OptionalMatchers.isPresent()); - // noinspection OptionalGetWithoutIsPresent - assertThat(e.get().getSuppressed(), Matchers.arrayWithSize(numberOfFailures - 1)); + assertThat(e, isPresentWith(transformedMatch(Exception::getSuppressed, arrayWithSize(numberOfFailures - 1)))); } } diff --git
a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponseTests.java index 7b452beac0938..a063c590a8c07 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponseTests.java @@ -179,9 +179,14 @@ public void testToXContent() throws IOException { randomClusterInfo() ); - Map<String, Object> json = createParser( - ChunkedToXContent.wrapAsToXContent(response).toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS) - ).map(); + Map<String, Object> json; + try ( + var parser = createParser( + ChunkedToXContent.wrapAsToXContent(response).toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS) + ) + ) { + json = parser.map(); + } assertThat(json.keySet(), containsInAnyOrder("stats", "cluster_balance_stats", "routing_table", "cluster_info")); // stats diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java index e702446406238..6eb3310623b92 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java @@ -78,7 +78,7 @@ public void testReturnsErrorIfAllocatorIsNotDesiredBalanced() throws Exception { mock(ShardsAllocator.class) ).masterOperation(mock(Task.class), new DesiredBalanceRequest(), ClusterState.EMPTY_STATE, listener); - var exception = expectThrows(ResourceNotFoundException.class, listener::actionGet); + var exception = expectThrows(ResourceNotFoundException.class, listener); assertThat(exception.getMessage(), equalTo("Desired balance allocator is not in use, no desired balance found")); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/hotthreads/NodesHotThreadsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/hotthreads/NodesHotThreadsResponseTests.java index b6c7b591cbaa5..33676489b04d5 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/hotthreads/NodesHotThreadsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/hotthreads/NodesHotThreadsResponseTests.java @@ -13,6 +13,8 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.test.ESTestCase; import java.util.List; @@ -23,19 +25,33 @@ public class NodesHotThreadsResponseTests extends ESTestCase { public void testGetTextChunks() { final var node0 = DiscoveryNodeUtils.create("node-0"); final var node1 = DiscoveryNodeUtils.create("node-1"); - assertEquals(Strings.format(""" - ::: %s - node 0 line 1 - node 0 line 2 - - ::: %s - node 1 line 1 - node 1 line 2 - - """, node0, node1), getTextBodyContent(new NodesHotThreadsResponse(ClusterName.DEFAULT, List.of(new NodeHotThreads(node0, """ - node 0 line 1 - node 0 line 2"""), new NodeHotThreads(node1, """ - node 1 line 1 - node 1 line 2""")), List.of()).getTextChunks())); + final var response =
new NodesHotThreadsResponse( + ClusterName.DEFAULT, + List.of( + + new NodeHotThreads(node0, ReleasableBytesReference.wrap(new BytesArray(""" + node 0 line 1 + node 0 line 2"""))), + + new NodeHotThreads(node1, ReleasableBytesReference.wrap(new BytesArray(""" + node 1 line 1 + node 1 line 2"""))) + ), + List.of() + ); + try { + assertEquals(Strings.format(""" + ::: %s + node 0 line 1 + node 0 line 2 + + ::: %s + node 1 line 1 + node 1 line 2 + + """, node0, node1), getTextBodyContent(response.getTextChunks())); + } finally { + response.decRef(); + } } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index 8334c98e5fca0..86ccd9807cf9f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -697,7 +697,7 @@ protected void taskOperation( cancellationFuture.actionGet(); logger.info("Parent task is now cancelled counting down task latch"); taskLatch.countDown(); - expectThrows(TaskCancelledException.class, taskFuture::actionGet); + expectThrows(TaskCancelledException.class, taskFuture); // Release all node tasks and wait for response checkLatch.countDown(); @@ -775,7 +775,7 @@ protected void taskOperation( reachabilityChecker.ensureUnreachable(); } - expectThrows(TaskCancelledException.class, taskFuture::actionGet); + expectThrows(TaskCancelledException.class, taskFuture); blockedActionLatch.countDown(); NodesResponse responses = future.get(10, TimeUnit.SECONDS); @@ -848,7 +848,7 @@ protected void taskOperation( reachabilityChecker.ensureUnreachable(); } - expectThrows(TaskCancelledException.class, taskFuture::actionGet); + expectThrows(TaskCancelledException.class, taskFuture); } public void testTaskLevelActionFailures() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java index a1d2ef33d85f3..774093834e941 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java @@ -61,10 +61,11 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws () -> randomAlphaOfLengthBetween(3, 10) ) ); - XContentParseException iae = expectThrows( - XContentParseException.class, - () -> ClusterUpdateSettingsRequest.fromXContent(createParser(xContentType.xContent(), mutated)) - ); + XContentParseException iae = expectThrows(XContentParseException.class, () -> { + try (var parser = createParser(xContentType.xContent(), mutated)) { + ClusterUpdateSettingsRequest.fromXContent(parser); + } + }); assertThat(iae.getMessage(), containsString("[cluster_update_settings_request] unknown field [" + unsupportedField + "]")); } else { try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java index 2f151e516cde4..97a5775f7c69f 100644 --- 
a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java @@ -99,15 +99,18 @@ public void testToXContent() throws IOException { } XContentBuilder builder = original.toXContent(XContentFactory.jsonBuilder(), new MapParams(Collections.emptyMap())); - XContentParser parser = XContentType.JSON.xContent() - .createParser(NamedXContentRegistry.EMPTY, null, BytesReference.bytes(builder).streamInput()); - Map<String, Object> map = parser.mapOrdered(); - CreateSnapshotRequest processed = new CreateSnapshotRequest((String) map.get("repository"), (String) map.get("snapshot")); - processed.waitForCompletion(original.waitForCompletion()); - processed.masterNodeTimeout(original.masterNodeTimeout()); - processed.source(map); - - assertEquals(original, processed); + try ( + XContentParser parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, null, BytesReference.bytes(builder).streamInput()) + ) { + Map<String, Object> map = parser.mapOrdered(); + CreateSnapshotRequest processed = new CreateSnapshotRequest((String) map.get("repository"), (String) map.get("snapshot")); + processed.waitForCompletion(original.waitForCompletion()); + processed.masterNodeTimeout(original.masterNodeTimeout()); + processed.source(map); + + assertEquals(original, processed); + } } public void testSizeCheck() { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java index 922e7e03c7600..56216d2670150 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java @@ -130,9 +130,13 @@ public void testSource() throws IOException { original.snapshotUuid(null); // cannot be set via the REST API original.quiet(false); // cannot be set via the REST API XContentBuilder builder = original.toXContent(XContentFactory.jsonBuilder(), new ToXContent.MapParams(Collections.emptyMap())); - XContentParser parser = XContentType.JSON.xContent() - .createParser(NamedXContentRegistry.EMPTY, null, BytesReference.bytes(builder).streamInput()); - Map<String, Object> map = parser.mapOrdered(); + Map<String, Object> map; + try ( + XContentParser parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, null, BytesReference.bytes(builder).streamInput()) + ) { + map = parser.mapOrdered(); + } // we will only restore properties from the map that are contained in the request body.
All other // properties are restored from the original (in the actual REST action this is restored from the @@ -174,8 +178,11 @@ public void testToStringWillIncludeSkipOperatorOnlyState() { private Map<String, Object> convertRequestToMap(RestoreSnapshotRequest request) throws IOException { XContentBuilder builder = request.toXContent(XContentFactory.jsonBuilder(), new ToXContent.MapParams(Collections.emptyMap())); - XContentParser parser = XContentType.JSON.xContent() - .createParser(NamedXContentRegistry.EMPTY, null, BytesReference.bytes(builder).streamInput()); - return parser.mapOrdered(); + try ( + XContentParser parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, null, BytesReference.bytes(builder).streamInput()) + ) { + return parser.mapOrdered(); + } } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStatsTests.java index a5704748ea242..a5bf7b39669e7 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStatsTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.admin.cluster.stats; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.Writeable.Reader; @@ -20,7 +19,6 @@ import java.util.List; import java.util.Map; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102920") // failing test is final, mute whole suite public class SearchUsageStatsTests extends AbstractWireSerializingTestCase<SearchUsageStats> { private static final List<String> QUERY_TYPES = List.of( @@ -108,7 +106,7 @@ protected SearchUsageStats mutateInstance(SearchUsageStats instance) { case 2 -> new SearchUsageStats( instance.getQueryUsage(), instance.getRescorerUsage(), - randomValueOtherThan(instance.getRescorerUsage(), () -> randomSectionsUsage(randomIntBetween(0, SECTIONS.size()))), + randomValueOtherThan(instance.getSectionsUsage(), () -> randomSectionsUsage(randomIntBetween(0, SECTIONS.size()))), instance.getTotalSearchCount() ); default -> new SearchUsageStats( diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerTests.java index 6c79946cce15f..d6cf90034f5b5 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerTests.java @@ -14,7 +14,6 @@ import org.apache.lucene.codecs.lucene90.Lucene90DocValuesFormat; import org.apache.lucene.codecs.lucene99.Lucene99Codec; import org.apache.lucene.codecs.lucene99.Lucene99HnswVectorsFormat; -import org.apache.lucene.codecs.lucene99.Lucene99PostingsFormat; import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat; import org.apache.lucene.codecs.perfield.PerFieldKnnVectorsFormat; import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat; @@ -67,6 +66,7 @@ import org.apache.lucene.util.FixedBitSet; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.codec.postings.ES812PostingsFormat; import org.elasticsearch.index.shard.ShardId; import
org.elasticsearch.index.store.LuceneFilesExtensions; import org.elasticsearch.test.ESTestCase; @@ -642,7 +642,7 @@ static void rewriteIndexWithPerFieldCodec(Directory source, CodecMode mode, Dire .setCodec(new Lucene99Codec(mode.mode()) { @Override public PostingsFormat getPostingsFormatForField(String field) { - return new Lucene99PostingsFormat(); + return new ES812PostingsFormat(); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java index 8b700ecb9fc01..b34045b50654c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java @@ -66,7 +66,9 @@ public void testConditionsParsing() throws Exception { .field("min_primary_shard_docs", 10) .endObject() .endObject(); - request.fromXContent(false, createParser(builder)); + try (var parser = createParser(builder)) { + request.fromXContent(false, parser); + } Map<String, Condition<?>> conditions = request.getConditions().getConditions(); assertThat(conditions.size(), equalTo(10)); MaxAgeCondition maxAgeCondition = (MaxAgeCondition) conditions.get(MaxAgeCondition.NAME); @@ -118,7 +120,9 @@ public void testParsingWithIndexSettings() throws Exception { .endObject() .endObject() .endObject(); - request.fromXContent(false, createParser(builder)); + try (var parser = createParser(builder)) { + request.fromXContent(false, parser); + } Map<String, Condition<?>> conditions = request.getConditions().getConditions(); assertThat(conditions.size(), equalTo(3)); assertThat(request.getCreateIndexRequest().mappings(), containsString("not_analyzed")); @@ -139,8 +143,9 @@ public void testTypelessMappingParsing() throws Exception { .endObject() .endObject(); - request.fromXContent(false, createParser(builder)); - + try (var parser = createParser(builder)) { + request.fromXContent(false, parser); + } CreateIndexRequest createIndexRequest = request.getCreateIndexRequest(); String mapping = createIndexRequest.mappings(); assertNotNull(mapping); @@ -169,6 +174,7 @@ public void testSerialize() throws Exception { .addMinPrimaryShardDocsCondition(randomNonNegativeLong()) .build() ); + originalRequest.lazy(randomBoolean()); try (BytesStreamOutput out = new BytesStreamOutput()) { originalRequest.writeTo(out); BytesReference bytes = out.bytes(); @@ -176,6 +182,7 @@ public void testSerialize() throws Exception { RolloverRequest cloneRequest = new RolloverRequest(in); assertThat(cloneRequest.getNewIndexName(), equalTo(originalRequest.getNewIndexName())); assertThat(cloneRequest.getRolloverTarget(), equalTo(originalRequest.getRolloverTarget())); + assertThat(cloneRequest.isLazy(), equalTo(originalRequest.isLazy())); for (Map.Entry<String, Condition<?>> entry : cloneRequest.getConditions().getConditions().entrySet()) { Condition<?> condition = originalRequest.getConditions().getConditions().get(entry.getKey()); // here we compare the string representation as there is some information loss when serializing @@ -198,7 +205,11 @@ public void testUnknownFields() throws IOException { } builder.endObject(); BytesReference mutated = XContentTestUtils.insertRandomFields(xContentType, BytesReference.bytes(builder), null, random()); - expectThrows(XContentParseException.class, () -> request.fromXContent(false, createParser(xContentType.xContent(), mutated))); + expectThrows(XContentParseException.class, () -> { + try (var parser =
createParser(xContentType.xContent(), mutated)) { + request.fromXContent(false, parser); + } + }); } public void testValidation() { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java index 55150b7800482..dc382c8b2a3be 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java @@ -33,7 +33,8 @@ protected RolloverResponse createTestInstance() { randomBoolean(), randomBoolean(), acknowledged, - shardsAcknowledged + shardsAcknowledged, + randomBoolean() ); } @@ -69,100 +70,46 @@ protected Writeable.Reader<RolloverResponse> instanceReader() { @Override protected RolloverResponse mutateInstance(RolloverResponse response) { - int i = randomIntBetween(0, 6); + var oldIndex = response.getOldIndex(); + var newIndex = response.getNewIndex(); + var conditionStatus = response.getConditionStatus(); + var dryRun = response.isDryRun(); + var rolledOver = response.isRolledOver(); + var acknowledged = response.isAcknowledged(); + var shardsAcknowledged = response.isShardsAcknowledged(); + var lazy = response.isLazy(); + int i = randomIntBetween(0, 7); switch (i) { - case 0: - return new RolloverResponse( - response.getOldIndex() + randomAlphaOfLengthBetween(2, 5), - response.getNewIndex(), - response.getConditionStatus(), - response.isDryRun(), - response.isRolledOver(), - response.isAcknowledged(), - response.isShardsAcknowledged() - ); - case 1: - return new RolloverResponse( - response.getOldIndex(), - response.getNewIndex() + randomAlphaOfLengthBetween(2, 5), - response.getConditionStatus(), - response.isDryRun(), - response.isRolledOver(), - response.isAcknowledged(), - response.isShardsAcknowledged() - ); - case 2: Map<String, Boolean> results; + case 0 -> oldIndex = oldIndex + randomAlphaOfLengthBetween(2, 5); + case 1 -> newIndex = newIndex + randomAlphaOfLengthBetween(2, 5); + case 2 -> { if (response.getConditionStatus().isEmpty()) { - results = randomResults(false); + conditionStatus = randomResults(false); } else { - results = Maps.newMapWithExpectedSize(response.getConditionStatus().size()); + conditionStatus = Maps.newMapWithExpectedSize(response.getConditionStatus().size()); List<String> keys = randomSubsetOf( randomIntBetween(1, response.getConditionStatus().size()), response.getConditionStatus().keySet() ); for (Map.Entry<String, Boolean> entry : response.getConditionStatus().entrySet()) { boolean value = keys.contains(entry.getKey()) ?
entry.getValue() == false : entry.getValue(); - results.put(entry.getKey(), value); + conditionStatus.put(entry.getKey(), value); } } - return new RolloverResponse( - response.getOldIndex(), - response.getNewIndex(), - results, - response.isDryRun(), - response.isRolledOver(), - response.isAcknowledged(), - response.isShardsAcknowledged() - ); - case 3: - return new RolloverResponse( - response.getOldIndex(), - response.getNewIndex(), - response.getConditionStatus(), - response.isDryRun() == false, - response.isRolledOver(), - response.isAcknowledged(), - response.isShardsAcknowledged() - ); - case 4: - return new RolloverResponse( - response.getOldIndex(), - response.getNewIndex(), - response.getConditionStatus(), - response.isDryRun(), - response.isRolledOver() == false, - response.isAcknowledged(), - response.isShardsAcknowledged() - ); - case 5: { - boolean acknowledged = response.isAcknowledged() == false; - boolean shardsAcknowledged = acknowledged && response.isShardsAcknowledged(); - return new RolloverResponse( - response.getOldIndex(), - response.getNewIndex(), - response.getConditionStatus(), - response.isDryRun(), - response.isRolledOver(), - acknowledged, - shardsAcknowledged - ); } - case 6: { - boolean shardsAcknowledged = response.isShardsAcknowledged() == false; - boolean acknowledged = shardsAcknowledged || response.isAcknowledged(); - return new RolloverResponse( - response.getOldIndex(), - response.getNewIndex(), - response.getConditionStatus(), - response.isDryRun(), - response.isRolledOver(), - acknowledged, - shardsAcknowledged - ); + case 3 -> dryRun = dryRun == false; + case 4 -> rolledOver = rolledOver == false; + case 5 -> { + acknowledged = response.isAcknowledged() == false; + shardsAcknowledged = acknowledged && response.isShardsAcknowledged(); } - default: - throw new UnsupportedOperationException(); + case 6 -> { + shardsAcknowledged = response.isShardsAcknowledged() == false; + acknowledged = shardsAcknowledged || response.isAcknowledged(); + } + case 7 -> lazy = lazy == false; + default -> throw new UnsupportedOperationException(); } + return new RolloverResponse(oldIndex, newIndex, conditionStatus, dryRun, rolledOver, acknowledged, shardsAcknowledged, lazy); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java index e10fd960a7554..950d1a9f22f08 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java @@ -18,14 +18,17 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasMetadata; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; +import 
org.elasticsearch.cluster.metadata.MetadataDataStreamsService; import org.elasticsearch.cluster.metadata.MetadataIndexAliasesService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RecoverySource; @@ -38,6 +41,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.cache.request.RequestCacheStats; @@ -61,6 +65,8 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.hamcrest.Matchers; +import org.junit.Before; import org.mockito.ArgumentCaptor; import java.nio.file.Path; @@ -74,6 +80,7 @@ import static org.elasticsearch.action.admin.indices.rollover.TransportRolloverAction.buildStats; import static org.elasticsearch.action.admin.indices.rollover.TransportRolloverAction.evaluateConditions; import static org.elasticsearch.test.ActionListenerUtils.anyActionListener; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; @@ -86,6 +93,30 @@ public class TransportRolloverActionTests extends ESTestCase { + final ClusterService mockClusterService = mock(ClusterService.class); + final DiscoveryNode mockNode = mock(DiscoveryNode.class); + final ThreadPool mockThreadPool = mock(ThreadPool.class); + final MetadataCreateIndexService mockCreateIndexService = mock(MetadataCreateIndexService.class); + final IndexNameExpressionResolver mockIndexNameExpressionResolver = mock(IndexNameExpressionResolver.class); + final ActionFilters mockActionFilters = mock(ActionFilters.class); + final MetadataIndexAliasesService mdIndexAliasesService = mock(MetadataIndexAliasesService.class); + final MetadataDataStreamsService mockMetadataDataStreamService = mock(MetadataDataStreamsService.class); + final Client mockClient = mock(Client.class); + final AllocationService mockAllocationService = mock(AllocationService.class); + final MetadataRolloverService rolloverService = new MetadataRolloverService( + mockThreadPool, + mockCreateIndexService, + mdIndexAliasesService, + EmptySystemIndices.INSTANCE, + WriteLoadForecaster.DEFAULT + ); + + @Before + public void setUpMocks() { + when(mockNode.getId()).thenReturn("mocknode"); + when(mockClusterService.localNode()).thenReturn(mockNode); + } + public void testDocStatsSelectionFromPrimariesOnly() { long docsInPrimaryShards = 100; long docsInShards = 200; @@ -300,19 +331,6 @@ public void testEvaluateWithoutMetadata() { } public void testConditionEvaluationWhenAliasToWriteAndReadIndicesConsidersOnlyPrimariesFromWriteIndex() throws Exception { - final ClusterService mockClusterService = mock(ClusterService.class); - final DiscoveryNode mockNode = mock(DiscoveryNode.class); - when(mockNode.getId()).thenReturn("mocknode"); - when(mockClusterService.localNode()).thenReturn(mockNode); - final ThreadPool mockThreadPool = mock(ThreadPool.class); - final MetadataCreateIndexService mockCreateIndexService = mock(MetadataCreateIndexService.class); - final IndexNameExpressionResolver mockIndexNameExpressionResolver = mock(IndexNameExpressionResolver.class); - final ActionFilters mockActionFilters = mock(ActionFilters.class); - final 
MetadataIndexAliasesService mdIndexAliasesService = mock(MetadataIndexAliasesService.class); - - final Client mockClient = mock(Client.class); - final AllocationService mockAllocationService = mock(AllocationService.class); - final Map<String, IndexStats> indexStats = new HashMap<>(); int total = randomIntBetween(500, 1000); indexStats.put("logs-index-000001", createIndexStats(200L, total)); @@ -346,13 +364,6 @@ public void testConditionEvaluationWhenAliasToWriteAndReadIndicesConsidersOnlyPr when(mockCreateIndexService.applyCreateIndexRequest(any(), any(), anyBoolean(), any())).thenReturn(stateBefore); when(mdIndexAliasesService.applyAliasActions(any(), any())).thenReturn(stateBefore); - MetadataRolloverService rolloverService = new MetadataRolloverService( - mockThreadPool, - mockCreateIndexService, - mdIndexAliasesService, - EmptySystemIndices.INSTANCE, - WriteLoadForecaster.DEFAULT - ); final TransportRolloverAction transportRolloverAction = new TransportRolloverAction( mock(TransportService.class), mockClusterService, mockThreadPool, mockActionFilters, mockIndexNameExpressionResolver, rolloverService, mockClient, - mockAllocationService + mockAllocationService, + mockMetadataDataStreamService ); // For given alias, verify that condition evaluation fails when the condition doc count is greater than the primaries doc count @@ -398,6 +410,136 @@ public void testConditionEvaluationWhenAliasToWriteAndReadIndicesConsidersOnlyPr assertThat(response.getConditionStatus().get("[max_docs: 300]"), is(true)); } + public void testLazyRollover() throws Exception { + final IndexMetadata backingIndexMetadata = IndexMetadata.builder(".ds-logs-ds-000001") + .settings(settings(IndexVersion.current())) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + final DataStream dataStream = new DataStream( + "logs-ds", + List.of(backingIndexMetadata.getIndex()), + 1, + Map.of(), + false, + false, + false, + false, + IndexMode.STANDARD + ); + final ClusterState stateBefore = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().put(backingIndexMetadata, false).put(dataStream)) + .build(); + + doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + assert args.length == 5; + @SuppressWarnings("unchecked") + ActionListener<AcknowledgedResponse> listener = (ActionListener<AcknowledgedResponse>) args[4]; + listener.onResponse(AcknowledgedResponse.TRUE); + return null; + }).when(mockMetadataDataStreamService).setRolloverOnWrite(eq(dataStream.getName()), eq(true), any(), any(), anyActionListener()); + + final TransportRolloverAction transportRolloverAction = new TransportRolloverAction( + mock(TransportService.class), + mockClusterService, + mockThreadPool, + mockActionFilters, + mockIndexNameExpressionResolver, + rolloverService, + mockClient, + mockAllocationService, + mockMetadataDataStreamService + ); + final PlainActionFuture<RolloverResponse> future = new PlainActionFuture<>(); + RolloverRequest rolloverRequest = new RolloverRequest("logs-ds", null); + rolloverRequest.lazy(true); + transportRolloverAction.masterOperation(mock(CancellableTask.class), rolloverRequest, stateBefore, future); + RolloverResponse rolloverResponse = future.actionGet(); + assertThat(rolloverResponse.getOldIndex(), equalTo(".ds-logs-ds-000001")); + assertThat(rolloverResponse.getNewIndex(), Matchers.startsWith(".ds-logs-ds-")); + assertThat(rolloverResponse.getNewIndex(), Matchers.endsWith("-000002")); + assertThat(rolloverResponse.isLazy(), equalTo(true)); + assertThat(rolloverResponse.isDryRun(), equalTo(false)); +
assertThat(rolloverResponse.isRolledOver(), equalTo(false)); + assertThat(rolloverResponse.getConditionStatus().size(), equalTo(0)); + assertThat(rolloverResponse.isAcknowledged(), is(true)); + } + + public void testLazyRolloverFails() throws Exception { + final IndexMetadata.Builder indexMetadata = IndexMetadata.builder("logs-index-000001") + .putAlias(AliasMetadata.builder("logs-alias").writeIndex(true).build()) + .settings(settings(IndexVersion.current())) + .numberOfShards(1) + .numberOfReplicas(1); + final IndexMetadata backingIndexMetadata = IndexMetadata.builder(".ds-logs-ds-000001") + .settings(settings(IndexVersion.current())) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + final DataStream dataStream = new DataStream( + "logs-ds", + List.of(backingIndexMetadata.getIndex()), + randomIntBetween(1, 10), + Map.of(), + false, + false, + false, + false, + IndexMode.STANDARD + ); + final ClusterState stateBefore = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().put(indexMetadata).put(backingIndexMetadata, false).put(dataStream)) + .build(); + + final TransportRolloverAction transportRolloverAction = new TransportRolloverAction( + mock(TransportService.class), + mockClusterService, + mockThreadPool, + mockActionFilters, + mockIndexNameExpressionResolver, + rolloverService, + mockClient, + mockAllocationService, + mockMetadataDataStreamService + ); + + // Lazy rollover fails on a concrete index + { + final PlainActionFuture<RolloverResponse> future = new PlainActionFuture<>(); + RolloverRequest rolloverRequest = new RolloverRequest("logs-alias", null); + rolloverRequest.lazy(true); + transportRolloverAction.masterOperation(mock(CancellableTask.class), rolloverRequest, stateBefore, future); + IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, future::actionGet); + assertThat(illegalArgumentException.getMessage(), containsString("Lazy rollover can be applied only on a data stream.")); + } + + // Lazy rollover fails when used with conditions + { + final PlainActionFuture<RolloverResponse> future = new PlainActionFuture<>(); + RolloverRequest rolloverRequest = new RolloverRequest("logs-ds", null); + rolloverRequest.setConditions(RolloverConditions.newBuilder().addMaxIndexAgeCondition(TimeValue.timeValueDays(1)).build()); + rolloverRequest.lazy(true); + transportRolloverAction.masterOperation(mock(CancellableTask.class), rolloverRequest, stateBefore, future); + IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, future::actionGet); + assertThat(illegalArgumentException.getMessage(), containsString("Lazy rollover can be used only without any conditions.")); + } + + // Lazy rollover fails on concrete index with conditions + { + final PlainActionFuture<RolloverResponse> future = new PlainActionFuture<>(); + RolloverRequest rolloverRequest = new RolloverRequest("logs-alias", null); + rolloverRequest.setConditions(RolloverConditions.newBuilder().addMaxIndexAgeCondition(TimeValue.timeValueDays(1)).build()); + rolloverRequest.lazy(true); + transportRolloverAction.masterOperation(mock(CancellableTask.class), rolloverRequest, stateBefore, future); + IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, future::actionGet); + assertThat( + illegalArgumentException.getMessage(), + containsString("Lazy rollover can be applied only on a data stream with no conditions.") + ); + } + } + private IndicesStatsResponse createIndicesStatResponse(String indexName, long totalDocs, long primariesDocs) {
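// Editor's sketch (hedged): the doAnswer(...) stub in testLazyRollover above follows the standard
// Mockito pattern for methods that report their result through an ActionListener parameter rather
// than a return value — the stub completes the listener by hand. With illustrative names:
//
//     doAnswer(invocation -> {
//         ActionListener<AcknowledgedResponse> l = invocation.getArgument(4); // listener is the 5th argument
//         l.onResponse(AcknowledgedResponse.TRUE);                            // complete the async call
//         return null;                                                        // stubbed method returns void
//     }).when(mockService).setRolloverOnWrite(any(), anyBoolean(), any(), any(), anyActionListener());
//
// "mockService" is a placeholder; the test stubs mockMetadataDataStreamService the same way.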
final CommonStats primaryStats = mock(CommonStats.class); when(primaryStats.getDocs()).thenReturn(new DocsStats(primariesDocs, 0, between(1, 10000))); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresActionTests.java index dfafcd0662290..ffe42722b308d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresActionTests.java @@ -63,13 +63,12 @@ void runTest() { request.shardStatuses("green", "red"); // newly-created shards are in yellow health so this matches none of them final var future = new PlainActionFuture<IndicesShardStoresResponse>(); action.execute( - new CancellableTask(1, "transport", IndicesShardStoresAction.NAME, "", TaskId.EMPTY_TASK_ID, Map.of()), + new CancellableTask(1, "transport", TransportIndicesShardStoresAction.TYPE.name(), "", TaskId.EMPTY_TASK_ID, Map.of()), request, future ); - assertTrue(future.isDone()); - final var response = future.actionGet(0L); + final var response = future.result(); assertThat(response.getFailures(), empty()); assertThat(response.getStoreStatuses(), anEmptyMap()); assertThat(shardsWithFailures, empty()); @@ -86,7 +85,7 @@ void runTest() { request.shardStatuses(randomFrom("yellow", "all")); // newly-created shards are in yellow health so this matches all of them final var future = new PlainActionFuture<IndicesShardStoresResponse>(); action.execute( - new CancellableTask(1, "transport", IndicesShardStoresAction.NAME, "", TaskId.EMPTY_TASK_ID, Map.of()), + new CancellableTask(1, "transport", TransportIndicesShardStoresAction.TYPE.name(), "", TaskId.EMPTY_TASK_ID, Map.of()), request, future ); @@ -123,7 +122,14 @@ public void testCancellation() { runTest(new TestHarness() { @Override void runTest() { - final var task = new CancellableTask(1, "transport", IndicesShardStoresAction.NAME, "", TaskId.EMPTY_TASK_ID, Map.of()); + final var task = new CancellableTask( + 1, + "transport", + TransportIndicesShardStoresAction.TYPE.name(), + "", + TaskId.EMPTY_TASK_ID, + Map.of() + ); final var request = new IndicesShardStoresRequest(); request.shardStatuses(randomFrom("yellow", "all")); final var future = new PlainActionFuture<IndicesShardStoresResponse>(); action.execute(task, request, future); listExpected = false; assertFalse(future.isDone()); deterministicTaskQueue.runAllTasks(); - assertTrue(future.isDone()); - expectThrows(TaskCancelledException.class, () -> future.actionGet(0L)); + expectThrows(TaskCancelledException.class, future::result); } }); } @@ -146,16 +151,15 @@ void runTest() { request.shardStatuses(randomFrom("yellow", "all")); final var future = new PlainActionFuture<IndicesShardStoresResponse>(); action.execute( - new CancellableTask(1, "transport", IndicesShardStoresAction.NAME, "", TaskId.EMPTY_TASK_ID, Map.of()), + new CancellableTask(1, "transport", TransportIndicesShardStoresAction.TYPE.name(), "", TaskId.EMPTY_TASK_ID, Map.of()), request, future ); assertFalse(future.isDone()); failOneRequest = true; deterministicTaskQueue.runAllTasks(); - assertTrue(future.isDone()); assertFalse(failOneRequest); - assertEquals("simulated", expectThrows(ElasticsearchException.class, () -> future.actionGet(0L)).getMessage()); + assertEquals("simulated", expectThrows(ElasticsearchException.class, future::result).getMessage()); } }); } diff --git
a/server/src/test/java/org/elasticsearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateRequestTests.java index b06e7824ce5de..3800a640ebd17 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateRequestTests.java @@ -12,19 +12,21 @@ import org.elasticsearch.test.AbstractWireSerializingTestCase; public class DeleteComposableIndexTemplateRequestTests extends AbstractWireSerializingTestCase< - DeleteComposableIndexTemplateAction.Request> { + TransportDeleteComposableIndexTemplateAction.Request> { @Override - protected Writeable.Reader<DeleteComposableIndexTemplateAction.Request> instanceReader() { - return DeleteComposableIndexTemplateAction.Request::new; + protected Writeable.Reader<TransportDeleteComposableIndexTemplateAction.Request> instanceReader() { + return TransportDeleteComposableIndexTemplateAction.Request::new; } @Override - protected DeleteComposableIndexTemplateAction.Request createTestInstance() { - return new DeleteComposableIndexTemplateAction.Request(randomAlphaOfLength(5)); + protected TransportDeleteComposableIndexTemplateAction.Request createTestInstance() { + return new TransportDeleteComposableIndexTemplateAction.Request(randomAlphaOfLength(5)); } @Override - protected DeleteComposableIndexTemplateAction.Request mutateInstance(DeleteComposableIndexTemplateAction.Request instance) { + protected TransportDeleteComposableIndexTemplateAction.Request mutateInstance( + TransportDeleteComposableIndexTemplateAction.Request instance + ) { return randomValueOtherThan(instance, this::createTestInstance); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequestTests.java index ee1c423c74a4d..2461a5df90a88 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequestTests.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.indices.template.post; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplateTests; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -33,7 +33,9 @@ protected Writeable.Reader<SimulateIndexTemplateRequest> instanceReader() { @Override protected SimulateIndexTemplateRequest createTestInstance() { SimulateIndexTemplateRequest req = new SimulateIndexTemplateRequest(randomAlphaOfLength(10)); - PutComposableIndexTemplateAction.Request newTemplateRequest = new PutComposableIndexTemplateAction.Request(randomAlphaOfLength(4)); + TransportPutComposableIndexTemplateAction.Request newTemplateRequest = new TransportPutComposableIndexTemplateAction.Request( + randomAlphaOfLength(4) + ); newTemplateRequest.indexTemplate(ComposableIndexTemplateTests.randomInstance()); req.indexTemplateRequest(newTemplateRequest); req.includeDefaults(randomBoolean()); @@ -54,7 +56,7 @@ public void
testAddingGlobalTemplateWithHiddenIndexSettingIsIllegal() { Template template = new Template(Settings.builder().put(IndexMetadata.SETTING_INDEX_HIDDEN, true).build(), null, null); ComposableIndexTemplate globalTemplate = ComposableIndexTemplate.builder().indexPatterns(List.of("*")).template(template).build(); - PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("test"); + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request("test"); request.indexTemplate(globalTemplate); SimulateIndexTemplateRequest simulateRequest = new SimulateIndexTemplateRequest("testing"); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateRequestTests.java index 05cfe6ef7068c..5d24c44aee792 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateRequestTests.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.indices.template.post; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplateTests; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -33,7 +33,9 @@ protected Writeable.Reader<SimulateTemplateAction.Request> instanceReader() { @Override protected SimulateTemplateAction.Request createTestInstance() { SimulateTemplateAction.Request req = new SimulateTemplateAction.Request(randomAlphaOfLength(10)); - PutComposableIndexTemplateAction.Request newTemplateRequest = new PutComposableIndexTemplateAction.Request(randomAlphaOfLength(4)); + TransportPutComposableIndexTemplateAction.Request newTemplateRequest = new TransportPutComposableIndexTemplateAction.Request( + randomAlphaOfLength(4) + ); newTemplateRequest.indexTemplate(ComposableIndexTemplateTests.randomInstance()); req.indexTemplateRequest(newTemplateRequest); req.includeDefaults(randomBoolean()); @@ -49,7 +51,7 @@ public void testIndexNameCannotBeNullOrEmpty() { expectThrows(IllegalArgumentException.class, () -> new SimulateTemplateAction.Request((String) null)); expectThrows( IllegalArgumentException.class, - () -> new SimulateTemplateAction.Request((PutComposableIndexTemplateAction.Request) null) + () -> new SimulateTemplateAction.Request((TransportPutComposableIndexTemplateAction.Request) null) ); } @@ -57,7 +59,7 @@ public void testAddingGlobalTemplateWithHiddenIndexSettingIsIllegal() { Template template = new Template(Settings.builder().put(IndexMetadata.SETTING_INDEX_HIDDEN, true).build(), null, null); ComposableIndexTemplate globalTemplate = ComposableIndexTemplate.builder().indexPatterns(List.of("*")).template(template).build(); - PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("test"); + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request("test"); request.indexTemplate(globalTemplate); SimulateTemplateAction.Request simulateRequest = new SimulateTemplateAction.Request("testing"); diff --git
a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutComposableIndexTemplateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutComposableIndexTemplateRequestTests.java index cd5c1c477a108..448749c84278b 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutComposableIndexTemplateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutComposableIndexTemplateRequestTests.java @@ -24,15 +24,18 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -public class PutComposableIndexTemplateRequestTests extends AbstractWireSerializingTestCase<PutComposableIndexTemplateAction.Request> { +public class PutComposableIndexTemplateRequestTests extends AbstractWireSerializingTestCase< + TransportPutComposableIndexTemplateAction.Request> { @Override - protected Writeable.Reader<PutComposableIndexTemplateAction.Request> instanceReader() { - return PutComposableIndexTemplateAction.Request::new; + protected Writeable.Reader<TransportPutComposableIndexTemplateAction.Request> instanceReader() { + return TransportPutComposableIndexTemplateAction.Request::new; } @Override - protected PutComposableIndexTemplateAction.Request createTestInstance() { - PutComposableIndexTemplateAction.Request req = new PutComposableIndexTemplateAction.Request(randomAlphaOfLength(4)); + protected TransportPutComposableIndexTemplateAction.Request createTestInstance() { + TransportPutComposableIndexTemplateAction.Request req = new TransportPutComposableIndexTemplateAction.Request( + randomAlphaOfLength(4) + ); req.cause(randomAlphaOfLength(4)); req.create(randomBoolean()); req.indexTemplate(ComposableIndexTemplateTests.randomInstance()); @@ -40,7 +43,7 @@ protected PutComposableIndexTemplateAction.Request createTestInstance() { } @Override - protected PutComposableIndexTemplateAction.Request mutateInstance(PutComposableIndexTemplateAction.Request instance) { + protected TransportPutComposableIndexTemplateAction.Request mutateInstance(TransportPutComposableIndexTemplateAction.Request instance) { return randomValueOtherThan(instance, this::createTestInstance); } @@ -48,7 +51,7 @@ public void testPutGlobalTemplatesCannotHaveHiddenIndexSetting() { Template template = new Template(Settings.builder().put(IndexMetadata.SETTING_INDEX_HIDDEN, true).build(), null, null); ComposableIndexTemplate globalTemplate = ComposableIndexTemplate.builder().indexPatterns(List.of("*")).template(template).build(); - PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("test"); + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request("test"); request.indexTemplate(globalTemplate); ActionRequestValidationException validationException = request.validate(); @@ -60,7 +63,9 @@ public void testPutGlobalTemplatesCannotHaveHiddenIndexSetting() { } public void testPutIndexTemplateV2RequestMustContainTemplate() { - PutComposableIndexTemplateAction.Request requestWithoutTemplate = new PutComposableIndexTemplateAction.Request("test"); + TransportPutComposableIndexTemplateAction.Request requestWithoutTemplate = new TransportPutComposableIndexTemplateAction.Request( + "test" + ); ActionRequestValidationException validationException = requestWithoutTemplate.validate(); assertThat(validationException, is(notNullValue())); @@ -71,7 +76,7 @@ public void testPutIndexTemplateV2RequestMustContainTemplate() { } public void testValidationOfPriority() { - PutComposableIndexTemplateAction.Request req = new
PutComposableIndexTemplateAction.Request("test"); + TransportPutComposableIndexTemplateAction.Request req = new TransportPutComposableIndexTemplateAction.Request("test"); req.indexTemplate(ComposableIndexTemplate.builder().indexPatterns(Arrays.asList("foo", "bar")).priority(-5L).build()); ActionRequestValidationException validationException = req.validate(); assertThat(validationException, is(notNullValue())); @@ -82,7 +87,7 @@ public void testValidationOfPriority() { } public void testValidateNoTemplate() { - PutComposableIndexTemplateAction.Request req = new PutComposableIndexTemplateAction.Request("test"); + TransportPutComposableIndexTemplateAction.Request req = new TransportPutComposableIndexTemplateAction.Request("test"); req.indexTemplate(ComposableIndexTemplate.builder().indexPatterns(Collections.singletonList("*")).build()); assertNull(req.validate()); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java index 173cb4c66d18f..84921effed9f5 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java @@ -8,12 +8,9 @@ package org.elasticsearch.action.admin.indices.template.reservedstate; -import org.elasticsearch.action.admin.indices.template.delete.DeleteComponentTemplateAction; -import org.elasticsearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComponentTemplateAction; import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.put.TransportPutComponentTemplateAction; import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.support.ActionFilters; @@ -686,7 +683,7 @@ public void testHandlerCorrectness() { ); assertEquals(ReservedComposableIndexTemplateAction.NAME, putIndexAction.reservedStateHandlerName().get()); assertThat( - putIndexAction.modifiedKeys(new PutComposableIndexTemplateAction.Request("aaa")), + putIndexAction.modifiedKeys(new TransportPutComposableIndexTemplateAction.Request("aaa")), containsInAnyOrder(reservedComposableIndexName("aaa")) ); var delIndexAction = new TransportDeleteComposableIndexTemplateAction( @@ -699,7 +696,7 @@ public void testHandlerCorrectness() { ); assertEquals(ReservedComposableIndexTemplateAction.NAME, delIndexAction.reservedStateHandlerName().get()); assertThat( - delIndexAction.modifiedKeys(new DeleteComposableIndexTemplateAction.Request("a", "b")), + delIndexAction.modifiedKeys(new TransportDeleteComposableIndexTemplateAction.Request("a", "b")), containsInAnyOrder(reservedComposableIndexName("a"), reservedComposableIndexName("b")) ); @@ -728,7 +725,7 @@ public void testHandlerCorrectness() { ); assertEquals(ReservedComposableIndexTemplateAction.NAME, delComponentAction.reservedStateHandlerName().get()); assertThat( - 
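Note on the validation tests above: every case funnels through `Request#validate()`, which collects violations into one `ActionRequestValidationException`. A minimal stand-alone sketch of driving that validation path, built only from types and calls that already appear in these hunks (a literal name stands in for the `randomAlphaOfLength` test helper so the snippet compiles outside the test harness):

```java
import java.util.List;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Template;
import org.elasticsearch.common.settings.Settings;

public class PutTemplateValidationSketch {
    public static void main(String[] args) {
        // A global ("*") template that tries to force the hidden index setting:
        // the tests above expect validate() to reject this combination.
        Template template = new Template(Settings.builder().put(IndexMetadata.SETTING_INDEX_HIDDEN, true).build(), null, null);
        ComposableIndexTemplate globalTemplate = ComposableIndexTemplate.builder()
            .indexPatterns(List.of("*"))
            .template(template)
            .build();

        TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request("test");
        request.indexTemplate(globalTemplate);

        // Non-null means the request is invalid; the exception lists each violation.
        ActionRequestValidationException validationException = request.validate();
        System.out.println(validationException.validationErrors());
    }
}
```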
delComponentAction.modifiedKeys(new DeleteComponentTemplateAction.Request("a", "b")), + delComponentAction.modifiedKeys(new TransportDeleteComponentTemplateAction.Request("a", "b")), containsInAnyOrder(reservedComponentName("a"), reservedComponentName("b")) ); } @@ -928,7 +925,9 @@ public void testTemplatesWithReservedPrefix() throws Exception { // apply the modified keys to a cluster state, as the ReservedStateService would do ClusterState withReservedState = new ClusterState.Builder(updatedState.state()).metadata(withReservedMetadata).build(); - PutComposableIndexTemplateAction.Request pr = new PutComposableIndexTemplateAction.Request(conflictingTemplateName); + TransportPutComposableIndexTemplateAction.Request pr = new TransportPutComposableIndexTemplateAction.Request( + conflictingTemplateName + ); final ThreadPool threadPool = mock(ThreadPool.class); TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor(threadPool); @@ -945,7 +944,7 @@ public void testTemplatesWithReservedPrefix() throws Exception { var modifiedKeys = putTemplateAction.modifiedKeys(pr); assertEquals(1, modifiedKeys.size()); - var fakeAction = new ActionWithReservedState<PutComposableIndexTemplateAction.Request>() { + var fakeAction = new ActionWithReservedState<TransportPutComposableIndexTemplateAction.Request>() { }; assertEquals( "Failed to process request [validate_template] with errors: " @@ -964,7 +963,7 @@ public void testTemplatesWithReservedPrefix() throws Exception { // Try fake REST modification request with the weird prefixed composable_index_template:validate_template, this will work, since // the reserved keys for that name would be composable_index_template:composable_index_template:validate_template and it will not // match our reserved state. - var prOK = new PutComposableIndexTemplateAction.Request(reservedComposableIndexName(conflictingTemplateName)); + var prOK = new TransportPutComposableIndexTemplateAction.Request(reservedComposableIndexName(conflictingTemplateName)); var modifiedKeysOK = putTemplateAction.modifiedKeys(prOK); assertEquals(1, modifiedKeysOK.size()); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java index 7a87cf29bb526..a2e164f6a242c 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java @@ -315,7 +315,7 @@ public void testRejectCoordination() throws Exception { threadPool.startForcingRejections(); PlainActionFuture<BulkResponse> future = new PlainActionFuture<>(); ActionTestUtils.execute(bulkAction, null, bulkRequest, future); - expectThrows(EsRejectedExecutionException.class, future::actionGet); + expectThrows(EsRejectedExecutionException.class, future); } finally { threadPool.stopForcingRejections(); } @@ -329,7 +329,7 @@ public void testRejectionAfterCreateIndexIsPropagated() throws Exception { bulkAction.beforeIndexCreation = threadPool::startForcingRejections; PlainActionFuture<BulkResponse> future = new PlainActionFuture<>(); ActionTestUtils.execute(bulkAction, null, bulkRequest, future); - expectThrows(EsRejectedExecutionException.class, future::actionGet); + expectThrows(EsRejectedExecutionException.class, future); assertTrue(bulkAction.indexCreated); } finally { threadPool.stopForcingRejections(); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index
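Note on the `TransportBulkActionTests` hunks above: `expectThrows(..., future::actionGet)` becomes `expectThrows(..., future)`, so the assertion helper now receives the future itself rather than a method reference. A hypothetical stand-alone equivalent of such an overload (the helper name and signature here are illustrative, not the actual `ESTestCase` API):

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class ExpectThrowsSketch {
    // Waits for the future, unwraps the execution-time cause, and asserts its
    // type, so callers no longer need to spell out future::actionGet.
    static <E extends Throwable> E expectThrows(Class<E> expected, CompletableFuture<?> future) {
        try {
            future.get();
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (expected.isInstance(cause)) {
                return expected.cast(cause);
            }
            throw new AssertionError("expected " + expected.getName() + " but got " + cause, cause);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new AssertionError(e);
        }
        throw new AssertionError("expected " + expected.getName() + " but the future completed normally");
    }

    public static void main(String[] args) {
        CompletableFuture<Void> failed = CompletableFuture.failedFuture(new IllegalStateException("rejected"));
        IllegalStateException e = expectThrows(IllegalStateException.class, failed);
        System.out.println("caught: " + e.getMessage());
    }
}
```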
19abcb93fef4b..2ab5df93b2af2 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; @@ -36,6 +37,7 @@ import org.elasticsearch.index.bulk.stats.ShardBulkStats; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.MetadataFieldMapper; @@ -262,7 +264,13 @@ public void testExecuteBulkIndexRequestWithMappingUpdates() throws Exception { when(shard.applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean())).thenReturn( mappingUpdate ); - when(shard.mapperService()).thenReturn(mock(MapperService.class)); + MapperService mapperService = mock(MapperService.class); + when(shard.mapperService()).thenReturn(mapperService); + + // merged mapping source needs to be different from previous one for the master node to be invoked + DocumentMapper mergedDoc = mock(DocumentMapper.class); + when(mapperService.merge(any(), any(CompressedXContent.class), any())).thenReturn(mergedDoc); + when(mergedDoc.mappingSource()).thenReturn(CompressedXContent.fromJSON("{}")); randomlySetIgnoredPrimaryResponse(items[0]); @@ -875,9 +883,14 @@ public void testRetries() throws Exception { }); when(shard.indexSettings()).thenReturn(indexSettings); when(shard.shardId()).thenReturn(shardId); - when(shard.mapperService()).thenReturn(mock(MapperService.class)); + MapperService mapperService = mock(MapperService.class); + when(shard.mapperService()).thenReturn(mapperService); when(shard.getBulkOperationListener()).thenReturn(mock(ShardBulkStats.class)); + DocumentMapper mergedDocMapper = mock(DocumentMapper.class); + when(mergedDocMapper.mappingSource()).thenReturn(CompressedXContent.fromJSON("{}")); + when(mapperService.merge(any(), any(CompressedXContent.class), any())).thenReturn(mergedDocMapper); + UpdateHelper updateHelper = mock(UpdateHelper.class); when(updateHelper.prepare(any(), eq(shard), any())).thenReturn( new UpdateHelper.Result( @@ -964,7 +977,13 @@ public void testForceExecutionOnRejectionAfterMappingUpdate() throws Exception { success2 ); when(shard.getFailedIndexResult(any(EsRejectedExecutionException.class), anyLong(), anyString())).thenCallRealMethod(); - when(shard.mapperService()).thenReturn(mock(MapperService.class)); + MapperService mapperService = mock(MapperService.class); + when(shard.mapperService()).thenReturn(mapperService); + + // merged mapping source needs to be different from previous one for the master node to be invoked + DocumentMapper mergedDoc = mock(DocumentMapper.class); + when(mapperService.merge(any(), any(CompressedXContent.class), any())).thenReturn(mergedDoc); + when(mergedDoc.mappingSource()).thenReturn(CompressedXContent.fromJSON("{}")); randomlySetIgnoredPrimaryResponse(items[0]); @@ -1072,6 +1091,136 @@ public void 
testPerformOnPrimaryReportsBulkStats() throws Exception { latch.await(); } + public void testNoopMappingUpdateInfiniteLoopPrevention() throws Exception { + Engine.IndexResult mappingUpdate = new Engine.IndexResult( + new Mapping(mock(RootObjectMapper.class), new MetadataFieldMapper[0], Collections.emptyMap()), + "id" + ); + + IndexShard shard = mockShard(); + when(shard.applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean())).thenReturn( + mappingUpdate + ); + MapperService mapperService = mock(MapperService.class); + when(shard.mapperService()).thenReturn(mapperService); + + DocumentMapper documentMapper = mock(DocumentMapper.class); + when(documentMapper.mappingSource()).thenReturn(CompressedXContent.fromJSON("{}")); + // returning the current document mapper as the merge result to simulate a noop mapping update + when(mapperService.documentMapper()).thenReturn(documentMapper); + when(mapperService.merge(any(), any(CompressedXContent.class), any())).thenReturn(documentMapper); + + UpdateHelper updateHelper = mock(UpdateHelper.class); + when(updateHelper.prepare(any(), eq(shard), any())).thenReturn( + new UpdateHelper.Result( + new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"), + randomBoolean() ? DocWriteResponse.Result.CREATED : DocWriteResponse.Result.UPDATED, + Collections.singletonMap("field", "value"), + Requests.INDEX_CONTENT_TYPE + ) + ); + + BulkItemRequest[] items = new BulkItemRequest[] { + new BulkItemRequest(0, new UpdateRequest("index", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value")) }; + BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + + AssertionError error = expectThrows( + AssertionError.class, + () -> TransportShardBulkAction.performOnPrimary( + bulkShardRequest, + shard, + updateHelper, + threadPool::absoluteTimeInMillis, + (update, shardId, listener) -> fail("the master should not be contacted as the operation yielded a noop mapping update"), + listener -> listener.onResponse(null), + ActionTestUtils.assertNoFailureListener(result -> {}), + threadPool, + Names.WRITE + ) + ); + assertThat( + error.getMessage(), + equalTo( + "On retry, this indexing request resulted in another noop mapping update." + + " Failing the indexing operation to prevent an infinite retry loop." 
+ ) + ); + } + + public void testNoopMappingUpdateSuccessOnRetry() throws Exception { + Engine.IndexResult mappingUpdate = new Engine.IndexResult( + new Mapping(mock(RootObjectMapper.class), new MetadataFieldMapper[0], Collections.emptyMap()), + "id" + ); + Translog.Location resultLocation = new Translog.Location(42, 42, 42); + Engine.IndexResult successfulResult = new FakeIndexResult(1, 1, 10, true, resultLocation, "id"); + + IndexShard shard = mockShard(); + when(shard.applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean())).thenReturn( + // on the first invocation, return a result that attempts a mapping update + // the mapping update will be a noop and the operation is retried without contacting the master + mappingUpdate, + // the second invocation also returns a mapping update result + // this doesn't trigger the infinite loop detection because MapperService#mappingVersion returns a different mapping version + mappingUpdate, + // on the third attempt, return a successful result, indicating that no mapping update needs to be executed + successfulResult + ); + + MapperService mapperService = mock(MapperService.class); + when(shard.mapperService()).thenReturn(mapperService); + + DocumentMapper documentMapper = mock(DocumentMapper.class); + when(documentMapper.mappingSource()).thenReturn(CompressedXContent.fromJSON("{}")); + when(mapperService.documentMapper()).thenReturn(documentMapper); + // returning the current document mapper as the merge result to simulate a noop mapping update + when(mapperService.merge(any(), any(CompressedXContent.class), any())).thenReturn(documentMapper); + // on the second invocation, the mapping version is incremented + // so that the second mapping update attempt doesn't trigger the infinite loop prevention + when(mapperService.mappingVersion()).thenReturn(0L, 1L); + + UpdateHelper updateHelper = mock(UpdateHelper.class); + when(updateHelper.prepare(any(), eq(shard), any())).thenReturn( + new UpdateHelper.Result( + new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"), + randomBoolean() ?
DocWriteResponse.Result.CREATED : DocWriteResponse.Result.UPDATED, + Collections.singletonMap("field", "value"), + Requests.INDEX_CONTENT_TYPE + ) + ); + + BulkItemRequest[] items = new BulkItemRequest[] { + new BulkItemRequest(0, new UpdateRequest("index", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value")) }; + BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + + final CountDownLatch latch = new CountDownLatch(1); + TransportShardBulkAction.performOnPrimary( + bulkShardRequest, + shard, + updateHelper, + threadPool::absoluteTimeInMillis, + (update, shardId, listener) -> fail("the master should not be contacted as the operation yielded a noop mapping update"), + listener -> listener.onFailure(new IllegalStateException("no failure expected")), + new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(result -> { + BulkItemResponse primaryResponse = result.replicaRequest().items()[0].getPrimaryResponse(); + assertFalse(primaryResponse.isFailed()); + }), latch), + threadPool, + Names.WRITE + ); + + latch.await(); + verify(mapperService, times(2)).merge(any(), any(CompressedXContent.class), any()); + } + + private IndexShard mockShard() { + IndexShard shard = mock(IndexShard.class); + when(shard.shardId()).thenReturn(shardId); + when(shard.getBulkOperationListener()).thenReturn(mock(ShardBulkStats.class)); + when(shard.getFailedIndexResult(any(Exception.class), anyLong(), anyString())).thenCallRealMethod(); + return shard; + } + private void randomlySetIgnoredPrimaryResponse(BulkItemRequest primaryRequest) { if (randomBoolean()) { // add a response to the request and thereby check that it is ignored for the primary. diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java index 647eafb5f3cdd..49dff864e7374 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java @@ -190,6 +190,7 @@ public void onFailure(Exception e) { } }; Set<String> autoCreateIndices = Set.of(); // unused + Set<String> dataStreamsToRollover = Set.of(); // unused Map<String, IndexNotFoundException> indicesThatCannotBeCreated = Map.of(); // unused long startTime = 0; bulkAction.createMissingIndicesAndIndexData( @@ -198,6 +199,7 @@ public void onFailure(Exception e) { randomAlphaOfLength(10), listener, autoCreateIndices, + dataStreamsToRollover, indicesThatCannotBeCreated, startTime ); diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java index 3af2639538f0d..ae25a5b597ec3 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java @@ -152,15 +152,15 @@ public void testToXContent() throws IOException { } public void testFromXContent() throws IOException { - XContentParser parser = createParser(JsonXContent.jsonXContent, "{ \"fields\" : [\"FOO\"] }"); - FieldCapabilitiesRequest request = new FieldCapabilitiesRequest(); - ObjectParser<FieldCapabilitiesRequest, Void> PARSER = new ObjectParser<>("field_caps_request"); - PARSER.declareStringArray(fromList(String.class, FieldCapabilitiesRequest::fields), new ParseField("fields")); - - PARSER.parse(parser, request, null);
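Note on the two `testNoopMappingUpdate*` tests above: together they pin down a retry guard on the primary. A retry after a no-op mapping merge is only allowed while the mapping version still advances; a second no-op at the same version must fail rather than loop forever. A stand-alone sketch of that guard logic under hypothetical names (`tryOperation` and `currentMappingVersion` are illustrative, not the production API):

```java
import java.util.function.LongSupplier;
import java.util.function.Supplier;

public class NoopMappingUpdateGuardSketch {
    /** Result of one attempt: either done, or "a mapping update is required". */
    enum Attempt { DONE, NEEDS_MAPPING_UPDATE }

    // Models only the no-op path exercised by the tests: every requested
    // mapping update merges to a no-op, so the master is never contacted and
    // the only thing that can make a retry productive is a version bump.
    static void runWithRetry(Supplier<Attempt> tryOperation, LongSupplier currentMappingVersion) {
        long versionAtLastNoop = -1;
        while (true) {
            if (tryOperation.get() == Attempt.DONE) {
                return;
            }
            long version = currentMappingVersion.getAsLong();
            // If the mapping version has not advanced since the previous
            // no-op, a retry cannot make progress: fail loudly, exactly as the
            // first test asserts.
            if (version == versionAtLastNoop) {
                throw new AssertionError(
                    "On retry, this indexing request resulted in another noop mapping update."
                        + " Failing the indexing operation to prevent an infinite retry loop."
                );
            }
            versionAtLastNoop = version;
        }
    }

    public static void main(String[] args) {
        int[] calls = { 0 };
        long[] version = { 0 };
        runWithRetry(() -> {
            calls[0]++;
            if (calls[0] < 3) {
                version[0] = calls[0] - 1; // version advances between the two noop attempts
                return Attempt.NEEDS_MAPPING_UPDATE;
            }
            return Attempt.DONE;
        }, () -> version[0]);
        System.out.println("succeeded after " + calls[0] + " attempts"); // mirrors testNoopMappingUpdateSuccessOnRetry
    }
}
```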
+ try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{ \"fields\" : [\"FOO\"] }")) { + FieldCapabilitiesRequest request = new FieldCapabilitiesRequest(); + ObjectParser<FieldCapabilitiesRequest, Void> PARSER = new ObjectParser<>("field_caps_request"); + PARSER.declareStringArray(fromList(String.class, FieldCapabilitiesRequest::fields), new ParseField("fields")); - assertArrayEquals(request.fields(), new String[] { "FOO" }); + PARSER.parse(parser, request, null); + assertArrayEquals(request.fields(), new String[] { "FOO" }); + } } public void testValidation() { diff --git a/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java b/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java index eb9cfa4a6939c..76fdef3d06c1f 100644 --- a/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java @@ -80,24 +80,25 @@ public void testUnexpectedField() throws IOException { } public void testAddWithValidSourceValueIsAccepted() throws Exception { - XContentParser parser = createParser( - XContentFactory.jsonBuilder() - .startObject() - .startArray("docs") - .startObject() - .field("_source", randomFrom("false", "true")) - .endObject() - .startObject() - .field("_source", randomBoolean()) - .endObject() - .endArray() - .endObject() - ); - - MultiGetRequest multiGetRequest = new MultiGetRequest(); - multiGetRequest.add(randomAlphaOfLength(5), null, FetchSourceContext.FETCH_SOURCE, null, parser, true); - - assertEquals(2, multiGetRequest.getItems().size()); + try ( + XContentParser parser = createParser( + XContentFactory.jsonBuilder() + .startObject() + .startArray("docs") + .startObject() + .field("_source", randomFrom("false", "true")) + .endObject() + .startObject() + .field("_source", randomBoolean()) + .endObject() + .endArray() + .endObject() + ) + ) { + MultiGetRequest multiGetRequest = new MultiGetRequest(); + multiGetRequest.add(randomAlphaOfLength(5), null, FetchSourceContext.FETCH_SOURCE, null, parser, true); + assertEquals(2, multiGetRequest.getItems().size()); + } } public void testXContentSerialization() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java b/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java index e6efc00209ba5..6f5841f3d2a03 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java @@ -62,11 +62,15 @@ public void testXContentDeserialization() throws IOException { Map<String, PipelineConfiguration> pipelinesMap = createPipelineConfigMap(); GetPipelineResponse response = new GetPipelineResponse(new ArrayList<>(pipelinesMap.values())); XContentBuilder builder = response.toXContent(getRandomXContentBuilder(), ToXContent.EMPTY_PARAMS); - XContentParser parser = builder.generator() .contentType() .xContent() .createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, BytesReference.bytes(builder).streamInput()); - GetPipelineResponse parsedResponse = GetPipelineResponse.fromXContent(parser); + GetPipelineResponse parsedResponse; + try ( + XContentParser parser = builder.generator() + .contentType() + .xContent() + .createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, BytesReference.bytes(builder).streamInput()) + ) { + parsedResponse = GetPipelineResponse.fromXContent(parser); + } List<PipelineConfiguration> actualPipelines = response.pipelines(); List<PipelineConfiguration> parsedPipelines =
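Note: the `FieldCapabilitiesRequestTests`, `MultiGetRequestTests`, and `GetPipelineResponseTests` hunks around here are all the same mechanical change: parser creation moves into a try-with-resources so the parser is closed even when an assertion throws mid-test. The pattern distilled, with a plain `java.io` reader standing in for the XContent parser:

```java
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;

public class TryWithResourcesSketch {
    public static void main(String[] args) throws IOException {
        // Before: the resource leaks if an assertion below throws.
        // Reader leaky = new StringReader("{ \"fields\" : [\"FOO\"] }");

        // After: the resource is closed on every exit path, which keeps
        // leak-detecting test infrastructure happy.
        try (Reader parser = new StringReader("{ \"fields\" : [\"FOO\"] }")) {
            int first = parser.read();
            if (first != '{') {
                throw new AssertionError("expected '{' but got " + (char) first);
            }
        }
    }
}
```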
parsedResponse.pipelines(); assertEquals(actualPipelines.size(), parsedPipelines.size()); diff --git a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java index 7f5b5f7716f3e..bd6171e353add 100644 --- a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.test.ESTestCase; @@ -194,7 +193,7 @@ public void testSendSearchResponseDisallowPartialFailures() { new IllegalArgumentException() ); } - action.sendSearchResponse(InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, phaseResults.results); + action.sendSearchResponse(SearchResponseSections.EMPTY_WITH_TOTAL_HITS, phaseResults.results); assertThat(exception.get(), instanceOf(SearchPhaseExecutionException.class)); SearchPhaseExecutionException searchPhaseExecutionException = (SearchPhaseExecutionException) exception.get(); assertEquals(0, searchPhaseExecutionException.getSuppressed().length); diff --git a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java index f8a22ec04fb15..648cb8aa60158 100644 --- a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java @@ -20,7 +20,6 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.collapse.CollapseBuilder; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; @@ -56,81 +55,93 @@ public void testCollapseSingleHit() throws IOException { Map runtimeMappings = randomBoolean() ? emptyMap() : AbstractSearchTestCase.randomRuntimeMappings(); final MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1); - String collapseValue = randomBoolean() ? null : "boom"; + try { + String collapseValue = randomBoolean() ? 
null : "boom"; - mockSearchPhaseContext.getRequest() - .source( - new SearchSourceBuilder().collapse( - new CollapseBuilder("someField").setInnerHits( - IntStream.range(0, numInnerHits).mapToObj(hitNum -> new InnerHitBuilder().setName("innerHit" + hitNum)).toList() + mockSearchPhaseContext.getRequest() + .source( + new SearchSourceBuilder().collapse( + new CollapseBuilder("someField").setInnerHits( + IntStream.range(0, numInnerHits) + .mapToObj(hitNum -> new InnerHitBuilder().setName("innerHit" + hitNum)) + .toList() + ) ) - ) - ); - mockSearchPhaseContext.getRequest().source().query(originalQuery).runtimeMappings(runtimeMappings); - mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { - @Override - void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) { - assertTrue(executedMultiSearch.compareAndSet(false, true)); - assertEquals(numInnerHits, request.requests().size()); - SearchRequest searchRequest = request.requests().get(0); - assertTrue(searchRequest.source().query() instanceof BoolQueryBuilder); + ); + mockSearchPhaseContext.getRequest().source().query(originalQuery).runtimeMappings(runtimeMappings); + mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { + @Override + void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) { + assertTrue(executedMultiSearch.compareAndSet(false, true)); + assertEquals(numInnerHits, request.requests().size()); + SearchRequest searchRequest = request.requests().get(0); + assertTrue(searchRequest.source().query() instanceof BoolQueryBuilder); - BoolQueryBuilder groupBuilder = (BoolQueryBuilder) searchRequest.source().query(); - if (collapseValue == null) { - assertThat(groupBuilder.mustNot(), Matchers.contains(QueryBuilders.existsQuery("someField"))); - } else { - assertThat(groupBuilder.filter(), Matchers.contains(QueryBuilders.matchQuery("someField", "boom"))); - } - if (originalQuery != null) { - assertThat(groupBuilder.must(), Matchers.contains(QueryBuilders.termQuery("foo", "bar"))); - } - assertArrayEquals(mockSearchPhaseContext.getRequest().indices(), searchRequest.indices()); - assertThat(searchRequest.source().runtimeMappings(), equalTo(runtimeMappings)); + BoolQueryBuilder groupBuilder = (BoolQueryBuilder) searchRequest.source().query(); + if (collapseValue == null) { + assertThat(groupBuilder.mustNot(), Matchers.contains(QueryBuilders.existsQuery("someField"))); + } else { + assertThat(groupBuilder.filter(), Matchers.contains(QueryBuilders.matchQuery("someField", "boom"))); + } + if (originalQuery != null) { + assertThat(groupBuilder.must(), Matchers.contains(QueryBuilders.termQuery("foo", "bar"))); + } + assertArrayEquals(mockSearchPhaseContext.getRequest().indices(), searchRequest.indices()); + assertThat(searchRequest.source().runtimeMappings(), equalTo(runtimeMappings)); - List mSearchResponses = new ArrayList<>(numInnerHits); - for (int innerHitNum = 0; innerHitNum < numInnerHits; innerHitNum++) { - InternalSearchResponse internalSearchResponse = new InternalSearchResponse( - collapsedHits.get(innerHitNum), - null, - null, - null, - false, - null, - 1 + List mSearchResponses = new ArrayList<>(numInnerHits); + for (int innerHitNum = 0; innerHitNum < numInnerHits; innerHitNum++) { + var sections = new SearchResponseSections(collapsedHits.get(innerHitNum), null, null, false, null, null, 1); + try { + mockSearchPhaseContext.sendSearchResponse(sections, null); + } finally { + 
sections.decRef(); + } + mSearchResponses.add(new MultiSearchResponse.Item(mockSearchPhaseContext.searchResponse.get(), null)); + } + + ActionListener.respondAndRelease( + listener, + new MultiSearchResponse(mSearchResponses.toArray(new MultiSearchResponse.Item[0]), randomIntBetween(1, 10000)) ); - mockSearchPhaseContext.sendSearchResponse(internalSearchResponse, null); - mSearchResponses.add(new MultiSearchResponse.Item(mockSearchPhaseContext.searchResponse.get(), null)); } + }; + + SearchHit hit = new SearchHit(1, "ID"); + hit.setDocumentField("someField", new DocumentField("someField", Collections.singletonList(collapseValue))); + SearchHits hits = new SearchHits(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); + ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { + @Override + public void run() { + var sections = new SearchResponseSections(hits, null, null, false, null, null, 1); + try { + mockSearchPhaseContext.sendSearchResponse(sections, null); + } finally { + sections.decRef(); + } + } + }); - ActionListener.respondAndRelease( - listener, - new MultiSearchResponse(mSearchResponses.toArray(new MultiSearchResponse.Item[0]), randomIntBetween(1, 10000)) + phase.run(); + mockSearchPhaseContext.assertNoFailure(); + SearchResponse theResponse = mockSearchPhaseContext.searchResponse.get(); + assertNotNull(theResponse); + assertEquals(numInnerHits, theResponse.getHits().getHits()[0].getInnerHits().size()); + + for (int innerHitNum = 0; innerHitNum < numInnerHits; innerHitNum++) { + assertSame( + theResponse.getHits().getHits()[0].getInnerHits().get("innerHit" + innerHitNum), + collapsedHits.get(innerHitNum) ); } - }; - SearchHit hit = new SearchHit(1, "ID"); - hit.setDocumentField("someField", new DocumentField("someField", Collections.singletonList(collapseValue))); - SearchHits hits = new SearchHits(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); - ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, () -> new SearchPhase("test") { - @Override - public void run() { - mockSearchPhaseContext.sendSearchResponse(internalSearchResponse, null); + assertTrue(executedMultiSearch.get()); + } finally { + var resp = mockSearchPhaseContext.searchResponse.get(); + if (resp != null) { + resp.decRef(); } - }); - - phase.run(); - mockSearchPhaseContext.assertNoFailure(); - SearchResponse theResponse = mockSearchPhaseContext.searchResponse.get(); - assertNotNull(theResponse); - assertEquals(numInnerHits, theResponse.getHits().getHits()[0].getInnerHits().size()); - - for (int innerHitNum = 0; innerHitNum < numInnerHits; innerHitNum++) { - assertSame(theResponse.getHits().getHits()[0].getInnerHits().get("innerHit" + innerHitNum), collapsedHits.get(innerHitNum)); } - - assertTrue(executedMultiSearch.get()); } } @@ -154,9 +165,14 @@ public void testFailOneItemFailsEntirePhase() throws IOException { @Override void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) { assertTrue(executedMultiSearch.compareAndSet(false, true)); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(collapsedHits, null, null, null, false, null, 1); SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + collapsedHits, + null, + null, + false, + null, + null, + 1, null, 
1, 1, @@ -182,11 +198,15 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL SearchHit hit2 = new SearchHit(2, "ID2"); hit2.setDocumentField("someField", new DocumentField("someField", Collections.singletonList(collapseValue))); SearchHits hits = new SearchHits(new SearchHit[] { hit1, hit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); - ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, () -> new SearchPhase("test") { + ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { @Override public void run() { - mockSearchPhaseContext.sendSearchResponse(internalSearchResponse, null); + var sections = new SearchResponseSections(hits, null, null, false, null, null, 1); + try { + mockSearchPhaseContext.sendSearchResponse(sections, null); + } finally { + sections.decRef(); + } } }); phase.run(); @@ -198,98 +218,121 @@ public void run() { public void testSkipPhase() throws IOException { MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1); - mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { - @Override - void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) { - fail("no collapsing here"); - } - }; + try { + mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { + @Override + void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) { + fail("no collapsing here"); + } + }; - SearchHit hit1 = new SearchHit(1, "ID"); - hit1.setDocumentField("someField", new DocumentField("someField", Collections.singletonList(null))); - SearchHit hit2 = new SearchHit(2, "ID2"); - hit2.setDocumentField("someField", new DocumentField("someField", Collections.singletonList(null))); - SearchHits hits = new SearchHits(new SearchHit[] { hit1, hit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); - ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, () -> new SearchPhase("test") { - @Override - public void run() { - mockSearchPhaseContext.sendSearchResponse(internalSearchResponse, null); + SearchHit hit1 = new SearchHit(1, "ID"); + hit1.setDocumentField("someField", new DocumentField("someField", Collections.singletonList(null))); + SearchHit hit2 = new SearchHit(2, "ID2"); + hit2.setDocumentField("someField", new DocumentField("someField", Collections.singletonList(null))); + SearchHits hits = new SearchHits(new SearchHit[] { hit1, hit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); + ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { + @Override + public void run() { + var sections = new SearchResponseSections(hits, null, null, false, null, null, 1); + try { + mockSearchPhaseContext.sendSearchResponse(sections, null); + } finally { + sections.decRef(); + } + } + }); + phase.run(); + mockSearchPhaseContext.assertNoFailure(); + assertNotNull(mockSearchPhaseContext.searchResponse.get()); + } finally { + var resp = mockSearchPhaseContext.searchResponse.get(); + if (resp != null) { + resp.decRef(); } - }); - phase.run(); - mockSearchPhaseContext.assertNoFailure(); 
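Note: from here on, the search-phase tests share one idiom for the now ref-counted response sections: hand them to `sendSearchResponse` inside `try` and drop the test's own reference in `finally`. A stand-alone sketch of that ownership dance, with a minimal ref-count stand-in (not the actual `RefCounted` implementation):

```java
import java.util.concurrent.atomic.AtomicInteger;

public class RefCountedSectionsSketch {
    /** Minimal stand-in for a ref-counted response sections object. */
    static final class Sections {
        private final AtomicInteger refs = new AtomicInteger(1); // creator owns one reference
        void incRef() { refs.incrementAndGet(); }
        void decRef() {
            if (refs.decrementAndGet() == 0) {
                System.out.println("released");
            }
        }
    }

    /** Stand-in for sendSearchResponse: takes and later releases its own reference. */
    static void sendSearchResponse(Sections sections) {
        sections.incRef(); // the response now co-owns the sections
        try {
            // ... wrap the sections in a response and hand it off ...
        } finally {
            sections.decRef(); // stand-in for the response's eventual release
        }
    }

    public static void main(String[] args) {
        Sections sections = new Sections();
        try {
            sendSearchResponse(sections);
        } finally {
            sections.decRef(); // drop the creator's reference on every path; prints "released"
        }
    }
}
```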
- assertNotNull(mockSearchPhaseContext.searchResponse.get()); + } } public void testSkipExpandCollapseNoHits() throws IOException { MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1); - mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { - @Override - void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) { - fail("expand should not try to send empty multi search request"); - } - }; - mockSearchPhaseContext.getRequest() - .source( - new SearchSourceBuilder().collapse( - new CollapseBuilder("someField").setInnerHits(new InnerHitBuilder().setName("foobarbaz")) - ) - ); + try { + mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { + @Override + void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) { + fail("expand should not try to send empty multi search request"); + } + }; + mockSearchPhaseContext.getRequest() + .source( + new SearchSourceBuilder().collapse( + new CollapseBuilder("someField").setInnerHits(new InnerHitBuilder().setName("foobarbaz")) + ) + ); - SearchHits hits = new SearchHits(new SearchHit[0], new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); - ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, () -> new SearchPhase("test") { - @Override - public void run() { - mockSearchPhaseContext.sendSearchResponse(internalSearchResponse, null); + SearchHits hits = SearchHits.empty(new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f); + ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { + @Override + public void run() { + mockSearchPhaseContext.sendSearchResponse(new SearchResponseSections(hits, null, null, false, null, null, 1), null); + } + }); + phase.run(); + mockSearchPhaseContext.assertNoFailure(); + assertNotNull(mockSearchPhaseContext.searchResponse.get()); + } finally { + var resp = mockSearchPhaseContext.searchResponse.get(); + if (resp != null) { + resp.decRef(); } - }); - phase.run(); - mockSearchPhaseContext.assertNoFailure(); - assertNotNull(mockSearchPhaseContext.searchResponse.get()); + } } public void testExpandRequestOptions() throws IOException { MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1); - boolean version = randomBoolean(); - final boolean seqNoAndTerm = randomBoolean(); + try { + boolean version = randomBoolean(); + final boolean seqNoAndTerm = randomBoolean(); - mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { - @Override - void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) { - final QueryBuilder postFilter = QueryBuilders.existsQuery("foo"); - assertTrue(request.requests().stream().allMatch((r) -> "foo".equals(r.preference()))); - assertTrue(request.requests().stream().allMatch((r) -> "baz".equals(r.routing()))); - assertTrue(request.requests().stream().allMatch((r) -> version == r.source().version())); - assertTrue(request.requests().stream().allMatch((r) -> seqNoAndTerm == r.source().seqNoAndPrimaryTerm())); - assertTrue(request.requests().stream().allMatch((r) -> postFilter.equals(r.source().postFilter()))); - assertTrue(request.requests().stream().allMatch((r) -> r.source().fetchSource().fetchSource() == false)); - 
assertTrue(request.requests().stream().allMatch((r) -> r.source().fetchSource().includes().length == 0)); - assertTrue(request.requests().stream().allMatch((r) -> r.source().fetchSource().excludes().length == 0)); - } - }; - mockSearchPhaseContext.getRequest() - .source( - new SearchSourceBuilder().collapse( - new CollapseBuilder("someField").setInnerHits( - new InnerHitBuilder().setName("foobarbaz").setVersion(version).setSeqNoAndPrimaryTerm(seqNoAndTerm) - ) - ).fetchSource(false).postFilter(QueryBuilders.existsQuery("foo")) - ) - .preference("foobar") - .routing("baz"); + mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { + @Override + void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) { + final QueryBuilder postFilter = QueryBuilders.existsQuery("foo"); + assertTrue(request.requests().stream().allMatch((r) -> "foo".equals(r.preference()))); + assertTrue(request.requests().stream().allMatch((r) -> "baz".equals(r.routing()))); + assertTrue(request.requests().stream().allMatch((r) -> version == r.source().version())); + assertTrue(request.requests().stream().allMatch((r) -> seqNoAndTerm == r.source().seqNoAndPrimaryTerm())); + assertTrue(request.requests().stream().allMatch((r) -> postFilter.equals(r.source().postFilter()))); + assertTrue(request.requests().stream().allMatch((r) -> r.source().fetchSource().fetchSource() == false)); + assertTrue(request.requests().stream().allMatch((r) -> r.source().fetchSource().includes().length == 0)); + assertTrue(request.requests().stream().allMatch((r) -> r.source().fetchSource().excludes().length == 0)); + } + }; + mockSearchPhaseContext.getRequest() + .source( + new SearchSourceBuilder().collapse( + new CollapseBuilder("someField").setInnerHits( + new InnerHitBuilder().setName("foobarbaz").setVersion(version).setSeqNoAndPrimaryTerm(seqNoAndTerm) + ) + ).fetchSource(false).postFilter(QueryBuilders.existsQuery("foo")) + ) + .preference("foobar") + .routing("baz"); - SearchHits hits = new SearchHits(new SearchHit[0], new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); - ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, () -> new SearchPhase("test") { - @Override - public void run() throws IOException { - mockSearchPhaseContext.sendSearchResponse(internalSearchResponse, null); + SearchHits hits = SearchHits.empty(new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f); + ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { + @Override + public void run() { + mockSearchPhaseContext.sendSearchResponse(new SearchResponseSections(hits, null, null, false, null, null, 1), null); + } + }); + phase.run(); + mockSearchPhaseContext.assertNoFailure(); + assertNotNull(mockSearchPhaseContext.searchResponse.get()); + } finally { + var resp = mockSearchPhaseContext.searchResponse.get(); + if (resp != null) { + resp.decRef(); } - }); - phase.run(); - mockSearchPhaseContext.assertNoFailure(); - assertNotNull(mockSearchPhaseContext.searchResponse.get()); + } } } diff --git a/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java index 38409752c7e7d..035d01108d655 100644 --- 
a/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.fetch.subphase.FieldAndFormat; import org.elasticsearch.search.fetch.subphase.LookupField; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import java.util.List; @@ -34,176 +33,199 @@ public class FetchLookupFieldsPhaseTests extends ESTestCase { public void testNoLookupField() { MockSearchPhaseContext searchPhaseContext = new MockSearchPhaseContext(1); - searchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { - @Override - void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) { - throw new AssertionError("No lookup field"); + try { + searchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { + @Override + void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) { + throw new AssertionError("No lookup field"); + } + }; + int numHits = randomIntBetween(0, 10); + SearchHit[] searchHits = new SearchHit[randomIntBetween(0, 10)]; + for (int i = 0; i < searchHits.length; i++) { + searchHits[i] = SearchHitTests.createTestItem(randomBoolean(), randomBoolean()); + } + SearchHits hits = new SearchHits(searchHits, new TotalHits(numHits, TotalHits.Relation.EQUAL_TO), 1.0f); + var sections = new SearchResponseSections(hits, null, null, false, null, null, 1); + try { + FetchLookupFieldsPhase phase = new FetchLookupFieldsPhase(searchPhaseContext, sections, null); + phase.run(); + } finally { + sections.decRef(); + } + searchPhaseContext.assertNoFailure(); + assertNotNull(searchPhaseContext.searchResponse.get()); + } finally { + var resp = searchPhaseContext.searchResponse.get(); + if (resp != null) { + resp.decRef(); } - }; - int numHits = randomIntBetween(0, 10); - SearchHit[] searchHits = new SearchHit[randomIntBetween(0, 10)]; - for (int i = 0; i < searchHits.length; i++) { - searchHits[i] = SearchHitTests.createTestItem(randomBoolean(), randomBoolean()); } - SearchHits hits = new SearchHits(searchHits, new TotalHits(numHits, TotalHits.Relation.EQUAL_TO), 1.0f); - InternalSearchResponse searchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); - FetchLookupFieldsPhase phase = new FetchLookupFieldsPhase(searchPhaseContext, searchResponse, null); - phase.run(); - searchPhaseContext.assertNoFailure(); - assertNotNull(searchPhaseContext.searchResponse.get()); } public void testBasic() { MockSearchPhaseContext searchPhaseContext = new MockSearchPhaseContext(1); - final AtomicBoolean requestSent = new AtomicBoolean(); - searchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { - @Override - void sendExecuteMultiSearch( - MultiSearchRequest multiSearchRequest, - SearchTask task, - ActionListener listener - ) { - assertTrue(requestSent.compareAndSet(false, true)); - // send 4 requests for term_1, term_2, term_3, and unknown - assertThat(multiSearchRequest.requests(), hasSize(4)); - for (SearchRequest r : multiSearchRequest.requests()) { - assertNotNull(r.source()); - assertThat(r.source().query(), instanceOf(TermQueryBuilder.class)); - assertThat(r.source().size(), equalTo(1)); - } - final List queryTerms = multiSearchRequest.requests().stream().map(r -> { - final TermQueryBuilder query = 
(TermQueryBuilder) r.source().query(); - return query.value().toString(); - }).sorted().toList(); - assertThat(queryTerms, equalTo(List.of("term_1", "term_2", "term_3", "xyz"))); - final MultiSearchResponse.Item[] responses = new MultiSearchResponse.Item[multiSearchRequest.requests().size()]; - for (int i = 0; i < responses.length; i++) { - final SearchRequest r = multiSearchRequest.requests().get(i); - final TermQueryBuilder query = (TermQueryBuilder) r.source().query(); - final Map> fields = switch (query.value().toString()) { - case "term_1" -> Map.of("field_a", List.of("a1", "a2"), "field_b", List.of("b2")); - case "term_2" -> Map.of("field_a", List.of("a2", "a3"), "field_b", List.of("b1")); - case "term_3" -> Map.of("field_a", List.of("a2"), "field_b", List.of("b1", "b2")); - case "xyz" -> null; - default -> throw new AssertionError("unknown term value"); - }; - final SearchHits searchHits; - if (fields != null) { - final SearchHit hit = new SearchHit(randomInt(1000)); - fields.forEach((f, values) -> hit.setDocumentField(f, new DocumentField(f, values, List.of()))); - searchHits = new SearchHits(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f); - } else { - searchHits = new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 1.0f); + try { + final AtomicBoolean requestSent = new AtomicBoolean(); + searchPhaseContext.searchTransport = new SearchTransportService(null, null, null) { + @Override + void sendExecuteMultiSearch( + MultiSearchRequest multiSearchRequest, + SearchTask task, + ActionListener listener + ) { + assertTrue(requestSent.compareAndSet(false, true)); + // send 4 requests for term_1, term_2, term_3, and unknown + assertThat(multiSearchRequest.requests(), hasSize(4)); + for (SearchRequest r : multiSearchRequest.requests()) { + assertNotNull(r.source()); + assertThat(r.source().query(), instanceOf(TermQueryBuilder.class)); + assertThat(r.source().size(), equalTo(1)); } - InternalSearchResponse internalSearchResponse = new InternalSearchResponse( - searchHits, - null, - null, - null, - false, - null, - 1 - ); - responses[i] = new MultiSearchResponse.Item( - new SearchResponse( - internalSearchResponse, - null, - 1, - 1, - 0, - randomNonNegativeLong(), - ShardSearchFailure.EMPTY_ARRAY, - SearchResponseTests.randomClusters(), + final List queryTerms = multiSearchRequest.requests().stream().map(r -> { + final TermQueryBuilder query = (TermQueryBuilder) r.source().query(); + return query.value().toString(); + }).sorted().toList(); + assertThat(queryTerms, equalTo(List.of("term_1", "term_2", "term_3", "xyz"))); + final MultiSearchResponse.Item[] responses = new MultiSearchResponse.Item[multiSearchRequest.requests().size()]; + for (int i = 0; i < responses.length; i++) { + final SearchRequest r = multiSearchRequest.requests().get(i); + final TermQueryBuilder query = (TermQueryBuilder) r.source().query(); + final Map> fields = switch (query.value().toString()) { + case "term_1" -> Map.of("field_a", List.of("a1", "a2"), "field_b", List.of("b2")); + case "term_2" -> Map.of("field_a", List.of("a2", "a3"), "field_b", List.of("b1")); + case "term_3" -> Map.of("field_a", List.of("a2"), "field_b", List.of("b1", "b2")); + case "xyz" -> null; + default -> throw new AssertionError("unknown term value"); + }; + final SearchHits searchHits; + if (fields != null) { + final SearchHit hit = new SearchHit(randomInt(1000)); + fields.forEach((f, values) -> hit.setDocumentField(f, new DocumentField(f, values, List.of()))); + searchHits = new 
SearchHits(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f); + } else { + searchHits = SearchHits.empty(new TotalHits(0, TotalHits.Relation.EQUAL_TO), 1.0f); + } + responses[i] = new MultiSearchResponse.Item( + new SearchResponse( + searchHits, + null, + null, + false, + null, + null, + 1, + null, + 1, + 1, + 0, + randomNonNegativeLong(), + ShardSearchFailure.EMPTY_ARRAY, + SearchResponseTests.randomClusters(), + null + ), null - ), - null - ); + ); + } + ActionListener.respondAndRelease(listener, new MultiSearchResponse(responses, randomNonNegativeLong())); } - ActionListener.respondAndRelease(listener, new MultiSearchResponse(responses, randomNonNegativeLong())); - } - }; + }; - SearchHit leftHit0 = new SearchHit(randomInt(100)); - final List fetchFields = List.of(new FieldAndFormat(randomAlphaOfLength(10), null)); - { - leftHit0.setDocumentField( - "lookup_field_1", - new DocumentField( + SearchHit leftHit0 = new SearchHit(randomInt(100)); + final List fetchFields = List.of(new FieldAndFormat(randomAlphaOfLength(10), null)); + { + leftHit0.setDocumentField( "lookup_field_1", - List.of(), - List.of(), - List.of( - new LookupField("test_index", new TermQueryBuilder("test_field", "term_1"), fetchFields, 1), - new LookupField("test_index", new TermQueryBuilder("test_field", "term_2"), fetchFields, 1) + new DocumentField( + "lookup_field_1", + List.of(), + List.of(), + List.of( + new LookupField("test_index", new TermQueryBuilder("test_field", "term_1"), fetchFields, 1), + new LookupField("test_index", new TermQueryBuilder("test_field", "term_2"), fetchFields, 1) + ) ) - ) - ); - leftHit0.setDocumentField( - "lookup_field_2", - new DocumentField( + ); + leftHit0.setDocumentField( "lookup_field_2", - List.of(), - List.of(), - List.of(new LookupField("test_index", new TermQueryBuilder("test_field", "term_2"), fetchFields, 1)) - ) - ); - } + new DocumentField( + "lookup_field_2", + List.of(), + List.of(), + List.of(new LookupField("test_index", new TermQueryBuilder("test_field", "term_2"), fetchFields, 1)) + ) + ); + } - SearchHit leftHit1 = new SearchHit(randomInt(100)); - { - leftHit1.setDocumentField( - "lookup_field_2", - new DocumentField( + SearchHit leftHit1 = new SearchHit(randomInt(100)); + { + leftHit1.setDocumentField( "lookup_field_2", - List.of(), - List.of(), - List.of( - new LookupField("test_index", new TermQueryBuilder("test_field", "term_2"), fetchFields, 1), - new LookupField("test_index", new TermQueryBuilder("test_field", "xyz"), fetchFields, 1) + new DocumentField( + "lookup_field_2", + List.of(), + List.of(), + List.of( + new LookupField("test_index", new TermQueryBuilder("test_field", "term_2"), fetchFields, 1), + new LookupField("test_index", new TermQueryBuilder("test_field", "xyz"), fetchFields, 1) + ) ) - ) - ); - leftHit1.setDocumentField( - "lookup_field_3", - new DocumentField( + ); + leftHit1.setDocumentField( "lookup_field_3", - List.of(), - List.of(), - List.of(new LookupField("test_index", new TermQueryBuilder("test_field", "term_3"), fetchFields, 1)) + new DocumentField( + "lookup_field_3", + List.of(), + List.of(), + List.of(new LookupField("test_index", new TermQueryBuilder("test_field", "term_3"), fetchFields, 1)) + ) + ); + } + SearchHits searchHits = new SearchHits( + new SearchHit[] { leftHit0, leftHit1 }, + new TotalHits(2, TotalHits.Relation.EQUAL_TO), + 1.0f + ); + var sections = new SearchResponseSections(searchHits, null, null, false, null, null, 1); + try { + FetchLookupFieldsPhase phase = new 
FetchLookupFieldsPhase(searchPhaseContext, sections, null); + phase.run(); + } finally { + sections.decRef(); + } + assertTrue(requestSent.get()); + searchPhaseContext.assertNoFailure(); + assertNotNull(searchPhaseContext.searchResponse.get()); + assertSame(searchPhaseContext.searchResponse.get().getHits().getHits()[0], leftHit0); + assertSame(searchPhaseContext.searchResponse.get().getHits().getHits()[1], leftHit1); + assertFalse(leftHit0.hasLookupFields()); + assertThat( + leftHit0.field("lookup_field_1").getValues(), + containsInAnyOrder( + Map.of("field_a", List.of("a1", "a2"), "field_b", List.of("b2")), + Map.of("field_a", List.of("a2", "a3"), "field_b", List.of("b1")) ) ); - } - SearchHits searchHits = new SearchHits(new SearchHit[] { leftHit0, leftHit1 }, new TotalHits(2, TotalHits.Relation.EQUAL_TO), 1.0f); - InternalSearchResponse searchResponse = new InternalSearchResponse(searchHits, null, null, null, false, null, 1); - FetchLookupFieldsPhase phase = new FetchLookupFieldsPhase(searchPhaseContext, searchResponse, null); - phase.run(); - assertTrue(requestSent.get()); - searchPhaseContext.assertNoFailure(); - assertNotNull(searchPhaseContext.searchResponse.get()); - assertSame(searchPhaseContext.searchResponse.get().getHits().getHits()[0], leftHit0); - assertSame(searchPhaseContext.searchResponse.get().getHits().getHits()[1], leftHit1); - assertFalse(leftHit0.hasLookupFields()); - assertThat( - leftHit0.field("lookup_field_1").getValues(), - containsInAnyOrder( - Map.of("field_a", List.of("a1", "a2"), "field_b", List.of("b2")), - Map.of("field_a", List.of("a2", "a3"), "field_b", List.of("b1")) - ) - ); - assertThat( - leftHit0.field("lookup_field_2").getValues(), - contains(Map.of("field_a", List.of("a2", "a3"), "field_b", List.of("b1"))) - ); + assertThat( + leftHit0.field("lookup_field_2").getValues(), + contains(Map.of("field_a", List.of("a2", "a3"), "field_b", List.of("b1"))) + ); - assertFalse(leftHit1.hasLookupFields()); - assertThat( - leftHit1.field("lookup_field_2").getValues(), - contains(Map.of("field_a", List.of("a2", "a3"), "field_b", List.of("b1"))) - ); - assertThat( - leftHit1.field("lookup_field_3").getValues(), - contains(Map.of("field_a", List.of("a2"), "field_b", List.of("b1", "b2"))) - ); + assertFalse(leftHit1.hasLookupFields()); + assertThat( + leftHit1.field("lookup_field_2").getValues(), + contains(Map.of("field_a", List.of("a2", "a3"), "field_b", List.of("b1"))) + ); + assertThat( + leftHit1.field("lookup_field_3").getValues(), + contains(Map.of("field_a", List.of("a2"), "field_b", List.of("b1", "b2"))) + ); + } finally { + var resp = searchPhaseContext.searchResponse.get(); + if (resp != null) { + resp.decRef(); + } + } } } diff --git a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java index 24b2610c8d190..4594810da575a 100644 --- a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java @@ -122,6 +122,10 @@ public void run() { assertProfiles(profiled, 1, searchResponse); assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty()); } finally { + var resp = mockSearchPhaseContext.searchResponse.get(); + if (resp != null) { + resp.decRef(); + } results.decRef(); } } @@ -250,6 +254,10 @@ public void run() { assertProfiles(profiled, 2, searchResponse); assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty()); } finally { 
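Note: a few hunks below, `MockSearchPhaseContext.sendSearchResponse` switches from `set` to `getAndSet` so that storing a second response does not leak the first one held in the `AtomicReference`; the `FetchSearchPhaseTests` hunks here add the matching null-guarded `decRef` teardown. The pattern in isolation (stand-in types again):

```java
import java.util.concurrent.atomic.AtomicReference;

public class GetAndSetReleaseSketch {
    interface RefCounted { void decRef(); }

    static final AtomicReference<RefCounted> searchResponse = new AtomicReference<>();

    /** Store a new response; release the one it displaces, if any. */
    static void store(RefCounted newResponse) {
        RefCounted existing = searchResponse.getAndSet(newResponse);
        if (existing != null) {
            existing.decRef(); // the displaced value would otherwise never be released
        }
    }

    public static void main(String[] args) {
        store(() -> System.out.println("first response released"));
        store(() -> System.out.println("second response released")); // prints the first message
        // Null-guarded teardown, as added to the tests:
        RefCounted resp = searchResponse.get();
        if (resp != null) {
            resp.decRef(); // prints the second message
        }
    }
}
```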
+ var resp = mockSearchPhaseContext.searchResponse.get(); + if (resp != null) { + resp.decRef(); + } results.decRef(); } } @@ -374,6 +382,10 @@ public void run() { } assertTrue(mockSearchPhaseContext.releasedSearchContexts.contains(ctx)); } finally { + var resp = mockSearchPhaseContext.searchResponse.get(); + if (resp != null) { + resp.decRef(); + } results.decRef(); } } @@ -489,6 +501,10 @@ public void run() { mockSearchPhaseContext.releasedSearchContexts.size() ); } finally { + var resp = mockSearchPhaseContext.searchResponse.get(); + if (resp != null) { + resp.decRef(); + } results.decRef(); } } @@ -600,6 +616,10 @@ public void run() { assertThat(mockSearchPhaseContext.searchResponse.get().getShardFailures(), arrayWithSize(1)); assertThat(mockSearchPhaseContext.releasedSearchContexts, hasSize(1)); } finally { + var resp = mockSearchPhaseContext.searchResponse.get(); + if (resp != null) { + resp.decRef(); + } results.decRef(); } } @@ -716,6 +736,10 @@ public void run() { assertEquals(1, mockSearchPhaseContext.releasedSearchContexts.size()); assertTrue(mockSearchPhaseContext.releasedSearchContexts.contains(ctx1)); } finally { + var resp = mockSearchPhaseContext.searchResponse.get(); + if (resp != null) { + resp.decRef(); + } results.decRef(); } diff --git a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java index 71156517b0306..1a510058e3bbd 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java @@ -16,7 +16,6 @@ import org.elasticsearch.core.Releasables; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.transport.Transport; @@ -83,10 +82,10 @@ public OriginalIndices getOriginalIndices(int shardIndex) { } @Override - public void sendSearchResponse(InternalSearchResponse internalSearchResponse, AtomicArray queryResults) { + public void sendSearchResponse(SearchResponseSections internalSearchResponse, AtomicArray queryResults) { String scrollId = getRequest().scroll() != null ? TransportSearchHelper.buildScrollId(queryResults) : null; String searchContextId = getRequest().pointInTimeBuilder() != null ? 
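The change running through these hunks is that `SearchResponse` is now ref-counted: every test that obtains one must release it exactly once, and `MockSearchPhaseContext.sendSearchResponse` must release any response it displaces. A minimal sketch of both idioms, using a hypothetical `ResponseHolder` wrapper (the class and its names are illustrative, not part of the production code):

```java
import java.util.concurrent.atomic.AtomicReference;

import org.elasticsearch.core.RefCounted;

// Sketch of the two release idioms added above, for any ref-counted response.
class ResponseHolder<T extends RefCounted> {

    private final AtomicReference<T> current = new AtomicReference<>();

    // Mirrors the getAndSet(...) change in MockSearchPhaseContext: the
    // displaced response, if any, is released so it cannot leak when a test
    // sends more than one response.
    void set(T response) {
        T existing = current.getAndSet(response);
        if (existing != null) {
            existing.decRef();
        }
    }

    // Mirrors the `finally` blocks added to the tests: the null check is
    // needed because a phase that failed may never have set a response.
    void release() {
        T resp = current.getAndSet(null);
        if (resp != null) {
            resp.decRef();
        }
    }
}
```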
TransportSearchHelper.buildScrollId(queryResults) : null; - searchResponse.set( + var existing = searchResponse.getAndSet( new SearchResponse( internalSearchResponse, scrollId, @@ -99,6 +98,9 @@ public void sendSearchResponse(InternalSearchResponse internalSearchResponse, At searchContextId ) ); + if (existing != null) { + existing.decRef(); + } } @Override diff --git a/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java b/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java index 9b1ed6eee1028..f682e75b89a07 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java @@ -19,7 +19,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -149,8 +149,7 @@ public void search(final SearchRequest request, final ActionListener> mutators = new ArrayList<>(); diff --git a/server/src/test/java/org/elasticsearch/action/search/MultiSearchResponseTests.java b/server/src/test/java/org/elasticsearch/action/search/MultiSearchResponseTests.java index 2ad5d17770687..1de057f0446ee 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MultiSearchResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/MultiSearchResponseTests.java @@ -9,7 +9,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.core.RefCounted; -import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; @@ -39,17 +39,14 @@ private MultiSearchResponse createTestInstance() { int successfulShards = randomIntBetween(0, totalShards); int skippedShards = totalShards - successfulShards; SearchResponse.Clusters clusters = SearchResponseTests.randomSimpleClusters(); - InternalSearchResponse internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, totalShards, successfulShards, skippedShards, tookInMillis, ShardSearchFailure.EMPTY_ARRAY, - clusters, - null + clusters ); items[i] = new MultiSearchResponse.Item(searchResponse, null); } @@ -67,17 +64,14 @@ private static MultiSearchResponse createTestInstanceWithFailures() { int successfulShards = randomIntBetween(0, totalShards); int skippedShards = totalShards - successfulShards; SearchResponse.Clusters clusters = SearchResponseTests.randomSimpleClusters(); - InternalSearchResponse internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, totalShards, successfulShards, skippedShards, tookInMillis, ShardSearchFailure.EMPTY_ARRAY, - clusters, - null + clusters ); items[i] = new MultiSearchResponse.Item(searchResponse, null); } else { diff --git 
a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java index a02eddf039e46..d6b1bd8057708 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -21,10 +21,10 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.Transport; @@ -152,7 +152,7 @@ public void run() { assertTrue(searchPhaseDidRun.get()); assertEquals(shardsIter.size() - numSkipped, numRequests.get()); - asyncAction.sendSearchResponse(null, null); + asyncAction.sendSearchResponse(SearchResponseSections.EMPTY_WITH_TOTAL_HITS, null); assertNotNull(searchResponse.get()); assertEquals(0, searchResponse.get().getFailedShards()); assertEquals(numSkipped, searchResponse.get().getSkippedShards()); @@ -695,7 +695,7 @@ public void run() { assertThat(latch.await(4, TimeUnit.SECONDS), equalTo(true)); assertThat(searchPhaseDidRun.get(), equalTo(true)); - asyncAction.sendSearchResponse(null, null); + asyncAction.sendSearchResponse(SearchResponseSections.EMPTY_WITH_TOTAL_HITS, null); assertNotNull(searchResponse.get()); assertThat(searchResponse.get().getSkippedShards(), equalTo(numUnavailableSkippedShards)); assertThat(searchResponse.get().getFailedShards(), equalTo(0)); @@ -772,7 +772,23 @@ public static class TestSearchResponse extends SearchResponse { final Set queried = new HashSet<>(); TestSearchResponse() { - super(InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, null, 0, 0, 0, 0L, ShardSearchFailure.EMPTY_ARRAY, Clusters.EMPTY, null); + super( + SearchHits.EMPTY_WITH_TOTAL_HITS, + null, + null, + false, + null, + null, + 1, + null, + 0, + 0, + 0, + 0L, + ShardSearchFailure.EMPTY_ARRAY, + Clusters.EMPTY, + null + ); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java index 90ac90738837d..32091780484fa 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java @@ -85,5 +85,11 @@ public void testEncode() { assertThat(node3.getNode(), equalTo("node_3")); assertThat(node3.getSearchContextId().getId(), equalTo(42L)); assertThat(node3.getSearchContextId().getSessionId(), equalTo("c")); + + final String[] indices = SearchContextId.decodeIndices(id); + assertThat(indices.length, equalTo(3)); + assertThat(indices[0], equalTo("cluster_x:idx")); + assertThat(indices[1], equalTo("cluster_y:idy")); + assertThat(indices[2], equalTo("idy")); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index cd86a2e4f55d6..ac88f999adef6 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -50,7 +50,6 @@ import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.FetchSearchResult; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.profile.ProfileResult; @@ -293,8 +292,8 @@ public void testMerge() { reducedQueryPhase.suggest(), profile ); + final SearchResponseSections mergedResponse = SearchPhaseController.merge(false, reducedQueryPhase, fetchResults); try { - InternalSearchResponse mergedResponse = SearchPhaseController.merge(false, reducedQueryPhase, fetchResults); if (trackTotalHits == SearchContext.TRACK_TOTAL_HITS_DISABLED) { assertNull(mergedResponse.hits.getTotalHits()); } else { @@ -347,6 +346,7 @@ public void testMerge() { assertThat(mergedResponse.profile(), is(anEmptyMap())); } } finally { + mergedResponse.decRef(); fetchResults.asList().forEach(TransportMessage::decRef); } } finally { @@ -411,8 +411,8 @@ protected boolean lessThan(RankDoc a, RankDoc b) { reducedQueryPhase.suggest(), false ); + SearchResponseSections mergedResponse = SearchPhaseController.merge(false, reducedQueryPhase, fetchResults); try { - InternalSearchResponse mergedResponse = SearchPhaseController.merge(false, reducedQueryPhase, fetchResults); if (trackTotalHits == SearchContext.TRACK_TOTAL_HITS_DISABLED) { assertNull(mergedResponse.hits.getTotalHits()); } else { @@ -428,6 +428,7 @@ protected boolean lessThan(RankDoc a, RankDoc b) { assertThat(mergedResponse.hits().getHits().length, equalTo(reducedQueryPhase.sortedTopDocs().scoreDocs().length)); assertThat(mergedResponse.profile(), is(anEmptyMap())); } finally { + mergedResponse.decRef(); fetchResults.asList().forEach(TransportMessage::decRef); } } finally { @@ -578,7 +579,7 @@ private static AtomicArray generateFetchResults( } } } - SearchHit[] hits = searchHits.toArray(new SearchHit[0]); + SearchHit[] hits = searchHits.toArray(SearchHits.EMPTY); ProfileResult profileResult = profile && searchHits.size() > 0 ? new ProfileResult("fetch", "fetch", Map.of(), Map.of(), randomNonNegativeLong(), List.of()) : null; diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index 0c8496081ff19..8c0ffeabf0ea6 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -457,6 +457,42 @@ public void testValidate() throws IOException { assertEquals(1, validationErrors.validationErrors().size()); assertEquals("[rank] requires [explain] is [false]", validationErrors.validationErrors().get(0)); } + { + SearchRequest searchRequest = new SearchRequest("test").source( + new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder("")) + ); + ActionRequestValidationException validationErrors = searchRequest.validate(); + assertNotNull(validationErrors); + assertEquals(1, validationErrors.validationErrors().size()); + assertEquals( + "[indices] cannot be used with point in time. 
Do not specify any index with point in time.", + validationErrors.validationErrors().get(0) + ); + } + { + SearchRequest searchRequest = new SearchRequest().indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED) + .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(""))); + ActionRequestValidationException validationErrors = searchRequest.validate(); + assertNotNull(validationErrors); + assertEquals(1, validationErrors.validationErrors().size()); + assertEquals("[indicesOptions] cannot be used with point in time", validationErrors.validationErrors().get(0)); + } + { + SearchRequest searchRequest = new SearchRequest().routing("route1") + .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(""))); + ActionRequestValidationException validationErrors = searchRequest.validate(); + assertNotNull(validationErrors); + assertEquals(1, validationErrors.validationErrors().size()); + assertEquals("[routing] cannot be used with point in time", validationErrors.validationErrors().get(0)); + } + { + SearchRequest searchRequest = new SearchRequest().preference("pref1") + .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(""))); + ActionRequestValidationException validationErrors = searchRequest.validate(); + assertNotNull(validationErrors); + assertEquals(1, validationErrors.validationErrors().size()); + assertEquals("[preference] cannot be used with point in time", validationErrors.validationErrors().get(0)); + } } public void testCopyConstructor() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java index e57b204df0836..e81d7a2246e03 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.InternalAggregations; @@ -29,7 +30,6 @@ import org.elasticsearch.search.aggregations.bucket.range.Range; import org.elasticsearch.search.aggregations.metrics.Max; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.profile.SearchProfileResultsTests; @@ -108,8 +108,7 @@ public void testMergeTookInMillis() throws InterruptedException { ) ) { for (int i = 0; i < numResponses; i++) { - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, 1, 1, @@ -169,8 +168,7 @@ public void testMergeShardFailures() throws InterruptedException { shardSearchFailures[j] = failure; priorityQueue.add(Tuple.tuple(searchShardTarget, failure)); } - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, 1, 1, @@ -231,8 +229,7 @@ public void 
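The four new validation blocks above pin down that a point-in-time search request must not also carry indices, indices options, routing, or preference. A condensed sketch of the first case, assembled from the calls shown in the diff:

```java
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.search.builder.PointInTimeBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;

static void pointInTimeRejectsExplicitIndices() {
    // Naming an index while attaching a point-in-time reader is contradictory:
    // the PIT already fixes the target indices.
    SearchRequest request = new SearchRequest("test").source(
        new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(""))
    );
    ActionRequestValidationException errors = request.validate();
    // exactly one error: "[indices] cannot be used with point in time. ..."
    assert errors != null && errors.validationErrors().size() == 1;
}
```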
testMergeShardFailuresNullShardTarget() throws InterruptedException shardSearchFailures[j] = failure; priorityQueue.add(Tuple.tuple(shardId, failure)); } - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, 1, 1, @@ -291,8 +288,7 @@ public void testMergeShardFailuresNullShardId() throws InterruptedException { shardSearchFailures[j] = shardSearchFailure; expectedFailures.add(shardSearchFailure); } - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, 1, 1, @@ -334,10 +330,14 @@ public void testMergeProfileResults() throws InterruptedException { for (int i = 0; i < numResponses; i++) { SearchProfileResults profile = SearchProfileResultsTests.createTestItem(); expectedProfile.putAll(profile.getShardResults()); - SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, null, profile, false, null, 1); SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + SearchHits.empty(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), + null, + null, + false, + null, + profile, + 1, null, 1, 1, @@ -407,10 +407,15 @@ public void testMergeCompletionSuggestions() throws InterruptedException { completionSuggestion.addTerm(options); suggestions.add(completionSuggestion); Suggest suggest = new Suggest(suggestions); - SearchHits searchHits = new SearchHits(new SearchHit[0], null, Float.NaN); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, suggest, null, false, null, 1); + SearchHits searchHits = SearchHits.empty(null, Float.NaN); SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + searchHits, + null, + suggest, + false, + null, + null, + 1, null, 1, 1, @@ -488,10 +493,14 @@ public void testMergeCompletionSuggestionsTieBreak() throws InterruptedException completionSuggestion.addTerm(options); suggestions.add(completionSuggestion); Suggest suggest = new Suggest(suggestions); - SearchHits searchHits = new SearchHits(new SearchHit[0], null, Float.NaN); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, suggest, null, false, null, 1); SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + SearchHits.empty(null, Float.NaN), + null, + suggest, + false, + null, + null, + 1, null, 1, 1, @@ -554,7 +563,6 @@ public void testMergeEmptyFormat() throws InterruptedException { Collections.emptyMap() ); - SearchHits searchHits = new SearchHits(new SearchHit[0], null, Float.NaN); try ( SearchResponseMerger searchResponseMerger = new SearchResponseMerger( 0, @@ -566,9 +574,14 @@ public void testMergeEmptyFormat() throws InterruptedException { ) { for (Max max : Arrays.asList(max1, max2)) { InternalAggregations aggs = InternalAggregations.from(Arrays.asList(max)); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, aggs, null, null, false, null, 1); SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + SearchHits.empty(null, Float.NaN), + aggs, + null, + false, + null, + null, + 1, null, 1, 1, @@ -629,10 +642,14 @@ public void testMergeAggs() throws InterruptedException { ); InternalDateRange 
range = factory.create(rangeAggName, singletonList(bucket), DocValueFormat.RAW, false, emptyMap()); InternalAggregations aggs = InternalAggregations.from(Arrays.asList(range, max)); - SearchHits searchHits = new SearchHits(new SearchHit[0], null, Float.NaN); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, aggs, null, null, false, null, 1); SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + SearchHits.empty(null, Float.NaN), + aggs, + null, + false, + null, + null, + 1, null, 1, 1, @@ -787,18 +804,14 @@ public void testMergeSearchHits() throws InterruptedException { Boolean terminatedEarly = frequently() ? null : true; expectedTerminatedEarly = expectedTerminatedEarly == null ? terminatedEarly : expectedTerminatedEarly; - InternalSearchResponse internalSearchResponse = new InternalSearchResponse( + SearchResponse searchResponse = new SearchResponse( searchHits, null, null, - null, timedOut, terminatedEarly, - numReducePhases - ); - - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + null, + numReducePhases, null, total, successful, @@ -937,9 +950,14 @@ public void testMergeEmptySearchHitsWithNonEmpty() { null, null ); - InternalSearchResponse response = new InternalSearchResponse(searchHits, null, null, null, false, false, 1); SearchResponse searchResponse = new SearchResponse( - response, + searchHits, + null, + null, + false, + false, + null, + 1, null, 1, 1, @@ -955,17 +973,14 @@ public void testMergeEmptySearchHitsWithNonEmpty() { } } { - SearchHits empty = new SearchHits( - new SearchHit[0], - new TotalHits(0, TotalHits.Relation.EQUAL_TO), - Float.NaN, + SearchResponse searchResponse = new SearchResponse( + SearchHits.empty(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), null, null, - null - ); - InternalSearchResponse response = new InternalSearchResponse(empty, null, null, null, false, false, 1); - SearchResponse searchResponse = new SearchResponse( - response, + false, + false, + null, + 1, null, 1, 1, @@ -1014,10 +1029,14 @@ public void testMergeOnlyEmptyHits() { long previousValue = expectedTotalHits == null ? 
0 : expectedTotalHits.value; expectedTotalHits = new TotalHits(Math.min(previousValue + totalHits.value, trackTotalHitsUpTo), totalHitsRelation); } - SearchHits empty = new SearchHits(new SearchHit[0], totalHits, Float.NaN, null, null, null); - InternalSearchResponse response = new InternalSearchResponse(empty, null, null, null, false, false, 1); SearchResponse searchResponse = new SearchResponse( - response, + SearchHits.empty(totalHits, Float.NaN), + null, + null, + false, + false, + null, + 1, null, 1, 1, diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java index b45a04922c187..ef759279e095f 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java @@ -25,9 +25,9 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchHitsTests; import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.aggregations.AggregationsTests; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.profile.SearchProfileResultsTests; import org.elasticsearch.search.suggest.Suggest; @@ -107,42 +107,44 @@ private SearchResponse createTestItem(boolean minimal, ShardSearchFailure... sha int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = randomIntBetween(0, totalShards); - InternalSearchResponse internalSearchResponse; + SearchResponse.Clusters clusters; + if (minimal) { + clusters = randomSimpleClusters(); + } else { + clusters = randomClusters(); + } if (minimal == false) { SearchHits hits = SearchHitsTests.createTestItem(true, true); InternalAggregations aggregations = aggregationsTests.createTestInstance(); Suggest suggest = SuggestTests.createTestItem(); SearchProfileResults profileResults = SearchProfileResultsTests.createTestItem(); - internalSearchResponse = new InternalSearchResponse( + return new SearchResponse( hits, aggregations, suggest, - profileResults, timedOut, terminatedEarly, - numReducePhases + profileResults, + numReducePhases, + null, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + shardSearchFailures, + clusters ); } else { - internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; - } - - SearchResponse.Clusters clusters; - if (minimal) { - clusters = randomSimpleClusters(); - } else { - clusters = randomClusters(); + return SearchResponseUtils.emptyWithTotalHits( + null, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + shardSearchFailures, + clusters + ); } - - return new SearchResponse( - internalSearchResponse, - null, - totalShards, - successfulShards, - skippedShards, - tookInMillis, - shardSearchFailures, - clusters - ); } /** @@ -381,15 +383,13 @@ public void testToXContent() throws IOException { SearchHit[] hits = new SearchHit[] { hit }; { SearchResponse response = new SearchResponse( - new InternalSearchResponse( - new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), - null, - null, - null, - false, - null, - 1 - ), + new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), + null, + null, + false, + 
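Across these merger and response tests, the removed `InternalSearchResponse` wrapper is replaced by passing its sections straight into `SearchResponse`. Reading the converted call sites, the section arguments come first and the response-level metadata after; here is one such call annotated with parameter comments inferred from the old wrapper (the comments are editorial, not from the source):

```java
SearchResponse searchResponse = new SearchResponse(
    SearchHits.empty(totalHits, Float.NaN), // hits (was InternalSearchResponse arg 1)
    null,                                   // aggregations
    null,                                   // suggest
    false,                                  // timedOut
    false,                                  // terminatedEarly
    null,                                   // profile results
    1,                                      // numReducePhases
    null,                                   // scrollId
    1,                                      // totalShards
    1,                                      // successfulShards
    0,                                      // skippedShards
    100L,                                   // tookInMillis
    ShardSearchFailure.EMPTY_ARRAY,         // shard failures
    SearchResponse.Clusters.EMPTY,          // clusters
    null                                    // pointInTimeId
);
```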
null, + null, + 1, null, 0, 0, @@ -425,15 +425,13 @@ public void testToXContent() throws IOException { } { SearchResponse response = new SearchResponse( - new InternalSearchResponse( - new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), - null, - null, - null, - false, - null, - 1 - ), + new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), + null, + null, + false, + null, + null, + 1, null, 0, 0, @@ -477,15 +475,13 @@ public void testToXContent() throws IOException { } { SearchResponse response = new SearchResponse( - new InternalSearchResponse( - new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), - null, - null, - null, - false, - null, - 1 - ), + new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), + null, + null, + false, + null, + null, + 1, null, 20, 9, @@ -654,8 +650,7 @@ public void testSerialization() throws IOException { } public void testToXContentEmptyClusters() throws IOException { - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, 1, 1, diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java index 1097174628e58..fb27d824417b1 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -166,8 +166,7 @@ public void search(final SearchRequest request, final ActionListener { counter.decrementAndGet(); - var response = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + var response = SearchResponseUtils.emptyWithTotalHits( null, 0, 0, diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index 6230a24a0768f..fea6e39ea881b 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -61,7 +61,6 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.DummyQueryBuilder; import org.elasticsearch.search.Scroll; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.SearchShardTarget; @@ -69,7 +68,6 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.collapse.CollapseBuilder; import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.sort.SortBuilders; @@ -77,6 +75,7 @@ import org.elasticsearch.search.suggest.term.TermSuggestionBuilder; 
import org.elasticsearch.search.vectors.KnnSearchBuilder; import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.test.transport.MockTransportService; @@ -479,19 +478,6 @@ private MockTransportService[] startTransport( return mockTransportServices; } - private static SearchResponse emptySearchResponse() { - InternalSearchResponse response = new InternalSearchResponse( - new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), - InternalAggregations.EMPTY, - null, - null, - false, - null, - 1 - ); - return new SearchResponse(response, null, 1, 1, 0, 100, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY, null); - } - public void testCCSRemoteReduceMergeFails() throws Exception { int numClusters = randomIntBetween(2, 10); DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; @@ -869,12 +855,26 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti } private static void resolveWithEmptySearchResponse(Tuple> tuple) { - var resp = emptySearchResponse(); - try { - tuple.v2().onResponse(resp); - } finally { - resp.decRef(); - } + ActionListener.respondAndRelease( + tuple.v2(), + new SearchResponse( + SearchHits.empty(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), + InternalAggregations.EMPTY, + null, + false, + null, + null, + 1, + null, + 1, + 1, + 0, + 100, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY, + null + ) + ); } public void testCollectSearchShards() throws Exception { @@ -1594,7 +1594,8 @@ protected void doWriteTo(StreamOutput out) throws IOException { actionFilters, null, null, - null + null, + new SearchTransportAPMMetrics(TelemetryProvider.NOOP.getMeterRegistry()) ); CountDownLatch latch = new CountDownLatch(1); diff --git a/server/src/test/java/org/elasticsearch/action/support/PlainActionFutureTests.java b/server/src/test/java/org/elasticsearch/action/support/PlainActionFutureTests.java index abc482a34a070..2ca914eb23c61 100644 --- a/server/src/test/java/org/elasticsearch/action/support/PlainActionFutureTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/PlainActionFutureTests.java @@ -34,13 +34,11 @@ public void onResponse(Object value) { // test all possible methods that can be interrupted final Runnable runnable = () -> { - final int method = randomIntBetween(0, 4); + final int method = randomIntBetween(0, 2); switch (method) { case 0 -> future.actionGet(); - case 1 -> future.actionGet("30s"); - case 2 -> future.actionGet(30000); - case 3 -> future.actionGet(TimeValue.timeValueSeconds(30)); - case 4 -> future.actionGet(30, TimeUnit.SECONDS); + case 1 -> future.actionGet(TimeValue.timeValueSeconds(30)); + case 2 -> future.actionGet(30, TimeUnit.SECONDS); default -> throw new AssertionError(method); } }; diff --git a/server/src/test/java/org/elasticsearch/action/support/SubscribableListenerTests.java b/server/src/test/java/org/elasticsearch/action/support/SubscribableListenerTests.java index d2acf8f397f2f..d784e2ac040a1 100644 --- a/server/src/test/java/org/elasticsearch/action/support/SubscribableListenerTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/SubscribableListenerTests.java @@ -409,6 +409,26 @@ public void testAndThenSuccess() { assertFalse(chainedListener.isDone()); } + public void testAndThenThrowException() { + final var initialListener = new 
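The `resolveWithEmptySearchResponse` rewrite above swaps a manual try/finally for `ActionListener.respondAndRelease`. Both shapes side by side; `listener` and `buildResponse()` stand in for any listener and any freshly built ref-counted response:

```java
// Before: respond, then release the local reference in a finally block.
var resp = buildResponse();
try {
    listener.onResponse(resp);
} finally {
    resp.decRef();
}

// After: one call delivers the response and releases the caller's reference,
// so the onResponse/decRef pairing cannot drift apart.
ActionListener.respondAndRelease(listener, buildResponse());
```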
SubscribableListener<>(); + final var forked = new AtomicReference>(); + final var result = new AtomicReference<>(); + + final var chainedListener = initialListener.andThen((l, o) -> { + forked.set(l); + result.set(o); + throw new ElasticsearchException("simulated"); + }); + assertNull(forked.get()); + assertNull(result.get()); + + final var o1 = new Object(); + initialListener.onResponse(o1); + assertSame(o1, result.get()); + assertSame(chainedListener, forked.get()); + assertComplete(chainedListener, "simulated"); + } + public void testAndThenFailure() { final var initialListener = new SubscribableListener<>(); @@ -488,7 +508,7 @@ private static void runAndThenThreadingTest(boolean testSuccess) { assertTrue(isComplete.get()); } - private static void assertComplete(SubscribableListener listener, @Nullable String expectedFailureMessage) { + private static void assertComplete(SubscribableListener listener, @Nullable String expectedFailureMessage) { assertTrue(listener.isDone()); if (expectedFailureMessage == null) { try { @@ -500,4 +520,88 @@ private static void assertComplete(SubscribableListener listener, @Nulla assertEquals(expectedFailureMessage, expectThrows(ElasticsearchException.class, listener::rawResult).getMessage()); } } + + public void testAndThenApplySuccess() throws Exception { + final var initialListener = new SubscribableListener<>(); + final var result = new AtomicReference<>(); + + final var oResult = new Object(); + final var chainedListener = initialListener.andThenApply(o -> { + result.set(o); + return oResult; + }); + assertNull(result.get()); + + final var o1 = new Object(); + initialListener.onResponse(o1); + assertSame(o1, result.get()); + assertTrue(chainedListener.isDone()); + assertSame(oResult, chainedListener.rawResult()); + } + + public void testAndThenApplyThrowException() { + final var initialListener = new SubscribableListener<>(); + final var result = new AtomicReference<>(); + + final var chainedListener = initialListener.andThenApply(o -> { + result.set(o); + throw new ElasticsearchException("simulated exception in fn"); + }); + assertNull(result.get()); + + final var o1 = new Object(); + initialListener.onResponse(o1); + assertSame(o1, result.get()); + assertComplete(chainedListener, "simulated exception in fn"); + } + + public void testAndThenApplyFailure() { + final var initialListener = new SubscribableListener<>(); + + final var chainedListener = initialListener.andThenApply(o -> fail(null, "should not be called")); + assertFalse(chainedListener.isDone()); + + initialListener.onFailure(new ElasticsearchException("simulated")); + assertComplete(chainedListener, "simulated"); + } + + public void testAndThenAcceptSuccess() throws Exception { + final var initialListener = new SubscribableListener<>(); + final var result = new AtomicReference<>(); + + final var chainedListener = initialListener.andThenAccept(result::set); + assertNull(result.get()); + + final var o1 = new Object(); + initialListener.onResponse(o1); + assertSame(o1, result.get()); + assertTrue(chainedListener.isDone()); + assertNull(chainedListener.rawResult()); + } + + public void testAndThenAcceptThrowException() { + final var initialListener = new SubscribableListener<>(); + final var result = new AtomicReference<>(); + + final var chainedListener = initialListener.andThenAccept(o -> { + result.set(o); + throw new ElasticsearchException("simulated exception in fn"); + }); + assertNull(result.get()); + + final var o1 = new Object(); + initialListener.onResponse(o1); + assertSame(o1, 
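The new `andThen*` tests establish the propagation rules: the chained listener receives the function's result (`andThenApply`), or `null` once the consumer runs (`andThenAccept`); an exception thrown by either body fails the chained listener; and an upstream failure skips the body entirely. A usage sketch of those rules:

```java
SubscribableListener<Object> initial = new SubscribableListener<>();

// Completes with the function's return value, or fails if the function throws.
SubscribableListener<String> applied = initial.andThenApply(Object::toString);

// Completes with null once the consumer has run; throwing inside the consumer
// fails `accepted` with that exception instead.
SubscribableListener<Void> accepted = initial.andThenAccept(o -> {
    // consume o here
});

initial.onResponse(new Object()); // both chained listeners are now done
```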
result.get()); + assertComplete(chainedListener, "simulated exception in fn"); + } + + public void testAndThenAcceptFailure() { + final var initialListener = new SubscribableListener<>(); + + final var chainedListener = initialListener.andThenAccept(o -> fail(null, "should not be called")); + assertFalse(chainedListener.isDone()); + + initialListener.onFailure(new ElasticsearchException("simulated")); + assertComplete(chainedListener, "simulated"); + } } diff --git a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainRefCountingTests.java b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainRefCountingTests.java new file mode 100644 index 0000000000000..8062bfea5a637 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainRefCountingTests.java @@ -0,0 +1,207 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.RefCounted; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.LeakTracker; +import org.elasticsearch.transport.TransportService; + +import java.util.Collection; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.CountDownLatch; + +public class TransportActionFilterChainRefCountingTests extends ESSingleNodeTestCase { + + @Override + protected Collection> getPlugins() { + return List.of(TestPlugin.class); + } + + static final ActionType TYPE = ActionType.localOnly("test:action"); + + public void testAsyncActionFilterRefCounting() { + final var countDownLatch = new CountDownLatch(2); + final var request = new Request(); + try { + client().execute(TYPE, request, ActionListener.running(countDownLatch::countDown).delegateResponse((delegate, e) -> { + // _If_ we got an exception then it must be an ElasticsearchException with message "short-circuit failure", i.e. we're + // checking that nothing else can go wrong here. But it's also ok for everything to succeed too, in which case we countDown + // the latch without running this block. 
+ assertEquals("short-circuit failure", asInstanceOf(ElasticsearchException.class, e).getMessage()); + delegate.onResponse(null); + })); + } finally { + request.decRef(); + } + request.addCloseListener(ActionListener.running(countDownLatch::countDown)); + safeAwait(countDownLatch); + } + + public static class TestPlugin extends Plugin implements ActionPlugin { + + private ThreadPool threadPool; + + @Override + public Collection createComponents(PluginServices services) { + threadPool = services.threadPool(); + return List.of(); + } + + @Override + public List> getActions() { + return List.of(new ActionHandler<>(TYPE, TestAction.class)); + } + + @Override + public List getActionFilters() { + return randomSubsetOf( + List.of( + new TestAsyncActionFilter(threadPool), + new TestAsyncActionFilter(threadPool), + new TestAsyncMappedActionFilter(threadPool), + new TestAsyncMappedActionFilter(threadPool) + ) + ); + } + } + + private static class TestAsyncActionFilter implements ActionFilter { + + private final ThreadPool threadPool; + private final int order = randomInt(); + + private TestAsyncActionFilter(ThreadPool threadPool) { + this.threadPool = Objects.requireNonNull(threadPool); + } + + @Override + public int order() { + return order; + } + + @Override + public void apply( + Task task, + String action, + Req request, + ActionListener listener, + ActionFilterChain chain + ) { + if (action.equals(TYPE.name())) { + randomFrom(EsExecutors.DIRECT_EXECUTOR_SERVICE, threadPool.generic()).execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + fail(e); + } + + @Override + protected void doRun() { + assertTrue(request.hasReferences()); + if (randomBoolean()) { + chain.proceed(task, action, request, listener); + } else { + listener.onFailure(new ElasticsearchException("short-circuit failure")); + } + } + }); + } else { + chain.proceed(task, action, request, listener); + } + } + } + + private static class TestAsyncMappedActionFilter extends TestAsyncActionFilter implements MappedActionFilter { + + private TestAsyncMappedActionFilter(ThreadPool threadPool) { + super(threadPool); + } + + @Override + public String actionName() { + return TYPE.name(); + } + } + + public static class TestAction extends TransportAction { + + private final ThreadPool threadPool; + + @Inject + public TestAction(TransportService transportService, ActionFilters actionFilters) { + super(TYPE.name(), actionFilters, transportService.getTaskManager()); + threadPool = transportService.getThreadPool(); + } + + @Override + protected void doExecute(Task task, Request request, ActionListener listener) { + request.mustIncRef(); + threadPool.generic().execute(ActionRunnable.supply(ActionListener.runBefore(listener, request::decRef), () -> { + assert request.hasReferences(); + return new Response(); + })); + } + } + + private static class Request extends ActionRequest { + private final SubscribableListener closeListeners = new SubscribableListener<>(); + private final RefCounted refs = LeakTracker.wrap(AbstractRefCounted.of(() -> closeListeners.onResponse(null))); + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void incRef() { + refs.incRef(); + } + + @Override + public boolean tryIncRef() { + return refs.tryIncRef(); + } + + @Override + public boolean decRef() { + return refs.decRef(); + } + + @Override + public boolean hasReferences() { + return refs.hasReferences(); + } + + void addCloseListener(ActionListener listener) { + 
closeListeners.addListener(listener); + } + } + + private static class Response extends ActionResponse { + @Override + public void writeTo(StreamOutput out) {} + } +} diff --git a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java index 59567c1ee9783..64ab7a9819190 100644 --- a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java @@ -135,8 +135,6 @@ protected void doExecute(Task task, TestRequest request, ActionListener void execute( @@ -146,15 +144,18 @@ public void exe ActionListener listener, ActionFilterChain actionFilterChain ) { - for (int i = 0; i <= additionalContinueCount; i++) { - actionFilterChain.proceed(task, action, request, listener); - } + // expected proceed() call: + actionFilterChain.proceed(task, action, request, listener); + + // extra, invalid, proceed() call: + actionFilterChain.proceed(task, action, request, listener); } }); Set filters = new HashSet<>(); filters.add(testFilter); + final CountDownLatch latch = new CountDownLatch(2); String actionName = randomAlphaOfLength(randomInt(30)); ActionFilters actionFilters = new ActionFilters(filters); TransportAction transportAction = new TransportAction( @@ -164,18 +165,16 @@ public void exe ) { @Override protected void doExecute(Task task, TestRequest request, ActionListener listener) { - listener.onResponse(new TestResponse()); + latch.countDown(); } }; - final CountDownLatch latch = new CountDownLatch(additionalContinueCount + 1); - final AtomicInteger responses = new AtomicInteger(); final List failures = new CopyOnWriteArrayList<>(); ActionTestUtils.execute(transportAction, null, new TestRequest(), new LatchedActionListener<>(new ActionListener<>() { @Override public void onResponse(TestResponse testResponse) { - responses.incrementAndGet(); + fail("should not complete listener"); } @Override @@ -191,8 +190,7 @@ public void onFailure(Exception e) { assertThat(testFilter.runs.get(), equalTo(1)); assertThat(testFilter.lastActionName, equalTo(actionName)); - assertThat(responses.get(), equalTo(1)); - assertThat(failures.size(), equalTo(additionalContinueCount)); + assertThat(failures.size(), equalTo(1)); for (Throwable failure : failures) { assertThat(failure, instanceOf(IllegalStateException.class)); } diff --git a/server/src/test/java/org/elasticsearch/action/support/broadcast/unpromotable/TransportBroadcastUnpromotableActionTests.java b/server/src/test/java/org/elasticsearch/action/support/broadcast/unpromotable/TransportBroadcastUnpromotableActionTests.java index b5bcb8c54668a..a0d0b1809e1f7 100644 --- a/server/src/test/java/org/elasticsearch/action/support/broadcast/unpromotable/TransportBroadcastUnpromotableActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/broadcast/unpromotable/TransportBroadcastUnpromotableActionTests.java @@ -379,6 +379,7 @@ private ActionResponse brodcastUnpromotableRequest(IndexShardRoutingTable wrongR public void testNullIndexShardRoutingTable() { IndexShardRoutingTable shardRoutingTable = null; assertThat( + expectThrows( NullPointerException.class, () -> PlainActionFuture.get( diff --git a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java index 
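The `Request` class above combines three ingredients: `AbstractRefCounted.of(...)` to run an action when the count reaches zero, `LeakTracker.wrap(...)` so unreleased references fail the test, and a `SubscribableListener` so callers can await the close. Extracted into a reusable shape (the `TrackedResource` name is illustrative):

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.SubscribableListener;
import org.elasticsearch.core.AbstractRefCounted;
import org.elasticsearch.core.RefCounted;
import org.elasticsearch.transport.LeakTracker;

class TrackedResource implements RefCounted {

    private final SubscribableListener<Void> closeListeners = new SubscribableListener<>();
    // LeakTracker flags references that are never released; the close action
    // completes every registered close listener exactly once.
    private final RefCounted refs = LeakTracker.wrap(AbstractRefCounted.of(() -> closeListeners.onResponse(null)));

    @Override
    public void incRef() {
        refs.incRef();
    }

    @Override
    public boolean tryIncRef() {
        return refs.tryIncRef();
    }

    @Override
    public boolean decRef() {
        return refs.decRef();
    }

    @Override
    public boolean hasReferences() {
        return refs.hasReferences();
    }

    void addCloseListener(ActionListener<Void> listener) {
        closeListeners.addListener(listener);
    }
}
```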
92155333dc507..3de66184b49fa 100644 --- a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java @@ -676,7 +676,7 @@ protected ClusterBlockException checkBlock(Request request, ClusterState state) } setState(clusterService, newStateBuilder.build()); } - expectThrows(TaskCancelledException.class, listener::actionGet); + expectThrows(TaskCancelledException.class, listener); } public void testTaskCancellationOnceActionItIsDispatchedToMaster() throws Exception { @@ -703,7 +703,7 @@ public void testTaskCancellationOnceActionItIsDispatchedToMaster() throws Except releaseBlockedThreads.run(); - expectThrows(TaskCancelledException.class, listener::actionGet); + expectThrows(TaskCancelledException.class, listener); } public void testGlobalBlocksAreCheckedAfterIndexNotFoundException() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java b/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java index 9458d0fe962e8..4d26ae610da3c 100644 --- a/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java @@ -9,10 +9,12 @@ package org.elasticsearch.action.support.nodes; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeActionTests; +import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -35,6 +37,7 @@ import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.LeakTracker; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportService; import org.hamcrest.Matchers; @@ -55,6 +58,9 @@ import java.util.Set; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.function.ObjLongConsumer; import static java.util.Collections.emptyMap; import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; @@ -118,7 +124,11 @@ public void testResponseAggregation() { final TestTransportNodesAction action = getTestTransportNodesAction(); final PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(null, new TestNodesRequest(), listener); + action.execute(null, new TestNodesRequest(), listener.delegateFailure((l, response) -> { + assertTrue(response.getNodes().stream().allMatch(TestNodeResponse::hasReferences)); + assertTrue(response.hasReferences()); + l.onResponse(response); + })); assertFalse(listener.isDone()); final Set failedNodeIds = new HashSet<>(); @@ -127,7 +137,9 @@ public void testResponseAggregation() { for (CapturingTransport.CapturedRequest capturedRequest : transport.getCapturedRequestsAndClear()) { if 
(randomBoolean()) { successfulNodes.add(capturedRequest.node()); - transport.handleResponse(capturedRequest.requestId(), new TestNodeResponse(capturedRequest.node())); + final var response = new TestNodeResponse(capturedRequest.node()); + transport.handleResponse(capturedRequest.requestId(), response); + assertFalse(response.hasReferences()); // response is copied (via the wire protocol) so this instance is released } else { failedNodeIds.add(capturedRequest.node().getId()); if (randomBoolean()) { @@ -138,7 +150,18 @@ public void testResponseAggregation() { } } - TestNodesResponse response = listener.actionGet(10, TimeUnit.SECONDS); + final TestNodesResponse response = listener.actionGet(10, TimeUnit.SECONDS); + + final var allResponsesReleasedListener = new SubscribableListener(); + try (var listeners = new RefCountingListener(allResponsesReleasedListener)) { + response.addCloseListener(listeners.acquire()); + for (final var nodeResponse : response.getNodes()) { + nodeResponse.addCloseListener(listeners.acquire()); + } + } + safeAwait(allResponsesReleasedListener); + assertTrue(response.getNodes().stream().noneMatch(TestNodeResponse::hasReferences)); + assertFalse(response.hasReferences()); for (TestNodeResponse nodeResponse : response.getNodes()) { assertThat(successfulNodes, Matchers.hasItem(nodeResponse.getNode())); @@ -164,7 +187,7 @@ public void testResponsesReleasedOnCancellation() { final CancellableTask cancellableTask = new CancellableTask(randomLong(), "transport", "action", "", null, emptyMap()); final PlainActionFuture listener = new PlainActionFuture<>(); action.execute(cancellableTask, new TestNodesRequest(), listener.delegateResponse((l, e) -> { - assert Thread.currentThread().getName().contains("[" + ThreadPool.Names.GENERIC + "]"); + assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.GENERIC); l.onFailure(e); })); @@ -173,13 +196,31 @@ public void testResponsesReleasedOnCancellation() { ); Randomness.shuffle(capturedRequests); + final AtomicInteger liveResponseCount = new AtomicInteger(); + final Function responseCreator = node -> { + liveResponseCount.incrementAndGet(); + final var testNodeResponse = new TestNodeResponse(node); + testNodeResponse.addCloseListener(ActionListener.running(liveResponseCount::decrementAndGet)); + return testNodeResponse; + }; + + final ObjLongConsumer responseSender = (response, requestId) -> { + try { + // transport.handleResponse may de/serialize the response, releasing it early, so send the response straight to the handler + transport.getTransportResponseHandler(requestId).handleResponse(response); + } finally { + response.decRef(); + } + }; + final ReachabilityChecker reachabilityChecker = new ReachabilityChecker(); final Runnable nextRequestProcessor = () -> { var capturedRequest = capturedRequests.remove(0); if (randomBoolean()) { - // transport.handleResponse may de/serialize the response, releasing it early, so send the response straight to the handler - transport.getTransportResponseHandler(capturedRequest.requestId()) - .handleResponse(reachabilityChecker.register(new TestNodeResponse(capturedRequest.node()))); + responseSender.accept( + reachabilityChecker.register(responseCreator.apply(capturedRequest.node())), + capturedRequest.requestId() + ); } else { // handleRemoteError may de/serialize the exception, releasing it early, so just use handleLocalError transport.handleLocalError( @@ -200,20 +241,23 @@ public void testResponsesReleasedOnCancellation() { // responses captured before cancellation are now unreachable 
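`RefCountingListener`, used in the reworked `testResponseAggregation` to detect when every response object has been released, completes its delegate exactly once: after all acquired child listeners have completed and the `RefCountingListener` itself has been closed. A minimal sketch of that fan-in, under the assumption that each close listener fires when its resource is fully released:

```java
SubscribableListener<Void> allReleased = new SubscribableListener<>();
try (var refs = new RefCountingListener(allReleased)) {
    // one child listener per tracked resource; each completes when that
    // resource is released
    for (int i = 0; i < 3; i++) {
        ActionListener<Void> child = refs.acquire();
        child.onResponse(null); // in the test this happens via addCloseListener
    }
} // closing drops the initial reference; allReleased fires after the last child
assert allReleased.isDone();
```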
reachabilityChecker.ensureUnreachable(); + assertEquals(0, liveResponseCount.get()); while (capturedRequests.size() > 0) { // a response sent after cancellation is dropped immediately assertFalse(listener.isDone()); nextRequestProcessor.run(); reachabilityChecker.ensureUnreachable(); + assertEquals(0, liveResponseCount.get()); } expectThrows(TaskCancelledException.class, () -> listener.actionGet(10, TimeUnit.SECONDS)); + assertTrue(cancellableTask.isCancelled()); // keep task alive } @BeforeClass public static void startThreadPool() { - THREAD_POOL = new TestThreadPool(TransportBroadcastByNodeActionTests.class.getSimpleName()); + THREAD_POOL = new TestThreadPool(TransportNodesActionTests.class.getSimpleName()); } @AfterClass @@ -268,11 +312,9 @@ public void tearDown() throws Exception { public TestTransportNodesAction getTestTransportNodesAction() { return new TestTransportNodesAction( - THREAD_POOL, clusterService, transportService, new ActionFilters(Collections.emptySet()), - TestNodesRequest::new, TestNodeRequest::new, THREAD_POOL.executor(ThreadPool.Names.GENERIC) ); @@ -302,11 +344,9 @@ private static class TestTransportNodesAction extends TransportNodesAction< TestNodeResponse> { TestTransportNodesAction( - ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, - Writeable.Reader request, Writeable.Reader nodeRequest, Executor nodeExecutor ) { @@ -319,7 +359,7 @@ protected TestNodesResponse newResponse( List responses, List failures ) { - return new TestNodesResponse(clusterService.getClusterName(), request, responses, failures); + return new TestNodesResponse(clusterService.getClusterName(), responses, failures); } @Override @@ -350,7 +390,7 @@ private static class DataNodesOnlyTransportNodesAction extends TestTransportNode Writeable.Reader nodeRequest, Executor nodeExecutor ) { - super(threadPool, clusterService, transportService, actionFilters, request, nodeRequest, nodeExecutor); + super(clusterService, transportService, actionFilters, nodeRequest, nodeExecutor); } @Override @@ -371,16 +411,11 @@ private static class TestNodesRequest extends BaseNodesRequest private static class TestNodesResponse extends BaseNodesResponse { - private final TestNodesRequest request; + private final SubscribableListener onClose = new SubscribableListener<>(); + private final RefCounted refCounted = LeakTracker.wrap(AbstractRefCounted.of(() -> onClose.onResponse(null))); - TestNodesResponse( - ClusterName clusterName, - TestNodesRequest request, - List nodeResponses, - List failures - ) { + TestNodesResponse(ClusterName clusterName, List nodeResponses, List failures) { super(clusterName, nodeResponses, failures); - this.request = request; } @Override @@ -392,6 +427,30 @@ protected List readNodesFrom(StreamInput in) throws IOExceptio protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { out.writeCollection(nodes); } + + @Override + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } + + void addCloseListener(ActionListener listener) { + onClose.addListener(listener); + } } private static class TestNodeRequest extends TransportRequest { @@ -425,6 +484,10 @@ public boolean hasReferences() { } private static class TestNodeResponse extends BaseNodeResponse { + + private final 
SubscribableListener onClose = new SubscribableListener<>(); + private final RefCounted refCounted = LeakTracker.wrap(AbstractRefCounted.of(() -> onClose.onResponse(null))); + TestNodeResponse() { this(mock(DiscoveryNode.class)); } @@ -436,6 +499,30 @@ private static class TestNodeResponse extends BaseNodeResponse { protected TestNodeResponse(StreamInput in) throws IOException { super(in); } + + @Override + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } + + void addCloseListener(ActionListener listener) { + onClose.addListener(listener); + } } } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java index a787a50798e05..8bda62b91bc7e 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java @@ -305,7 +305,7 @@ public BaseBroadcastResponse executeAndAssertImmediateResponse( ) { PlainActionFuture response = new PlainActionFuture<>(); ActionTestUtils.execute(broadcastAction, null, request, response); - return response.actionGet("5s"); + return response.actionGet(5, TimeUnit.SECONDS); } private void assertBroadcastResponse(int total, int successful, int failed, BaseBroadcastResponse response, Class exceptionClass) { diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java index 0df492b080254..be8255cd766c8 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java @@ -294,17 +294,18 @@ public void testStreamRequestLegacyVersion() throws IOException { public void testMultiParser() throws Exception { byte[] bytes = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/termvectors/multiRequest1.json"); - XContentParser data = createParser(JsonXContent.jsonXContent, bytes); - MultiTermVectorsRequest request = new MultiTermVectorsRequest(); - request.add(new TermVectorsRequest(), data); - checkParsedParameters(request); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, bytes)) { + MultiTermVectorsRequest request = new MultiTermVectorsRequest(); + request.add(new TermVectorsRequest(), parser); + checkParsedParameters(request); + } bytes = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/termvectors/multiRequest2.json"); - data = createParser(JsonXContent.jsonXContent, new BytesArray(bytes)); - request = new MultiTermVectorsRequest(); - request.add(new TermVectorsRequest(), data); - - checkParsedParameters(request); + try (var parser = createParser(JsonXContent.jsonXContent, new BytesArray(bytes))) { + MultiTermVectorsRequest request = new MultiTermVectorsRequest(); + request.add(new TermVectorsRequest(), parser); + checkParsedParameters(request); + } } void checkParsedParameters(MultiTermVectorsRequest request) { diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java 
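The `TestNodesResponse`/`TestNodeResponse` hunks above all apply the same close-tracking pattern. A condensed sketch of that shape (hypothetical class name, not part of this patch; package paths as imported elsewhere in this diff):

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.SubscribableListener;
import org.elasticsearch.core.AbstractRefCounted;
import org.elasticsearch.core.RefCounted;
import org.elasticsearch.transport.LeakTracker;

// Delegate the RefCounted contract to an AbstractRefCounted whose close hook
// completes a SubscribableListener; LeakTracker.wrap(...) fails the test run
// if the last reference is never released.
class TrackedResponse implements RefCounted {
    private final SubscribableListener<Void> onClose = new SubscribableListener<>();
    private final RefCounted refCounted = LeakTracker.wrap(AbstractRefCounted.of(() -> onClose.onResponse(null)));

    // Completes once decRef() drops the last reference.
    void addCloseListener(ActionListener<Void> listener) {
        onClose.addListener(listener);
    }

    @Override
    public void incRef() {
        refCounted.incRef();
    }

    @Override
    public boolean tryIncRef() {
        return refCounted.tryIncRef();
    }

    @Override
    public boolean decRef() {
        return refCounted.decRef();
    }

    @Override
    public boolean hasReferences() {
        return refCounted.hasReferences();
    }
}
```

This is what lets the cancellation test above assert that a response produced after cancellation is released rather than leaked.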
b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index bd789891f2330..735ae41558240 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -120,7 +120,9 @@ public void setUp() throws Exception { public void testFromXContent() throws Exception { UpdateRequest request = new UpdateRequest("test", "1"); // simple script - request.fromXContent(createParser(XContentFactory.jsonBuilder().startObject().field("script", "script1").endObject())); + try (var parser = createParser(XContentFactory.jsonBuilder().startObject().field("script", "script1").endObject())) { + request.fromXContent(parser); + } Script script = request.script(); assertThat(script, notNullValue()); assertThat(script.getIdOrCode(), equalTo("script1")); @@ -130,11 +132,13 @@ public void testFromXContent() throws Exception { assertThat(params, equalTo(emptyMap())); // simple verbose script - request.fromXContent( - createParser( + try ( + var parser = createParser( XContentFactory.jsonBuilder().startObject().startObject("script").field("source", "script1").endObject().endObject() ) - ); + ) { + request.fromXContent(parser); + } script = request.script(); assertThat(script, notNullValue()); assertThat(script.getIdOrCode(), equalTo("script1")); @@ -145,8 +149,8 @@ public void testFromXContent() throws Exception { // script with params request = new UpdateRequest("test", "1"); - request.fromXContent( - createParser( + try ( + var parser = createParser( XContentFactory.jsonBuilder() .startObject() .startObject("script") @@ -157,7 +161,9 @@ public void testFromXContent() throws Exception { .endObject() .endObject() ) - ); + ) { + request.fromXContent(parser); + } script = request.script(); assertThat(script, notNullValue()); assertThat(script.getIdOrCode(), equalTo("script1")); @@ -169,8 +175,8 @@ public void testFromXContent() throws Exception { assertThat(params.get("param1").toString(), equalTo("value1")); request = new UpdateRequest("test", "1"); - request.fromXContent( - createParser( + try ( + var parser = createParser( XContentFactory.jsonBuilder() .startObject() .startObject("script") @@ -181,7 +187,9 @@ public void testFromXContent() throws Exception { .endObject() .endObject() ) - ); + ) { + request.fromXContent(parser); + } script = request.script(); assertThat(script, notNullValue()); assertThat(script.getIdOrCode(), equalTo("script1")); @@ -194,8 +202,8 @@ public void testFromXContent() throws Exception { // script with params and upsert request = new UpdateRequest("test", "1"); - request.fromXContent( - createParser( + try ( + var parser = createParser( XContentFactory.jsonBuilder() .startObject() .startObject("script") @@ -212,7 +220,9 @@ public void testFromXContent() throws Exception { .endObject() .endObject() ) - ); + ) { + request.fromXContent(parser); + } script = request.script(); assertThat(script, notNullValue()); assertThat(script.getIdOrCode(), equalTo("script1")); @@ -231,8 +241,8 @@ public void testFromXContent() throws Exception { assertThat(((Map) upsertDoc.get("compound")).get("field2").toString(), equalTo("value2")); request = new UpdateRequest("test", "1"); - request.fromXContent( - createParser( + try ( + var parser = createParser( XContentFactory.jsonBuilder() .startObject() .startObject("upsert") @@ -249,7 +259,9 @@ public void testFromXContent() throws Exception { .endObject() .endObject() ) - ); + ) { + request.fromXContent(parser); + } script = 
request.script(); assertThat(script, notNullValue()); assertThat(script.getIdOrCode(), equalTo("script1")); @@ -265,8 +277,8 @@ public void testFromXContent() throws Exception { // script with doc request = new UpdateRequest("test", "1"); - request.fromXContent( - createParser( + try ( + var parser = createParser( XContentFactory.jsonBuilder() .startObject() .startObject("doc") @@ -277,7 +289,9 @@ public void testFromXContent() throws Exception { .endObject() .endObject() ) - ); + ) { + request.fromXContent(parser); + } Map doc = request.doc().sourceAsMap(); assertThat(doc.get("field1").toString(), equalTo("value1")); assertThat(((Map) doc.get("compound")).get("field2").toString(), equalTo("value2")); @@ -285,23 +299,30 @@ public void testFromXContent() throws Exception { public void testUnknownFieldParsing() throws Exception { UpdateRequest request = new UpdateRequest("test", "1"); - XContentParser contentParser = createParser(XContentFactory.jsonBuilder().startObject().field("unknown_field", "test").endObject()); - - XContentParseException ex = expectThrows(XContentParseException.class, () -> request.fromXContent(contentParser)); - assertEquals("[1:2] [UpdateRequest] unknown field [unknown_field]", ex.getMessage()); + try ( + XContentParser contentParser = createParser( + XContentFactory.jsonBuilder().startObject().field("unknown_field", "test").endObject() + ) + ) { + XContentParseException ex = expectThrows(XContentParseException.class, () -> request.fromXContent(contentParser)); + assertEquals("[1:2] [UpdateRequest] unknown field [unknown_field]", ex.getMessage()); + } UpdateRequest request2 = new UpdateRequest("test", "1"); - XContentParser unknownObject = createParser( - XContentFactory.jsonBuilder() - .startObject() - .field("script", "ctx.op = ctx._source.views == params.count ? 'delete' : 'none'") - .startObject("params") - .field("count", 1) - .endObject() - .endObject() - ); - ex = expectThrows(XContentParseException.class, () -> request2.fromXContent(unknownObject)); - assertEquals("[1:76] [UpdateRequest] unknown field [params]", ex.getMessage()); + try ( + XContentParser unknownObject = createParser( + XContentFactory.jsonBuilder() + .startObject() + .field("script", "ctx.op = ctx._source.views == params.count ? 
'delete' : 'none'") + .startObject("params") + .field("count", 1) + .endObject() + .endObject() + ) + ) { + XContentParseException ex = expectThrows(XContentParseException.class, () -> request2.fromXContent(unknownObject)); + assertEquals("[1:76] [UpdateRequest] unknown field [params]", ex.getMessage()); + } } public void testFetchSourceParsing() throws Exception { @@ -543,9 +564,10 @@ public void testNoopDetection() throws Exception { ShardId shardId = new ShardId("test", "", 0); GetResult getResult = new GetResult("test", "1", 0, 1, 0, true, new BytesArray("{\"body\": \"foo\"}"), null, null); - UpdateRequest request = new UpdateRequest("test", "1").fromXContent( - createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"foo\"}}")) - ); + UpdateRequest request; + try (var parser = createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"foo\"}}"))) { + request = new UpdateRequest("test", "1").fromXContent(parser); + } UpdateHelper.Result result = UpdateHelper.prepareUpdateIndexRequest(shardId, request, getResult, true); @@ -558,15 +580,15 @@ public void testNoopDetection() throws Exception { assertThat(result.getResponseResult(), equalTo(DocWriteResponse.Result.UPDATED)); assertThat(result.updatedSourceAsMap().get("body").toString(), equalTo("foo")); - // Change the request to be a different doc - request = new UpdateRequest("test", "1").fromXContent( - createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"bar\"}}")) - ); - result = UpdateHelper.prepareUpdateIndexRequest(shardId, request, getResult, true); + try (var parser = createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"bar\"}}"))) { + // Change the request to be a different doc + request = new UpdateRequest("test", "1").fromXContent(parser); + result = UpdateHelper.prepareUpdateIndexRequest(shardId, request, getResult, true); - assertThat(result.action(), instanceOf(IndexRequest.class)); - assertThat(result.getResponseResult(), equalTo(DocWriteResponse.Result.UPDATED)); - assertThat(result.updatedSourceAsMap().get("body").toString(), equalTo("bar")); + assertThat(result.action(), instanceOf(IndexRequest.class)); + assertThat(result.getResponseResult(), equalTo(DocWriteResponse.Result.UPDATED)); + assertThat(result.updatedSourceAsMap().get("body").toString(), equalTo("bar")); + } } @@ -614,11 +636,11 @@ public void testToString() throws IOException { assertThat(request.toString(), equalTo(""" update {[test][1], doc_as_upsert[false], script[Script{type=inline, lang='mock', idOrCode='ctx._source.body = "foo"', \ options={}, params={}}], scripted_upsert[false], detect_noop[true]}""")); - request = new UpdateRequest("test", "1").fromXContent( - createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"bar\"}}")) - ); - assertThat(request.toString(), equalTo(""" - update {[test][1], doc_as_upsert[false], doc[index {[null][null], source[{"body":"bar"}]}], \ - scripted_upsert[false], detect_noop[true]}""")); + try (var parser = createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"bar\"}}"))) { + request = new UpdateRequest("test", "1").fromXContent(parser); + assertThat(request.toString(), equalTo(""" + update {[test][1], doc_as_upsert[false], doc[index {[null][null], source[{"body":"bar"}]}], \ + scripted_upsert[false], detect_noop[true]}""")); + } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterSnapshotStatsTests.java 
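The `UpdateRequestTests` and `TermVectorsUnitTests` hunks above are all the same mechanical change: the `XContentParser` moves into a try-with-resources block so it is closed even when parsing throws. A minimal sketch of the resulting shape, assuming the `createParser` helper from `ESTestCase` that these tests use (hypothetical method name):

```java
import java.io.IOException;

import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.json.JsonXContent;

// Inside an ESTestCase subclass: the parser is always closed, even if
// fromXContent(...) throws, so parser leak checks stay quiet.
UpdateRequest parseUpdate(String json) throws IOException {
    try (XContentParser parser = createParser(JsonXContent.jsonXContent, new BytesArray(json))) {
        return new UpdateRequest("test", "1").fromXContent(parser);
    }
}
```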
b/server/src/test/java/org/elasticsearch/cluster/ClusterSnapshotStatsTests.java index 06a772d50c393..55cd6e5790f84 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterSnapshotStatsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterSnapshotStatsTests.java @@ -49,7 +49,8 @@ protected Writeable.Reader instanceReader() { "ABORTED", "MISSING", "WAITING", - "QUEUED" }; + "QUEUED", + "PAUSED_FOR_NODE_REMOVAL" }; @Override protected ClusterSnapshotStats createTestInstance() { @@ -370,7 +371,9 @@ public void testComputation() { SnapshotsInProgress.ShardState.WAITING, 0, SnapshotsInProgress.ShardState.QUEUED, - 1 + 1, + SnapshotsInProgress.ShardState.PAUSED_FOR_NODE_REMOVAL, + 0 ) ) ) @@ -392,7 +395,7 @@ public void testComputation() { new Snapshot("test-repo", new SnapshotId("snapshot", "uuid")), randomBoolean(), randomBoolean(), - SnapshotsInProgress.State.INIT, + SnapshotsInProgress.State.STARTED, Map.of("index", new IndexId("index", "uuid")), List.of(), List.of(), diff --git a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java index 7b2795abfd62d..75439578448a4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource; -import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingHelper; import org.elasticsearch.cluster.routing.UnassignedInfo; @@ -137,14 +136,7 @@ public void testFillShardLevelInfo() { Map shardSizes = new HashMap<>(); Map shardDataSetSizes = new HashMap<>(); Map routingToPath = new HashMap<>(); - InternalClusterInfoService.buildShardLevelInfo( - RoutingTable.EMPTY_ROUTING_TABLE, - stats, - shardSizes, - shardDataSetSizes, - routingToPath, - new HashMap<>() - ); + InternalClusterInfoService.buildShardLevelInfo(stats, shardSizes, shardDataSetSizes, routingToPath, new HashMap<>()); assertThat( shardSizes, diff --git a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java index d93a844476463..85e16821ecb96 100644 --- a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -13,8 +13,8 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ListenableActionFuture; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; @@ -620,8 +620,8 @@ public void openConnection(DiscoveryNode node, ConnectionProfile profile, Action threadPool.generic().execute(() -> { runConnectionBlock(connectionBlock); listener.onResponse(new Connection() { - private final ListenableActionFuture closeListener = new ListenableActionFuture<>(); - private final ListenableActionFuture removedListener = new ListenableActionFuture<>(); + 
private final SubscribableListener closeListener = new SubscribableListener<>(); + private final SubscribableListener removedListener = new SubscribableListener<>(); private final RefCounted refCounted = AbstractRefCounted.of(() -> closeListener.onResponse(null)); diff --git a/server/src/test/java/org/elasticsearch/cluster/action/index/MappingUpdatedActionTests.java b/server/src/test/java/org/elasticsearch/cluster/action/index/MappingUpdatedActionTests.java index 05969b14f2f9b..533f2a2f81121 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/index/MappingUpdatedActionTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/index/MappingUpdatedActionTests.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster.action.index; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.mapping.put.AutoPutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.TransportAutoPutMappingAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.AdminClient; import org.elasticsearch.client.internal.Client; @@ -150,6 +150,6 @@ public void testSendUpdateMappingUsingAutoPutMappingAction() { Mapping update = new Mapping(rootObjectMapper, new MetadataFieldMapper[0], Map.of()); mua.sendUpdateMapping(new Index("name", "uuid"), update, ActionListener.noop()); - verify(indicesAdminClient).execute(eq(AutoPutMappingAction.INSTANCE), any(), any()); + verify(indicesAdminClient).execute(eq(TransportAutoPutMappingAction.TYPE), any(), any()); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinValidationServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinValidationServiceTests.java index 9287e279fe2f5..9b1ce4611169b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinValidationServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinValidationServiceTests.java @@ -397,7 +397,7 @@ public void testJoinValidationRejectsMismatchedClusterUUID() { deterministicTaskQueue.runAllTasks(); assertThat( - expectThrows(CoordinationStateRejectedException.class, future::actionGet).getMessage(), + expectThrows(CoordinationStateRejectedException.class, future).getMessage(), allOf( containsString("This node previously joined a cluster with UUID"), containsString("and is now trying to join a different cluster"), @@ -447,10 +447,7 @@ public void testJoinValidationRunsJoinValidators() { ); deterministicTaskQueue.runAllTasks(); - assertThat( - expectThrows(IllegalStateException.class, future::actionGet).getMessage(), - allOf(containsString("simulated validation failure")) - ); + assertThat(expectThrows(IllegalStateException.class, future).getMessage(), allOf(containsString("simulated validation failure"))); } public void testJoinValidationFallsBackToPingIfNotMaster() { diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/LagDetectorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/LagDetectorTests.java index 9a8aface0990c..f69596be8ce65 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/LagDetectorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/LagDetectorTests.java @@ -266,7 +266,7 @@ public void testHotThreadsChunkedLoggingEncoding() { + node.descriptionWithoutAttributes() + "] lagging at version [1] despite commit of cluster state version [2]", ReferenceDocs.LAGGING_NODE_TROUBLESHOOTING, - new 
LagDetector.HotThreadsLoggingTask(node, 1, 2, expectedBody)::run + new LagDetector.HotThreadsLoggingTask(node, 1, 2, expectedBody, () -> {})::run ).utf8ToString() ); } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTransportHandlerTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTransportHandlerTests.java index 0cdc5de86a8d3..6fce5927a62dd 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTransportHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTransportHandlerTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.compress.Compressor; import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.RecyclerBytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -147,7 +146,7 @@ private static boolean isDiff(BytesTransportRequest request, TransportVersion ve in = request.bytes().streamInput(); final Compressor compressor = CompressorFactory.compressor(request.bytes()); if (compressor != null) { - in = new InputStreamStreamInput(compressor.threadLocalInputStream(in)); + in = compressor.threadLocalStreamInput(in); } in.setTransportVersion(version); return in.readBoolean() == false; diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorServiceTests.java index b9d1cb50444e3..18385b1d7ad44 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorServiceTests.java @@ -312,8 +312,11 @@ private static StableMasterHealthIndicatorService createStableMasterHealthIndica private Map xContentToMap(ToXContent xcontent) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); xcontent.toXContent(builder, ToXContent.EMPTY_PARAMS); - XContentParser parser = XContentType.JSON.xContent() - .createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, BytesReference.bytes(builder).streamInput()); - return parser.map(); + try ( + XContentParser parser = XContentType.JSON.xContent() + .createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, BytesReference.bytes(builder).streamInput()) + ) { + return parser.map(); + } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index 1bda67030eca1..1c4cb8c0681ff 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -1653,7 +1653,8 @@ public void testXContentSerializationWithRollover() throws IOException { randomBoolean() ? 
IndexMode.STANDARD : null, // IndexMode.TIME_SERIES triggers validation that many unit tests don't pass lifecycle, failureStore, - failureIndices + failureIndices, + false ); try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java index e539087de7b8e..e8892278879b9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java @@ -72,9 +72,10 @@ public void testXContent() throws IOException { ) ); } - XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); - parser.nextToken(); // the beginning of the parser - assertThat(IndexGraveyard.fromXContent(parser), equalTo(graveyard)); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + parser.nextToken(); // the beginning of the parser + assertThat(IndexGraveyard.fromXContent(parser), equalTo(graveyard)); + } } public void testChunking() { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java index 1925c869cdb81..58b8adcf53538 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java @@ -116,8 +116,10 @@ public void testIndexMetadataSerialization() throws IOException { builder.startObject(); IndexMetadata.FORMAT.toXContent(builder, metadata); builder.endObject(); - XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); - final IndexMetadata fromXContentMeta = IndexMetadata.fromXContent(parser); + final IndexMetadata fromXContentMeta; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + fromXContentMeta = IndexMetadata.fromXContent(parser); + } assertEquals( "expected: " + Strings.toString(metadata) + "\nactual : " + Strings.toString(fromXContentMeta), metadata, diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceTests.java index 57f1265debf00..6e24735eba454 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceTests.java @@ -367,7 +367,7 @@ private static ClusterState addSnapshotIndex(final String index, final int numSh snapshot, randomBoolean(), false, - SnapshotsInProgress.State.INIT, + SnapshotsInProgress.State.STARTED, Collections.singletonMap(index, new IndexId(index, index)), Collections.emptyList(), Collections.emptyList(), diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ReservedStateMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ReservedStateMetadataTests.java index 2383c0b513ead..46be49ad7111f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ReservedStateMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ReservedStateMetadataTests.java @@ -63,9 +63,10 @@ private void xContentTest(boolean addHandlers, boolean addErrors)
throws IOExcep builder.startObject(); meta.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); - parser.nextToken(); // the beginning of the object - assertThat(ReservedStateMetadata.fromXContent(parser), equalTo(meta)); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + parser.nextToken(); // the beginning of the object + assertThat(ReservedStateMetadata.fromXContent(parser), equalTo(meta)); + } } public void testXContent() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java index cb681b57b58dd..e7f49bc773404 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java @@ -138,7 +138,10 @@ public void testSimpleJsonFromAndTo() throws IOException { Metadata.FORMAT.toXContent(builder, metadata); builder.endObject(); - Metadata parsedMetadata = Metadata.Builder.fromXContent(createParser(builder)); + Metadata parsedMetadata; + try (var parser = createParser(builder)) { + parsedMetadata = Metadata.Builder.fromXContent(parser); + } // templates assertThat(parsedMetadata.templates().get("foo").name(), is("foo")); diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeRoleSettingTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeRoleSettingTests.java index cbd07f3410910..6389f94df0be1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeRoleSettingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeRoleSettingTests.java @@ -14,6 +14,8 @@ import java.util.Set; import java.util.function.Predicate; +import static org.elasticsearch.test.LambdaMatchers.falseWith; +import static org.elasticsearch.test.LambdaMatchers.trueWith; import static org.elasticsearch.test.NodeRoles.addRoles; import static org.elasticsearch.test.NodeRoles.nonDataNode; import static org.elasticsearch.test.NodeRoles.onlyRole; @@ -40,10 +42,10 @@ public void testIsRemoteClusterClient() { } private void runRoleTest(final Predicate predicate, final DiscoveryNodeRole role) { - assertTrue(predicate.test(onlyRole(role))); + assertThat(predicate, trueWith(onlyRole(role))); assertThat(DiscoveryNode.getRolesFromSettings(onlyRole(role)), hasItem(role)); - assertFalse(predicate.test(removeRoles(Set.of(role)))); + assertThat(predicate, falseWith(removeRoles(Set.of(role)))); assertThat(DiscoveryNode.getRolesFromSettings(removeRoles(Set.of(role))), not(hasItem(role))); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java index e425b0e305050..aa4b4ec6dbbeb 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java @@ -142,8 +142,10 @@ public void testSerialization() throws IOException { allocationId = AllocationId.newRelocation(allocationId); } BytesReference bytes = BytesReference.bytes(allocationId.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)); - AllocationId parsedAllocationId = 
AllocationId.fromXContent(createParser(JsonXContent.jsonXContent, bytes)); - assertEquals(allocationId, parsedAllocationId); + try (var parser = createParser(JsonXContent.jsonXContent, bytes)) { + AllocationId parsedAllocationId = AllocationId.fromXContent(parser); + assertEquals(allocationId, parsedAllocationId); + } } public void testEquals() { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/ExpectedShardSizeEstimatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/ExpectedShardSizeEstimatorTests.java index f81d99c55e84e..dd92589f0af89 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/ExpectedShardSizeEstimatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/ExpectedShardSizeEstimatorTests.java @@ -158,7 +158,7 @@ public void testShouldReadSizeFromClonedShard() { var state = ClusterState.builder(ClusterName.DEFAULT) .metadata( metadata( - IndexMetadata.builder("source").settings(indexSettings(IndexVersion.current(), 2, 0)), + IndexMetadata.builder("source").settings(indexSettings(IndexVersion.current(), 1, 0)), IndexMetadata.builder("target") .settings( indexSettings(IndexVersion.current(), 1, 0) // diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java index be8807292350b..4640392d7b164 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java @@ -811,23 +811,24 @@ public void testXContent() throws Exception { ] } """; - XContentParser parser = createParser(JsonXContent.jsonXContent, commands); - // move two tokens, parser expected to be "on" `commands` field - parser.nextToken(); - parser.nextToken(); - - assertThat( - AllocationCommands.fromXContent(parser), - equalTo( - new AllocationCommands( - new AllocateEmptyPrimaryAllocationCommand("test", 1, "node1", true), - new AllocateStalePrimaryAllocationCommand("test", 2, "node1", true), - new AllocateReplicaAllocationCommand("test", 2, "node1"), - new MoveAllocationCommand("test", 3, "node2", "node3"), - new CancelAllocationCommand("test", 4, "node5", true) + try (XContentParser parser = createParser(JsonXContent.jsonXContent, commands)) { + // move two tokens, parser expected to be "on" `commands` field + parser.nextToken(); + parser.nextToken(); + + assertThat( + AllocationCommands.fromXContent(parser), + equalTo( + new AllocationCommands( + new AllocateEmptyPrimaryAllocationCommand("test", 1, "node1", true), + new AllocateStalePrimaryAllocationCommand("test", 2, "node1", true), + new AllocateReplicaAllocationCommand("test", 2, "node1"), + new MoveAllocationCommand("test", 3, "node2", "node3"), + new CancelAllocationCommand("test", 4, "node5", true) + ) ) - ) - ); + ); + } } @Override diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationServiceTests.java index 5ba7d396b2bf7..ab6efb3d2c367 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationServiceTests.java @@ -44,6 +44,7 @@ import java.util.List; import java.util.Map; import java.util.function.Function; +import java.util.function.Predicate; import 
java.util.stream.IntStream; import static org.elasticsearch.cluster.routing.RoutingNodesHelper.shardsWithState; @@ -314,7 +315,7 @@ private static class UnrealisticAllocator implements ExistingShardsAllocator { public void beforeAllocation(RoutingAllocation allocation) {} @Override - public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) {} + public void afterPrimariesBeforeReplicas(RoutingAllocation allocation, Predicate isRelevantShardPredicate) {} @Override public void allocateUnassigned( diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java index 3cafaf216eb39..fa1a542fff7dd 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java @@ -59,14 +59,19 @@ import static java.util.stream.Collectors.toSet; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; import static org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.Balancer.getIndexDiskUsageInBytes; +import static org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.DISK_USAGE_BALANCE_FACTOR_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider.SETTING_IGNORE_DISK_WATERMARKS; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; public class BalancedShardsAllocatorTests extends ESAllocationTestCase { + private static final Settings WITH_DISK_BALANCING = Settings.builder().put(DISK_USAGE_BALANCE_FACTOR_SETTING.getKey(), "1e-9").build(); + public void testDecideShardAllocation() { BalancedShardsAllocator allocator = new BalancedShardsAllocator(Settings.EMPTY); ClusterState clusterState = ClusterStateCreationUtils.state("idx", false, ShardRoutingState.STARTED); @@ -105,22 +110,19 @@ public void testBalanceByForecastWriteLoad() { var allocationService = new MockAllocationService( yesAllocationDeciders(), new TestGatewayAllocator(), - new BalancedShardsAllocator( - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - TEST_WRITE_LOAD_FORECASTER - ), + new BalancedShardsAllocator(ClusterSettings.createBuiltInClusterSettings(), TEST_WRITE_LOAD_FORECASTER), EmptyClusterInfoService.INSTANCE, SNAPSHOT_INFO_SERVICE_WITH_NO_SHARD_SIZES ); var clusterState = applyStartedShardsUntilNoChange( stateWithStartedIndices( - IndexMetadata.builder("heavy-index").indexWriteLoadForecast(8.0), - IndexMetadata.builder("light-index-1").indexWriteLoadForecast(1.0), - IndexMetadata.builder("light-index-2").indexWriteLoadForecast(2.0), - IndexMetadata.builder("light-index-3").indexWriteLoadForecast(3.0), - IndexMetadata.builder("zero-write-load-index").indexWriteLoadForecast(0.0), - IndexMetadata.builder("no-write-load-index") + anIndex("heavy-index").indexWriteLoadForecast(8.0), + anIndex("light-index-1").indexWriteLoadForecast(1.0), + anIndex("light-index-2").indexWriteLoadForecast(2.0), + anIndex("light-index-3").indexWriteLoadForecast(3.0), + anIndex("zero-write-load-index").indexWriteLoadForecast(0.0), + 
anIndex("no-write-load-index") ), allocationService ); @@ -146,21 +148,16 @@ public void testBalanceByForecastWriteLoad() { public void testBalanceByForecastDiskUsage() { - var allocationService = createAllocationService( - Settings.builder() - // enable disk based balancing - .put(BalancedShardsAllocator.DISK_USAGE_BALANCE_FACTOR_SETTING.getKey(), "1e-9") - .build() - ); + var allocationService = createAllocationService(WITH_DISK_BALANCING); var clusterState = applyStartedShardsUntilNoChange( stateWithStartedIndices( - IndexMetadata.builder("heavy-index").shardSizeInBytesForecast(ByteSizeValue.ofGb(8).getBytes()), - IndexMetadata.builder("light-index-1").shardSizeInBytesForecast(ByteSizeValue.ofGb(1).getBytes()), - IndexMetadata.builder("light-index-2").shardSizeInBytesForecast(ByteSizeValue.ofGb(2).getBytes()), - IndexMetadata.builder("light-index-3").shardSizeInBytesForecast(ByteSizeValue.ofGb(3).getBytes()), - IndexMetadata.builder("zero-disk-usage-index").shardSizeInBytesForecast(0L), - IndexMetadata.builder("no-disk-usage-index") + anIndex("heavy-index").shardSizeInBytesForecast(ByteSizeValue.ofGb(8).getBytes()), + anIndex("light-index-1").shardSizeInBytesForecast(ByteSizeValue.ofGb(1).getBytes()), + anIndex("light-index-2").shardSizeInBytesForecast(ByteSizeValue.ofGb(2).getBytes()), + anIndex("light-index-3").shardSizeInBytesForecast(ByteSizeValue.ofGb(3).getBytes()), + anIndex("zero-disk-usage-index").shardSizeInBytesForecast(0L), + anIndex("no-disk-usage-index") ), allocationService ); @@ -185,10 +182,7 @@ public void testBalanceByForecastDiskUsage() { public void testBalanceByActualDiskUsage() { var allocationService = createAllocationService( - Settings.builder() - // enable disk based balancing - .put(BalancedShardsAllocator.DISK_USAGE_BALANCE_FACTOR_SETTING.getKey(), "1e-9") - .build(), + WITH_DISK_BALANCING, () -> createClusterInfo( Map.ofEntries( Map.entry("[heavy-index][0][p]", ByteSizeValue.ofGb(8).getBytes()), @@ -203,12 +197,12 @@ public void testBalanceByActualDiskUsage() { var clusterState = applyStartedShardsUntilNoChange( stateWithStartedIndices( - IndexMetadata.builder("heavy-index"), - IndexMetadata.builder("light-index-1"), - IndexMetadata.builder("light-index-2"), - IndexMetadata.builder("light-index-3"), - IndexMetadata.builder("zero-disk-usage-index"), - IndexMetadata.builder("no-disk-usage-index") + anIndex("heavy-index"), + anIndex("light-index-1"), + anIndex("light-index-2"), + anIndex("light-index-3"), + anIndex("zero-disk-usage-index"), + anIndex("no-disk-usage-index") ), allocationService ); @@ -233,21 +227,18 @@ public void testBalanceByActualDiskUsage() { public void testBalanceByActualAndForecastDiskUsage() { var allocationService = createAllocationService( - Settings.builder() - // enable disk based balancing - .put(BalancedShardsAllocator.DISK_USAGE_BALANCE_FACTOR_SETTING.getKey(), "1e-9") - .build(), + WITH_DISK_BALANCING, () -> createClusterInfo(Map.of("[heavy-index][0][p]", ByteSizeValue.ofGb(8).getBytes())) ); var clusterState = applyStartedShardsUntilNoChange( stateWithStartedIndices( - IndexMetadata.builder("heavy-index"),// size is set in cluster info - IndexMetadata.builder("light-index-1").shardSizeInBytesForecast(ByteSizeValue.ofGb(1).getBytes()), - IndexMetadata.builder("light-index-2").shardSizeInBytesForecast(ByteSizeValue.ofGb(2).getBytes()), - IndexMetadata.builder("light-index-3").shardSizeInBytesForecast(ByteSizeValue.ofGb(3).getBytes()), - IndexMetadata.builder("zero-disk-usage-index").shardSizeInBytesForecast(0L), - 
IndexMetadata.builder("no-disk-usage-index") + anIndex("heavy-index"),// size is set in cluster info + anIndex("light-index-1").shardSizeInBytesForecast(ByteSizeValue.ofGb(1).getBytes()), + anIndex("light-index-2").shardSizeInBytesForecast(ByteSizeValue.ofGb(2).getBytes()), + anIndex("light-index-3").shardSizeInBytesForecast(ByteSizeValue.ofGb(3).getBytes()), + anIndex("zero-disk-usage-index").shardSizeInBytesForecast(0L), + anIndex("no-disk-usage-index") ), allocationService ); @@ -269,6 +260,26 @@ public void testBalanceByActualAndForecastDiskUsage() { ); } + public void testDoNotBalancePartialIndicesByDiskUsage() { + + var allocationService = createAllocationService(WITH_DISK_BALANCING, () -> createClusterInfo(Map.of())); + + var partialSearchableSnapshotSettings = indexSettings(IndexVersion.current(), 1, 0) // + .put(SETTING_IGNORE_DISK_WATERMARKS.getKey(), true); + + var clusterState = applyStartedShardsUntilNoChange( + stateWithStartedIndices( + anIndex("frozen-index-1", partialSearchableSnapshotSettings).shardSizeInBytesForecast(ByteSizeValue.ofGb(1).getBytes()), + anIndex("frozen-index-2", partialSearchableSnapshotSettings).shardSizeInBytesForecast(ByteSizeValue.ofGb(1).getBytes()), + anIndex("frozen-index-3", partialSearchableSnapshotSettings).shardSizeInBytesForecast(ByteSizeValue.ofGb(1).getBytes()), + anIndex("frozen-index-4", partialSearchableSnapshotSettings).shardSizeInBytesForecast(ByteSizeValue.ofGb(10).getBytes()) + ), + allocationService + ); + + assertThat(getShardsPerNode(clusterState).values(), everyItem(hasSize(2))); + } + private static Map> getShardsPerNode(ClusterState clusterState) { return getPerNode(clusterState, mapping(ShardRouting::getIndexName, toSet())); } @@ -411,6 +422,20 @@ public void testGetIndexDiskUsageInBytes() { // should pick the max shard size among forecast and cluster info assertThat(indexDiskUsageInBytes, equalTo(Math.max(forecastedShardSize, observedShardSize))); } + + { + final var indexMetadata = IndexMetadata.builder("index") + .settings(indexSettings(IndexVersion.current(), 1, 0).put(SETTING_IGNORE_DISK_WATERMARKS.getKey(), true)) + .build(); + + final var indexDiskUsageInBytes = getIndexDiskUsageInBytes( + createClusterInfo(Map.of("[index][0][p]", randomLongBetween(1024, 10240))), + indexMetadata + ); + + // partially cached indices should not be balanced by disk usage + assertThat(indexDiskUsageInBytes, equalTo(0L)); + } } public void testThresholdLimit() { @@ -503,11 +528,19 @@ private static ClusterInfo createClusterInfo(Map indexSizes) { return new ClusterInfo(Map.of(), Map.of(), indexSizes, Map.of(), Map.of(), Map.of()); } + private static IndexMetadata.Builder anIndex(String name) { + return anIndex(name, indexSettings(IndexVersion.current(), 1, 0)); + } + + private static IndexMetadata.Builder anIndex(String name, Settings.Builder settings) { + return IndexMetadata.builder(name).settings(settings); + } + private static ClusterState stateWithStartedIndices(IndexMetadata.Builder... 
indices) { var metadataBuilder = Metadata.builder(); var routingTableBuilder = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY); for (var index : indices) { - var build = index.settings(indexSettings(IndexVersion.current(), 1, 0)).build(); + var build = index.build(); metadataBuilder.put(build, false); routingTableBuilder.addAsNew(build); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterInfoSimulatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterInfoSimulatorTests.java index 1d2a7f05ff1f2..d09a6525c9d76 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterInfoSimulatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterInfoSimulatorTests.java @@ -9,54 +9,74 @@ package org.elasticsearch.cluster.routing.allocation.allocator; import org.elasticsearch.cluster.ClusterInfo; +import org.elasticsearch.cluster.ClusterInfo.NodeAndPath; +import org.elasticsearch.cluster.ClusterInfo.ReservedSpace; import org.elasticsearch.cluster.ClusterInfoSimulator; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.DiskUsage; +import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodeRole; -import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; -import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.snapshots.InternalSnapshotsInfoService; +import org.elasticsearch.snapshots.Snapshot; +import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; -import org.elasticsearch.test.ESTestCase; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; +import static org.elasticsearch.cluster.ClusterInfo.shardIdentifierFromRouting; +import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_RESIZE_SOURCE_NAME_KEY; +import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_RESIZE_SOURCE_UUID_KEY; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; -import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; import static 
org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting; +import static org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider.SETTING_IGNORE_DISK_WATERMARKS; +import static org.elasticsearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; +import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOT_STORE_TYPE; +import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SNAPSHOT_PARTIAL_SETTING; import static org.hamcrest.Matchers.equalTo; -public class ClusterInfoSimulatorTests extends ESTestCase { +public class ClusterInfoSimulatorTests extends ESAllocationTestCase { public void testInitializeNewPrimary() { - var newPrimary = newShardRouting("index-1", 0, "node-0", true, INITIALIZING); - - var simulator = new ClusterInfoSimulator( - new ClusterInfoTestBuilder() // - .withNode("node-0", new DiskUsageBuilder(1000, 1000)) - .withNode("node-1", new DiskUsageBuilder(1000, 1000)) - .withShard(newPrimary, 0) - .build() + var newPrimary = newShardRouting( + new ShardId("my-index", "_na_", 0), + "node-0", + true, + ShardRoutingState.INITIALIZING, + RecoverySource.EmptyStoreRecoverySource.INSTANCE ); + + var initialClusterInfo = new ClusterInfoTestBuilder() // + .withNode("node-0", new DiskUsageBuilder(1000, 1000)) + .build(); + + var state = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().put(IndexMetadata.builder("my-index").settings(indexSettings(IndexVersion.current(), 1, 0)))) + .build(); + var allocation = createRoutingAllocation(state, initialClusterInfo, SnapshotShardSizeInfo.EMPTY); + var simulator = new ClusterInfoSimulator(allocation); simulator.simulateShardStarted(newPrimary); assertThat( @@ -64,32 +84,39 @@ public void testInitializeNewPrimary() { equalTo( new ClusterInfoTestBuilder() // .withNode("node-0", new DiskUsageBuilder(1000, 1000)) - .withNode("node-1", new DiskUsageBuilder(1000, 1000)) - .withShard(newPrimary, 0) .build() ) ); } - public void testInitializeNewPrimaryWithKnownExpectedSize() { + public void testInitializePreviouslyExistingPrimary() { - var newPrimary = newShardRouting("index-1", 0, null, true, UNASSIGNED).initialize("node-0", null, 100); - - var simulator = new ClusterInfoSimulator( - new ClusterInfoTestBuilder() // - .withNode("node-0", new DiskUsageBuilder(1000, 1000)) - .withNode("node-1", new DiskUsageBuilder(1000, 1000)) - .build() + var existingPrimary = newShardRouting( + new ShardId("my-index", "_na_", 0), + "node-0", + true, + ShardRoutingState.INITIALIZING, + RecoverySource.ExistingStoreRecoverySource.INSTANCE ); - simulator.simulateShardStarted(newPrimary); + + var initialClusterInfo = new ClusterInfoTestBuilder() // + .withNode("node-0", new DiskUsageBuilder(1000, 900)) + .withShard(existingPrimary, 100) + .build(); + + var state = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().put(IndexMetadata.builder("my-index").settings(indexSettings(IndexVersion.current(), 1, 0)))) + .build(); + var allocation = createRoutingAllocation(state, initialClusterInfo, SnapshotShardSizeInfo.EMPTY); + var simulator = new ClusterInfoSimulator(allocation); + simulator.simulateShardStarted(existingPrimary); assertThat( simulator.getClusterInfo(), equalTo( new ClusterInfoTestBuilder() // .withNode("node-0", new DiskUsageBuilder(1000, 900)) - .withNode("node-1", new DiskUsageBuilder(1000, 1000)) - .withShard(newPrimary, 100) + .withShard(existingPrimary, 100) .build() ) ); @@ -97,17 +124,26 @@ public void testInitializeNewPrimaryWithKnownExpectedSize() { 
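The rewritten simulator tests above share one structural change: `ClusterInfoSimulator` is now constructed from a `RoutingAllocation` rather than from a raw `ClusterInfo`. A condensed sketch of the new setup, assuming the `createRoutingAllocation` test helper these hunks call:

```java
// Cluster state, ClusterInfo and snapshot shard sizes are combined into a
// RoutingAllocation first; the simulator derives everything else from it.
var state = ClusterState.builder(ClusterName.DEFAULT)
    .metadata(Metadata.builder().put(IndexMetadata.builder("my-index").settings(indexSettings(IndexVersion.current(), 1, 0))))
    .build();
var allocation = createRoutingAllocation(state, initialClusterInfo, SnapshotShardSizeInfo.EMPTY);
var simulator = new ClusterInfoSimulator(allocation);

// Starting a shard moves its size onto the target node in the simulated info.
simulator.simulateShardStarted(shard);
```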
public void testInitializeNewReplica() { - var existingPrimary = newShardRouting("index-1", 0, "node-0", true, STARTED); - var newReplica = newShardRouting("index-1", 0, "node-1", false, INITIALIZING); - - var simulator = new ClusterInfoSimulator( - new ClusterInfoTestBuilder() // - .withNode("node-0", new DiskUsageBuilder(1000, 900)) - .withNode("node-1", new DiskUsageBuilder(1000, 1000)) - .withShard(existingPrimary, 100) - .withShard(newReplica, 0) - .build() + var existingPrimary = newShardRouting(new ShardId("my-index", "_na_", 0), "node-0", true, STARTED); + var newReplica = newShardRouting( + new ShardId("my-index", "_na_", 0), + "node-1", + false, + INITIALIZING, + RecoverySource.PeerRecoverySource.INSTANCE ); + + var initialClusterInfo = new ClusterInfoTestBuilder() // + .withNode("node-0", new DiskUsageBuilder(1000, 900)) + .withNode("node-1", new DiskUsageBuilder(1000, 1000)) + .withShard(existingPrimary, 100) + .build(); + + var state = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().put(IndexMetadata.builder("my-index").settings(indexSettings(IndexVersion.current(), 1, 1)))) + .build(); + var allocation = createRoutingAllocation(state, initialClusterInfo, SnapshotShardSizeInfo.EMPTY); + var simulator = new ClusterInfoSimulator(allocation); simulator.simulateShardStarted(newReplica); assertThat( @@ -123,20 +159,78 @@ public void testInitializeNewReplica() { ); } + public void testInitializeNewReplicaWithReservedSpace() { + + var recoveredSize = 70; + var remainingSize = 30; + var totalShardSize = recoveredSize + remainingSize; + + var indexMetadata = IndexMetadata.builder("my-index").settings(indexSettings(IndexVersion.current(), 1, 1)).build(); + var existingPrimary = newShardRouting(new ShardId(indexMetadata.getIndex(), 0), "node-0", true, STARTED); + var newReplica = newShardRouting( + new ShardId(indexMetadata.getIndex(), 0), + "node-1", + false, + INITIALIZING, + RecoverySource.PeerRecoverySource.INSTANCE + ); + + var initialClusterInfo = new ClusterInfoTestBuilder() // + .withNode("node-0", new DiskUsageBuilder("/data", 1000, 1000 - totalShardSize)) + .withNode("node-1", new DiskUsageBuilder("/data", 1000, 1000 - recoveredSize)) + .withShard(existingPrimary, totalShardSize) + .withReservedSpace("node-1", "/data", remainingSize, newReplica.shardId()) + .build(); + + var state = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().put(indexMetadata, false)) + .routingTable( + RoutingTable.builder() + .add(IndexRoutingTable.builder(indexMetadata.getIndex()).addShard(existingPrimary).addShard(newReplica)) + ) + .build(); + var allocation = createRoutingAllocation(state, initialClusterInfo, SnapshotShardSizeInfo.EMPTY); + var simulator = new ClusterInfoSimulator(allocation); + simulator.simulateShardStarted(newReplica); + + assertThat( + simulator.getClusterInfo(), + equalTo( + new ClusterInfoTestBuilder() // + .withNode("node-0", new DiskUsageBuilder("/data", 1000, 1000 - totalShardSize)) + .withNode("node-1", new DiskUsageBuilder("/data", 1000, 1000 - totalShardSize)) + .withShard(existingPrimary, totalShardSize) + .withShard(newReplica, totalShardSize) + .build() + ) + ); + } + public void testRelocateShard() { var fromNodeId = "node-0"; var toNodeId = "node-1"; - var shard = newShardRouting("index-1", 0, toNodeId, fromNodeId, true, INITIALIZING); - - var simulator = new ClusterInfoSimulator( - new ClusterInfoTestBuilder() // - .withNode(fromNodeId, new DiskUsageBuilder(1000, 900)) - .withNode(toNodeId, new DiskUsageBuilder(1000, 
1000)) - .withShard(shard, 100) - .build() + var shard = newShardRouting( + new ShardId("my-index", "_na_", 0), + toNodeId, + fromNodeId, + true, + INITIALIZING, + RecoverySource.PeerRecoverySource.INSTANCE ); + + var initialClusterInfo = new ClusterInfoTestBuilder() // + .withNode(fromNodeId, new DiskUsageBuilder(1000, 900)) + .withNode(toNodeId, new DiskUsageBuilder(1000, 1000)) + .withShard(shard, 100) + .build(); + + var state = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().put(IndexMetadata.builder("my-index").settings(indexSettings(IndexVersion.current(), 1, 0)))) + .build(); + var allocation = createRoutingAllocation(state, initialClusterInfo, SnapshotShardSizeInfo.EMPTY); + var simulator = new ClusterInfoSimulator(allocation); simulator.simulateShardStarted(shard); assertThat( @@ -151,20 +245,31 @@ public void testRelocateShard() { ); } - public void testRelocateShardWithMultipleDataPath1() { + public void testRelocateShardWithMultipleDataPath() { var fromNodeId = "node-0"; var toNodeId = "node-1"; - var shard = newShardRouting("index-1", 0, toNodeId, fromNodeId, true, INITIALIZING); - - var simulator = new ClusterInfoSimulator( - new ClusterInfoTestBuilder() // - .withNode(fromNodeId, new DiskUsageBuilder("/data-1", 1000, 500), new DiskUsageBuilder("/data-2", 1000, 750)) - .withNode(toNodeId, new DiskUsageBuilder("/data-1", 1000, 750), new DiskUsageBuilder("/data-2", 1000, 900)) - .withShard(shard, 100) - .build() + var shard = newShardRouting( + new ShardId("my-index", "_na_", 0), + toNodeId, + fromNodeId, + true, + INITIALIZING, + RecoverySource.PeerRecoverySource.INSTANCE ); + + var initialClusterInfo = new ClusterInfoTestBuilder() // + .withNode(fromNodeId, new DiskUsageBuilder("/data-1", 1000, 500), new DiskUsageBuilder("/data-2", 1000, 750)) + .withNode(toNodeId, new DiskUsageBuilder("/data-1", 1000, 750), new DiskUsageBuilder("/data-2", 1000, 900)) + .withShard(shard, 100) + .build(); + + var state = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().put(IndexMetadata.builder("my-index").settings(indexSettings(IndexVersion.current(), 1, 0)))) + .build(); + var allocation = createRoutingAllocation(state, initialClusterInfo, SnapshotShardSizeInfo.EMPTY); + var simulator = new ClusterInfoSimulator(allocation); simulator.simulateShardStarted(shard); assertThat( @@ -179,12 +284,201 @@ public void testRelocateShardWithMultipleDataPath1() { ); } + public void testInitializeShardFromSnapshot() { + + var shardSize = 100; + var indexSettings = indexSettings(IndexVersion.current(), 1, 0); + if (randomBoolean()) { + indexSettings.put(INDEX_STORE_TYPE_SETTING.getKey(), SEARCHABLE_SNAPSHOT_STORE_TYPE); + } + + var state = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().put(IndexMetadata.builder("my-index").settings(indexSettings))) + .build(); + + var snapshot = new Snapshot("repository", new SnapshotId("snapshot-1", "na")); + var indexId = new IndexId("my-index", "_na_"); + var shard = newShardRouting( + new ShardId(state.metadata().index("my-index").getIndex(), 0), + "node-0", + true, + ShardRoutingState.INITIALIZING, + new RecoverySource.SnapshotRecoverySource(randomUUID(), snapshot, IndexVersion.current(), indexId) + ); + + var initialClusterInfo = new ClusterInfoTestBuilder() // + .withNode("node-0", new DiskUsageBuilder(1000, 1000)) + .withNode("node-1", new DiskUsageBuilder(1000, 1000)) + .build(); + var snapshotShardSizeInfo = new SnapshotShardSizeInfoTestBuilder() // + .withShard(snapshot, indexId, 
shard.shardId(), shardSize) + .build(); + + var allocation = createRoutingAllocation(state, initialClusterInfo, snapshotShardSizeInfo); + var simulator = new ClusterInfoSimulator(allocation); + simulator.simulateShardStarted(shard); + + assertThat( + simulator.getClusterInfo(), + equalTo( + new ClusterInfoTestBuilder() // + .withNode("node-0", new DiskUsageBuilder(1000, 1000 - shardSize)) + .withNode("node-1", new DiskUsageBuilder(1000, 1000)) + .withShard(shard, shardSize) + .build() + ) + ); + } + + public void testInitializeShardFromPartialSearchableSnapshot() { + + var shardSize = 100; + var indexSettings = indexSettings(IndexVersion.current(), 1, 0) // + .put(INDEX_STORE_TYPE_SETTING.getKey(), SEARCHABLE_SNAPSHOT_STORE_TYPE) + .put(SNAPSHOT_PARTIAL_SETTING.getKey(), true) + .put(SETTING_IGNORE_DISK_WATERMARKS.getKey(), true); + + var state = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().put(IndexMetadata.builder("my-index").settings(indexSettings))) + .build(); + + var snapshot = new Snapshot("repository", new SnapshotId("snapshot-1", "na")); + var indexId = new IndexId("my-index", "_na_"); + var shard = newShardRouting( + new ShardId(state.metadata().index("my-index").getIndex(), 0), + "node-0", + true, + ShardRoutingState.INITIALIZING, + new RecoverySource.SnapshotRecoverySource(randomUUID(), snapshot, IndexVersion.current(), indexId) + ); + + var initialClusterInfo = new ClusterInfoTestBuilder() // + .withNode("node-0", new DiskUsageBuilder(1000, 1000)) + .withNode("node-1", new DiskUsageBuilder(1000, 1000)) + .build(); + var snapshotShardSizeInfo = new SnapshotShardSizeInfoTestBuilder() // + .withShard(snapshot, indexId, shard.shardId(), shardSize) + .build(); + + var allocation = createRoutingAllocation(state, initialClusterInfo, snapshotShardSizeInfo); + var simulator = new ClusterInfoSimulator(allocation); + simulator.simulateShardStarted(shard); + + assertThat( + simulator.getClusterInfo(), + equalTo( + new ClusterInfoTestBuilder() // + .withNode("node-0", new DiskUsageBuilder(1000, 1000)) + .withNode("node-1", new DiskUsageBuilder(1000, 1000)) + .withShard(shard, 0) // partial searchable snapshot always reports 0 size + .build() + ) + ); + } + + public void testRelocatePartialSearchableSnapshotShard() { + + var shardSize = 100; + var indexSettings = indexSettings(IndexVersion.current(), 1, 0) // + .put(INDEX_STORE_TYPE_SETTING.getKey(), SEARCHABLE_SNAPSHOT_STORE_TYPE) + .put(SNAPSHOT_PARTIAL_SETTING.getKey(), true) + .put(SETTING_IGNORE_DISK_WATERMARKS.getKey(), true); + + var state = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().put(IndexMetadata.builder("my-index").settings(indexSettings))) + .build(); + + var snapshot = new Snapshot("repository", new SnapshotId("snapshot-1", "na")); + var indexId = new IndexId("my-index", "_na_"); + + var fromNodeId = "node-0"; + var toNodeId = "node-1"; + + var shard = newShardRouting( + new ShardId("my-index", "_na_", 0), + toNodeId, + fromNodeId, + true, + INITIALIZING, + RecoverySource.PeerRecoverySource.INSTANCE + ); + + var initialClusterInfo = new ClusterInfoTestBuilder() // + .withNode(fromNodeId, new DiskUsageBuilder(1000, 1000)) + .withNode(toNodeId, new DiskUsageBuilder(1000, 1000)) + .withShard(shard, 0) + .build(); + var snapshotShardSizeInfo = new SnapshotShardSizeInfoTestBuilder() // + .withShard(snapshot, indexId, shard.shardId(), shardSize) + .build(); + + var allocation = createRoutingAllocation(state, initialClusterInfo, snapshotShardSizeInfo); + var simulator = new 
ClusterInfoSimulator(allocation); + simulator.simulateShardStarted(shard); + + assertThat( + simulator.getClusterInfo(), + equalTo( + new ClusterInfoTestBuilder() // + .withNode(fromNodeId, new DiskUsageBuilder(1000, 1000)) + .withNode(toNodeId, new DiskUsageBuilder(1000, 1000)) + .withShard(shard, 0) // partial searchable snapshot always reports 0 size + .build() + ) + ); + } + + public void testInitializeShardFromClone() { + + var sourceShardSize = randomLongBetween(100, 1000); + var source = newShardRouting(new ShardId("source", "_na_", 0), randomIdentifier(), true, ShardRoutingState.STARTED); + var target = newShardRouting( + new ShardId("target", "_na_", 0), + randomIdentifier(), + true, + ShardRoutingState.INITIALIZING, + RecoverySource.LocalShardsRecoverySource.INSTANCE + ); + + var state = ClusterState.builder(ClusterName.DEFAULT) + .metadata( + Metadata.builder() + .put(IndexMetadata.builder("source").settings(indexSettings(IndexVersion.current(), 1, 0))) + .put( + IndexMetadata.builder("target") + .settings( + indexSettings(IndexVersion.current(), 1, 0) // + .put(INDEX_RESIZE_SOURCE_NAME_KEY, "source") // + .put(INDEX_RESIZE_SOURCE_UUID_KEY, "_na_") + ) + ) + ) + .routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(source.index()).addShard(source))) + .build(); + + var initialClusterInfo = new ClusterInfoTestBuilder().withNode("node-0", new DiskUsageBuilder(1000, 1000 - sourceShardSize)) + .withShard(source, sourceShardSize) + .build(); + + var allocation = createRoutingAllocation(state, initialClusterInfo, SnapshotShardSizeInfo.EMPTY); + var simulator = new ClusterInfoSimulator(allocation); + simulator.simulateShardStarted(target); + + assertThat( + simulator.getClusterInfo(), + equalTo( + new ClusterInfoTestBuilder() // + .withNode("node-0", new DiskUsageBuilder(1000, 1000 - sourceShardSize)) + .withShard(source, sourceShardSize) + .withShard(target, sourceShardSize) + .build() + ) + ); + } + public void testDiskUsageSimulationWithSingleDataPathAndDiskThresholdDecider() { - var discoveryNodesBuilder = DiscoveryNodes.builder() - .add(createDiscoveryNode("node-0", DiscoveryNodeRole.roles())) - .add(createDiscoveryNode("node-1", DiscoveryNodeRole.roles())) - .add(createDiscoveryNode("node-2", DiscoveryNodeRole.roles())); + var discoveryNodesBuilder = DiscoveryNodes.builder().add(newNode("node-0")).add(newNode("node-1")).add(newNode("node-2")); var metadataBuilder = Metadata.builder(); var routingTableBuilder = RoutingTable.builder(); @@ -192,28 +486,35 @@ public void testDiskUsageSimulationWithSingleDataPathAndDiskThresholdDecider() { var shard1 = newShardRouting("index-1", 0, "node-0", null, true, STARTED); addIndex(metadataBuilder, routingTableBuilder, shard1); - var shard2 = newShardRouting("index-2", 0, "node-0", "node-1", true, INITIALIZING); + var shard2 = newShardRouting( + new ShardId("index-2", "_na_", 0), + "node-0", + "node-1", + true, + INITIALIZING, + RecoverySource.PeerRecoverySource.INSTANCE + ); addIndex(metadataBuilder, routingTableBuilder, shard2); var shard3 = newShardRouting("index-3", 0, "node-1", null, true, STARTED); addIndex(metadataBuilder, routingTableBuilder, shard3); - var clusterState = ClusterState.builder(ClusterName.DEFAULT) + var state = ClusterState.builder(ClusterName.DEFAULT) .nodes(discoveryNodesBuilder) .metadata(metadataBuilder) .routingTable(routingTableBuilder) .build(); - var simulator = new ClusterInfoSimulator( - new ClusterInfoTestBuilder() // - .withNode("node-0", new DiskUsageBuilder("/data-1", 1000, 500)) - 
.withNode("node-1", new DiskUsageBuilder("/data-1", 1000, 300)) - .withShard(shard1, 500) - .withShard(shard2, 400) - .withShard(shard3, 300) - .build() - ); + var initialClusterInfo = new ClusterInfoTestBuilder() // + .withNode("node-0", new DiskUsageBuilder("/data-1", 1000, 500)) + .withNode("node-1", new DiskUsageBuilder("/data-1", 1000, 300)) + .withShard(shard1, 500) + .withShard(shard2, 400) + .withShard(shard3, 300) + .build(); + var allocation = createRoutingAllocation(state, initialClusterInfo, SnapshotShardSizeInfo.EMPTY); + var simulator = new ClusterInfoSimulator(allocation); simulator.simulateShardStarted(shard2); assertThat( @@ -229,42 +530,29 @@ public void testDiskUsageSimulationWithSingleDataPathAndDiskThresholdDecider() { ) ); - var decider = new DiskThresholdDecider( - Settings.EMPTY, - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) - ); - var allocation = new RoutingAllocation( - new AllocationDeciders(List.of(decider)), - clusterState, - simulator.getClusterInfo(), - SnapshotShardSizeInfo.EMPTY, - 0L - ); - var routingNodes = allocation.routingNodes(); + var decider = new DiskThresholdDecider(Settings.EMPTY, ClusterSettings.createBuiltInClusterSettings(Settings.EMPTY)); + allocation = createRoutingAllocation(state, simulator.getClusterInfo(), SnapshotShardSizeInfo.EMPTY, decider); assertThat( "Should keep index-1 on node-0", - decider.canRemain(clusterState.metadata().index("index-1"), shard1, routingNodes.node("node-0"), allocation).type(), + decider.canRemain(state.metadata().index("index-1"), shard1, allocation.routingNodes().node("node-0"), allocation).type(), equalTo(Decision.Type.YES) ); assertThat( "Should keep index-2 on node-0", - decider.canRemain(clusterState.metadata().index("index-2"), shard2, routingNodes.node("node-0"), allocation).type(), + decider.canRemain(state.metadata().index("index-2"), shard2, allocation.routingNodes().node("node-0"), allocation).type(), equalTo(Decision.Type.YES) ); assertThat( "Should not allocate index-3 on node-0 (not enough space)", - decider.canAllocate(shard3, routingNodes.node("node-0"), allocation).type(), + decider.canAllocate(shard3, allocation.routingNodes().node("node-0"), allocation).type(), equalTo(Decision.Type.NO) ); } public void testDiskUsageSimulationWithMultipleDataPathAndDiskThresholdDecider() { - var discoveryNodesBuilder = DiscoveryNodes.builder() - .add(createDiscoveryNode("node-0", DiscoveryNodeRole.roles())) - .add(createDiscoveryNode("node-1", DiscoveryNodeRole.roles())) - .add(createDiscoveryNode("node-2", DiscoveryNodeRole.roles())); + var discoveryNodesBuilder = DiscoveryNodes.builder().add(newNode("node-0")).add(newNode("node-1")).add(newNode("node-2")); var metadataBuilder = Metadata.builder(); var routingTableBuilder = RoutingTable.builder(); @@ -272,28 +560,35 @@ public void testDiskUsageSimulationWithMultipleDataPathAndDiskThresholdDecider() var shard1 = newShardRouting("index-1", 0, "node-0", null, true, STARTED); addIndex(metadataBuilder, routingTableBuilder, shard1); - var shard2 = newShardRouting("index-2", 0, "node-0", "node-1", true, INITIALIZING); + var shard2 = newShardRouting( + new ShardId("index-2", "_na_", 0), + "node-0", + "node-1", + true, + INITIALIZING, + RecoverySource.PeerRecoverySource.INSTANCE + ); addIndex(metadataBuilder, routingTableBuilder, shard2); var shard3 = newShardRouting("index-3", 0, "node-1", null, true, STARTED); addIndex(metadataBuilder, routingTableBuilder, shard3); - var clusterState = ClusterState.builder(ClusterName.DEFAULT) 
+ var state = ClusterState.builder(ClusterName.DEFAULT) .nodes(discoveryNodesBuilder) .metadata(metadataBuilder) .routingTable(routingTableBuilder) .build(); - var simulator = new ClusterInfoSimulator( - new ClusterInfoTestBuilder() // - .withNode("node-0", new DiskUsageBuilder("/data-1", 1000, 100), new DiskUsageBuilder("/data-2", 1000, 500)) - .withNode("node-1", new DiskUsageBuilder("/data-1", 1000, 100), new DiskUsageBuilder("/data-2", 1000, 300)) - .withShard(shard1, 500) - .withShard(shard2, 400) - .withShard(shard3, 300) - .build() - ); + var initialClusterInfo = new ClusterInfoTestBuilder() // + .withNode("node-0", new DiskUsageBuilder("/data-1", 1000, 100), new DiskUsageBuilder("/data-2", 1000, 500)) + .withNode("node-1", new DiskUsageBuilder("/data-1", 1000, 100), new DiskUsageBuilder("/data-2", 1000, 300)) + .withShard(shard1, 500) + .withShard(shard2, 400) + .withShard(shard3, 300) + .build(); + var allocation = createRoutingAllocation(state, initialClusterInfo, SnapshotShardSizeInfo.EMPTY); + var simulator = new ClusterInfoSimulator(allocation); simulator.simulateShardStarted(shard2); assertThat( @@ -309,53 +604,63 @@ public void testDiskUsageSimulationWithMultipleDataPathAndDiskThresholdDecider() ) ); - var decider = new DiskThresholdDecider( - Settings.EMPTY, - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) - ); - var allocation = new RoutingAllocation( - new AllocationDeciders(List.of(decider)), - clusterState, - simulator.getClusterInfo(), - SnapshotShardSizeInfo.EMPTY, - 0L - ); - var routingNodes = allocation.routingNodes(); + var decider = new DiskThresholdDecider(Settings.EMPTY, ClusterSettings.createBuiltInClusterSettings(Settings.EMPTY)); + allocation = createRoutingAllocation(state, simulator.getClusterInfo(), SnapshotShardSizeInfo.EMPTY, decider); assertThat( "Should keep index-1 on node-0", - decider.canRemain(clusterState.metadata().index("index-1"), shard1, routingNodes.node("node-0"), allocation).type(), + decider.canRemain(state.metadata().index("index-1"), shard1, allocation.routingNodes().node("node-0"), allocation).type(), equalTo(Decision.Type.YES) ); assertThat( "Should keep index-2 on node-0", - decider.canRemain(clusterState.metadata().index("index-2"), shard2, routingNodes.node("node-0"), allocation).type(), + decider.canRemain(state.metadata().index("index-2"), shard2, allocation.routingNodes().node("node-0"), allocation).type(), equalTo(Decision.Type.YES) ); assertThat( "Should not allocate index-3 on node-0 (not enough space)", - decider.canAllocate(shard3, routingNodes.node("node-0"), allocation).type(), + decider.canAllocate(shard3, allocation.routingNodes().node("node-0"), allocation).type(), equalTo(Decision.Type.NO) ); } - private static DiscoveryNode createDiscoveryNode(String id, Set roles) { - return DiscoveryNodeUtils.builder(id).name(id).externalId(UUIDs.randomBase64UUID(random())).roles(roles).build(); - } - private static void addIndex(Metadata.Builder metadataBuilder, RoutingTable.Builder routingTableBuilder, ShardRouting shardRouting) { var name = shardRouting.getIndexName(); metadataBuilder.put(IndexMetadata.builder(name).settings(indexSettings(IndexVersion.current(), 1, 0))); routingTableBuilder.add(IndexRoutingTable.builder(metadataBuilder.get(name).getIndex()).addShard(shardRouting)); } + private static RoutingAllocation createRoutingAllocation( + ClusterState state, + ClusterInfo clusterInfo, + SnapshotShardSizeInfo snapshotShardSizeInfo, + AllocationDecider... 
deciders + ) { + return new RoutingAllocation(new AllocationDeciders(List.of(deciders)), state, clusterInfo, snapshotShardSizeInfo, 0); + } + + private static class SnapshotShardSizeInfoTestBuilder { + + private final Map snapshotShardSizes = new HashMap<>(); + + public SnapshotShardSizeInfoTestBuilder withShard(Snapshot snapshot, IndexId indexId, ShardId shardId, long size) { + snapshotShardSizes.put(new InternalSnapshotsInfoService.SnapshotShard(snapshot, indexId, shardId), size); + return this; + } + + public SnapshotShardSizeInfo build() { + return new SnapshotShardSizeInfo(snapshotShardSizes); + } + } + private static class ClusterInfoTestBuilder { private final Map leastAvailableSpaceUsage = new HashMap<>(); private final Map mostAvailableSpaceUsage = new HashMap<>(); private final Map shardSizes = new HashMap<>(); + private final Map reservedSpace = new HashMap<>(); public ClusterInfoTestBuilder withNode(String name, DiskUsageBuilder diskUsageBuilderBuilder) { leastAvailableSpaceUsage.put(name, diskUsageBuilderBuilder.toDiskUsage(name)); @@ -370,12 +675,17 @@ public ClusterInfoTestBuilder withNode(String name, DiskUsageBuilder leastAvaila } public ClusterInfoTestBuilder withShard(ShardRouting shard, long size) { - shardSizes.put(ClusterInfo.shardIdentifierFromRouting(shard), size); + shardSizes.put(shardIdentifierFromRouting(shard), size); + return this; + } + + public ClusterInfoTestBuilder withReservedSpace(String nodeId, String path, long size, ShardId... shardIds) { + reservedSpace.put(new NodeAndPath(nodeId, nodeId + path), new ReservedSpace(size, Set.of(shardIds))); return this; } public ClusterInfo build() { - return new ClusterInfo(leastAvailableSpaceUsage, mostAvailableSpaceUsage, shardSizes, Map.of(), Map.of(), Map.of()); + return new ClusterInfo(leastAvailableSpaceUsage, mostAvailableSpaceUsage, shardSizes, Map.of(), Map.of(), reservedSpace); } } @@ -385,8 +695,8 @@ private DiskUsageBuilder(long total, long free) { this("/data", total, free); } - public DiskUsage toDiskUsage(String name) { - return new DiskUsage(name, name, name + path, total, free); + public DiskUsage toDiskUsage(String nodeId) { + return new DiskUsage(nodeId, nodeId, nodeId + path, total, free); } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java index 5e3b6cd02f830..9fe168074f41e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java @@ -10,8 +10,9 @@ import org.apache.logging.log4j.Level; import org.elasticsearch.cluster.ClusterInfo; +import org.elasticsearch.cluster.ClusterInfo.NodeAndPath; import org.elasticsearch.cluster.ClusterInfo.NodeAndShard; -import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.ClusterInfo.ReservedSpace; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.DiskUsage; @@ -39,13 +40,13 @@ import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; 
import org.elasticsearch.common.util.Maps; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.snapshots.InternalSnapshotsInfoService; import org.elasticsearch.snapshots.InternalSnapshotsInfoService.SnapshotShard; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; @@ -82,6 +83,7 @@ import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.notNullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -804,29 +806,25 @@ public void testComputeConsideringShardSizes() { var node0RemainingBytes = (index0ReplicaShard.started() || index0ReplicaShard.relocating()) && Objects.equals(index0ReplicaShard.currentNodeId(), "node-0") ? 100 : 600; - var node0Usage = new DiskUsage("node-0", "node-0", "/data", 1000, node0RemainingBytes); - var node1Usage = new DiskUsage("node-1", "node-1", "/data", 1000, 100); - var node2Usage = new DiskUsage("node-2", "node-2", "/data", 1000, 1000); - var clusterInfo = createClusterInfo( - List.of(node0Usage, node1Usage, node2Usage), - Map.ofEntries( - // node-0 & node-1 - indexSize(clusterState, "index-0", 500, true), - indexSize(clusterState, "index-0", 500, false), - // node-0 - indexSize(clusterState, "index-1", 400, true), - // node-1 - indexSize(clusterState, "index-2", 50, true), - indexSize(clusterState, "index-3", 50, true), - indexSize(clusterState, "index-4", 50, true), - indexSize(clusterState, "index-5", 50, true), - indexSize(clusterState, "index-6", 50, true), - indexSize(clusterState, "index-7", 50, true), - indexSize(clusterState, "index-8", 50, true), - indexSize(clusterState, "index-9", 50, true) - ) - ); + var clusterInfo = new ClusterInfoTestBuilder().withNode("node-0", 1000, node0RemainingBytes) + .withNode("node-1", 1000, 100) + .withNode("node-2", 1000, 1000) + // node-0 & node-1 + .withShard(findShardId(clusterState, "index-0"), true, 500) + .withShard(findShardId(clusterState, "index-0"), false, 500) + // node-0 + .withShard(findShardId(clusterState, "index-1"), true, 400) + // node-1 + .withShard(findShardId(clusterState, "index-2"), true, 50) + .withShard(findShardId(clusterState, "index-3"), true, 50) + .withShard(findShardId(clusterState, "index-4"), true, 50) + .withShard(findShardId(clusterState, "index-5"), true, 50) + .withShard(findShardId(clusterState, "index-6"), true, 50) + .withShard(findShardId(clusterState, "index-7"), true, 50) + .withShard(findShardId(clusterState, "index-8"), true, 50) + .withShard(findShardId(clusterState, "index-9"), true, 50) + .build(); var settings = Settings.builder() // force as many iterations as possible to accumulate the diff @@ -839,11 +837,9 @@ public void testComputeConsideringShardSizes() { var initial = new DesiredBalance( 1, - Map.of( - findShardId(clusterState, "index-0"), - new ShardAssignment(Set.of("node-0", "node-1"), 2, 0, 0), - findShardId(clusterState, "index-1"), - new ShardAssignment(Set.of("node-0"), 1, 0, 0) + Map.ofEntries( + Map.entry(findShardId(clusterState, "index-0"), new ShardAssignment(Set.of("node-0", "node-1"), 2, 0, 0)), + Map.entry(findShardId(clusterState, "index-1"), new ShardAssignment(Set.of("node-0"), 1, 0, 0)) ) ); @@ -865,16 +861,182 @@ public void testComputeConsideringShardSizes() { assertThat(resultDiskUsage, allOf(aMapWithSize(2), hasEntry("node-0", 
950L), hasEntry("node-1", 850L))); } - private static ClusterInfo createClusterInfo(List diskUsages, Map shardSizes) { - var diskUsage = diskUsages.stream().collect(toMap(DiskUsage::getNodeId, Function.identity())); - return new ClusterInfo(diskUsage, diskUsage, shardSizes, Map.of(), Map.of(), Map.of()); + public void testAccountForSizeOfMisplacedShardsDuringNewComputation() { + + var snapshot = new Snapshot("repository", new SnapshotId("snapshot", randomUUID())); + + var clusterInfoBuilder = new ClusterInfoTestBuilder().withNode( + "node-1", + ByteSizeValue.ofGb(10).getBytes(), + ByteSizeValue.ofGb(2).getBytes() + ).withNode("node-2", ByteSizeValue.ofGb(10).getBytes(), ByteSizeValue.ofGb(2).getBytes()); + var snapshotShardSizes = Maps.newHashMapWithExpectedSize(5); + + var routingTableBuilder = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY); + // index-1 is allocated according to the desired balance + var indexMetadata1 = IndexMetadata.builder("index-1").settings(indexSettings(IndexVersion.current(), 2, 0)).build(); + routingTableBuilder.add( + IndexRoutingTable.builder(indexMetadata1.getIndex()) + .addShard(newShardRouting(shardIdFrom(indexMetadata1, 0), "node-1", true, STARTED)) + .addShard(newShardRouting(shardIdFrom(indexMetadata1, 1), "node-2", true, STARTED)) + ); + clusterInfoBuilder.withShard(shardIdFrom(indexMetadata1, 0), true, ByteSizeValue.ofGb(8).getBytes()) + .withShard(shardIdFrom(indexMetadata1, 1), true, ByteSizeValue.ofGb(8).getBytes()); + + // index-2 is restored earlier but is not started on the desired node yet + var indexMetadata2 = IndexMetadata.builder("index-2").settings(indexSettings(IndexVersion.current(), 1, 0)).build(); + snapshotShardSizes.put( + new SnapshotShard(snapshot, indexIdFrom(indexMetadata2), shardIdFrom(indexMetadata2, 0)), + ByteSizeValue.ofGb(1).getBytes() + ); + var index2SnapshotRecoverySource = new RecoverySource.SnapshotRecoverySource( + "restore", + snapshot, + IndexVersion.current(), + indexIdFrom(indexMetadata2) + ); + switch (randomInt(3)) { + // index is still unassigned + case 0 -> routingTableBuilder.addAsNewRestore(indexMetadata2, index2SnapshotRecoverySource, Set.of()); + // index is initializing on desired node + case 1 -> { + ShardId index2ShardId = shardIdFrom(indexMetadata2, 0); + routingTableBuilder.add( + IndexRoutingTable.builder(indexMetadata2.getIndex()) + .addShard(newShardRouting(index2ShardId, "node-1", true, INITIALIZING, index2SnapshotRecoverySource)) + ); + if (randomBoolean()) { + // Shard is 75% downloaded + clusterInfoBuilder // + .withNodeUsedSpace("node-1", ByteSizeValue.ofMb(768).getBytes()) + .withReservedSpace("node-1", ByteSizeValue.ofMb(256).getBytes(), index2ShardId); + } + } + // index is initializing on undesired node + case 2 -> { + ShardId index2ShardId = shardIdFrom(indexMetadata2, 0); + routingTableBuilder.add( + IndexRoutingTable.builder(indexMetadata2.getIndex()) + .addShard(newShardRouting(index2ShardId, "node-2", true, INITIALIZING, index2SnapshotRecoverySource)) + ); + if (randomBoolean()) { + // Shard is 75% downloaded + clusterInfoBuilder // + .withNodeUsedSpace("node-2", ByteSizeValue.ofMb(768).getBytes()) + .withReservedSpace("node-2", ByteSizeValue.ofMb(256).getBytes(), index2ShardId); + } + } + // index is started on undesired node + case 3 -> { + routingTableBuilder.add( + IndexRoutingTable.builder(indexMetadata2.getIndex()) + .addShard(newShardRouting(shardIdFrom(indexMetadata2, 0), "node-2", true, STARTED)) + ); + clusterInfoBuilder.withNodeUsedSpace("node-2", 
ByteSizeValue.ofGb(1).getBytes()) + .withShard(shardIdFrom(indexMetadata2, 0), true, ByteSizeValue.ofGb(1).getBytes()); + } + default -> throw new AssertionError("unexpected randomization"); + } + + // index-3 is restored as new from snapshot + var indexMetadata3 = IndexMetadata.builder("index-3").settings(indexSettings(IndexVersion.current(), 2, 0)).build(); + routingTableBuilder.addAsNewRestore( + indexMetadata3, + new RecoverySource.SnapshotRecoverySource("restore", snapshot, IndexVersion.current(), indexIdFrom(indexMetadata3)), + Set.of() + ); + snapshotShardSizes.put( + new SnapshotShard(snapshot, indexIdFrom(indexMetadata3), shardIdFrom(indexMetadata3, 0)), + ByteSizeValue.ofMb(512).getBytes() + ); + snapshotShardSizes.put( + new SnapshotShard(snapshot, indexIdFrom(indexMetadata3), shardIdFrom(indexMetadata3, 1)), + ByteSizeValue.ofMb(512).getBytes() + ); + + var clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(newNode("node-1")).add(newNode("node-2"))) + .metadata(Metadata.builder().put(indexMetadata1, false).put(indexMetadata2, false).put(indexMetadata3, false).build()) + .routingTable(routingTableBuilder) + .customs( + Map.of( + RestoreInProgress.TYPE, + new RestoreInProgress.Builder().add( + new RestoreInProgress.Entry( + "restore", + snapshot, + RestoreInProgress.State.STARTED, + randomBoolean(), + List.of(indexMetadata2.getIndex().getName(), indexMetadata3.getIndex().getName()), + Map.ofEntries( + Map.entry(shardIdFrom(indexMetadata2, 0), new RestoreInProgress.ShardRestoreStatus(randomUUID())), + Map.entry(shardIdFrom(indexMetadata3, 0), new RestoreInProgress.ShardRestoreStatus(randomUUID())), + Map.entry(shardIdFrom(indexMetadata3, 1), new RestoreInProgress.ShardRestoreStatus(randomUUID())) + ) + ) + ).build() + ) + ) + .build(); + + var settings = Settings.EMPTY; + var allocation = new RoutingAllocation( + randomAllocationDeciders(settings, createBuiltInClusterSettings(settings)), + clusterState, + clusterInfoBuilder.build(), + new SnapshotShardSizeInfo(snapshotShardSizes), + 0L + ); + var initialDesiredBalance = new DesiredBalance( + 1, + Map.ofEntries( + Map.entry(shardIdFrom(indexMetadata1, 0), new ShardAssignment(Set.of("node-1"), 1, 0, 0)), + Map.entry(shardIdFrom(indexMetadata1, 1), new ShardAssignment(Set.of("node-2"), 1, 0, 0)), + Map.entry(shardIdFrom(indexMetadata2, 0), new ShardAssignment(Set.of("node-1"), 1, 0, 0)) + ) + ); + var nextDesiredBalance = createDesiredBalanceComputer(new BalancedShardsAllocator()).compute( + initialDesiredBalance, + new DesiredBalanceInput(2, allocation, List.of()), + queue(), + input -> true + ); + + // both node-1 and node-2 have enough space to allocate either only the [index-2] shard or both [index-3] shards + assertThat( + nextDesiredBalance.assignments(), + anyOf( + equalTo( + Map.ofEntries( + Map.entry(shardIdFrom(indexMetadata1, 0), new ShardAssignment(Set.of("node-1"), 1, 0, 0)), + Map.entry(shardIdFrom(indexMetadata1, 1), new ShardAssignment(Set.of("node-2"), 1, 0, 0)), + Map.entry(shardIdFrom(indexMetadata2, 0), new ShardAssignment(Set.of("node-1"), 1, 0, 0)), + Map.entry(shardIdFrom(indexMetadata3, 0), new ShardAssignment(Set.of("node-2"), 1, 0, 0)), + Map.entry(shardIdFrom(indexMetadata3, 1), new ShardAssignment(Set.of("node-2"), 1, 0, 0)) + ) + ), + equalTo( + Map.ofEntries( + Map.entry(shardIdFrom(indexMetadata1, 0), new ShardAssignment(Set.of("node-1"), 1, 0, 0)), + Map.entry(shardIdFrom(indexMetadata1, 1), new ShardAssignment(Set.of("node-2"), 1, 0, 0)), + 
Map.entry(shardIdFrom(indexMetadata2, 0), new ShardAssignment(Set.of("node-2"), 1, 0, 0)), + Map.entry(shardIdFrom(indexMetadata3, 0), new ShardAssignment(Set.of("node-1"), 1, 0, 0)), + Map.entry(shardIdFrom(indexMetadata3, 1), new ShardAssignment(Set.of("node-1"), 1, 0, 0)) + ) + ) + ) + ); } public void testAccountForSizeOfAllInitializingShardsDuringAllocation() { var snapshot = new Snapshot("repository", new SnapshotId("snapshot", randomUUID())); - var shardSizeInfo = Maps.newHashMapWithExpectedSize(5); + var clusterInfoBuilder = new ClusterInfoTestBuilder().withNode( + "node-1", + ByteSizeValue.ofGb(10).getBytes(), + ByteSizeValue.ofGb(2).getBytes() + ).withNode("node-2", ByteSizeValue.ofGb(10).getBytes(), ByteSizeValue.ofGb(2).getBytes()); var snapshotShardSizes = Maps.newHashMapWithExpectedSize(5); var routingTableBuilder = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY); @@ -885,8 +1047,8 @@ public void testAccountForSizeOfAllInitializingShardsDuringAllocation() { .addShard(newShardRouting(shardIdFrom(indexMetadata1, 0), "node-1", true, STARTED)) .addShard(newShardRouting(shardIdFrom(indexMetadata1, 1), "node-2", true, STARTED)) ); - shardSizeInfo.put(shardIdentifierFromRouting(shardIdFrom(indexMetadata1, 0), true), ByteSizeValue.ofGb(8).getBytes()); - shardSizeInfo.put(shardIdentifierFromRouting(shardIdFrom(indexMetadata1, 1), true), ByteSizeValue.ofGb(8).getBytes()); + clusterInfoBuilder.withShard(shardIdFrom(indexMetadata1, 0), true, ByteSizeValue.ofGb(8).getBytes()) + .withShard(shardIdFrom(indexMetadata1, 1), true, ByteSizeValue.ofGb(8).getBytes()); // index-2 & index-3 are restored as new from snapshot var indexMetadata2 = IndexMetadata.builder("index-2") @@ -944,23 +1106,12 @@ public void testAccountForSizeOfAllInitializingShardsDuringAllocation() { ) .build(); - var clusterInfo = createClusterInfo( - List.of( - // node-1 has enough space to only allocate the only [index-2] shard - new DiskUsage("node-1", "data-1", "/data", ByteSizeValue.ofGb(10).getBytes(), ByteSizeValue.ofGb(2).getBytes()), - // node-2 has enough space to only allocate both shards of [index-3] - new DiskUsage("node-2", "data-2", "/data", ByteSizeValue.ofGb(10).getBytes(), ByteSizeValue.ofGb(2).getBytes()) - ), - shardSizeInfo - ); - var snapshotShardSizeInfo = new SnapshotShardSizeInfo(snapshotShardSizes); - var settings = Settings.EMPTY; var allocation = new RoutingAllocation( randomAllocationDeciders(settings, createBuiltInClusterSettings(settings)), clusterState, - clusterInfo, - snapshotShardSizeInfo, + clusterInfoBuilder.build(), + new SnapshotShardSizeInfo(snapshotShardSizes), 0L ); var initialDesiredBalance = new DesiredBalance( @@ -977,6 +1128,7 @@ public void testAccountForSizeOfAllInitializingShardsDuringAllocation() { input -> true ); + // both node-1 and node-2 have enough space to allocate either only the [index-2] shard or both [index-3] shards assertThat( nextDesiredBalance.assignments(), anyOf( @@ -1002,6 +1154,46 @@ ); } + @Deprecated + private static ClusterInfo createClusterInfo(List diskUsages, Map shardSizes) { + var diskUsage = diskUsages.stream().collect(toMap(DiskUsage::getNodeId, Function.identity())); + return new ClusterInfo(diskUsage, diskUsage, shardSizes, Map.of(), Map.of(), Map.of()); + } + + private static class ClusterInfoTestBuilder { + + private final Map diskUsage = new HashMap<>(); + private final Map shardSizes = new HashMap<>(); + private final Map reservedSpace = new 
HashMap<>(); + + public ClusterInfoTestBuilder withNode(String nodeId, long totalBytes, long freeBytes) { + diskUsage.put(nodeId, new DiskUsage(nodeId, nodeId, "/path", totalBytes, freeBytes)); + return this; + } + + public ClusterInfoTestBuilder withNodeUsedSpace(String nodeId, long usedBytes) { + diskUsage.compute(nodeId, (key, usage) -> { + assertThat(usage, notNullValue()); + return new DiskUsage(usage.nodeId(), usage.nodeName(), usage.path(), usage.totalBytes(), usage.freeBytes() - usedBytes); + }); + return this; + } + + public ClusterInfoTestBuilder withShard(ShardId shardId, boolean primary, long size) { + shardSizes.put(shardIdentifierFromRouting(shardId, primary), size); + return this; + } + + public ClusterInfoTestBuilder withReservedSpace(String nodeId, long size, ShardId... shardIds) { + reservedSpace.put(new NodeAndPath(nodeId, "/path"), new ReservedSpace(size, Set.of(shardIds))); + return this; + } + + public ClusterInfo build() { + return new ClusterInfo(diskUsage, diskUsage, shardSizes, Map.of(), Map.of(), reservedSpace); + } + } + private static IndexId indexIdFrom(IndexMetadata indexMetadata) { return new IndexId(indexMetadata.getIndex().getName(), indexMetadata.getIndex().getUUID()); } @@ -1188,8 +1380,8 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing }); } - private static DesiredBalanceComputer createDesiredBalanceComputer(ShardsAllocator shardsAllocator) { - return new DesiredBalanceComputer(createBuiltInClusterSettings(), mock(ThreadPool.class), shardsAllocator); + private static DesiredBalanceComputer createDesiredBalanceComputer(ShardsAllocator allocator) { + return new DesiredBalanceComputer(createBuiltInClusterSettings(), mock(ThreadPool.class), allocator); } private static void assertDesiredAssignments(DesiredBalance desiredBalance, Map expected) { @@ -1210,13 +1402,7 @@ private static RoutingAllocation routingAllocationWithDecidersOf( Settings settings ) { return new RoutingAllocation( - new AllocationDeciders( - ClusterModule.createAllocationDeciders( - settings, - new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - List.of() - ) - ), + randomAllocationDeciders(settings, createBuiltInClusterSettings(settings)), clusterState, clusterInfo, SnapshotShardSizeInfo.EMPTY, diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java index 1b3fa260db1fa..f50418bf20e6c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java @@ -82,6 +82,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiPredicate; import java.util.function.Consumer; +import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.StreamSupport; @@ -1380,7 +1381,7 @@ private static class NoOpExistingShardsAllocator implements ExistingShardsAlloca public void beforeAllocation(RoutingAllocation allocation) {} @Override - public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) {} + public void afterPrimariesBeforeReplicas(RoutingAllocation allocation, Predicate isRelevantShardPredicate) {} @Override public void allocateUnassigned( diff --git 
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java index a4e5ccb7e6fa4..3c58eb8c57573 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java @@ -689,7 +689,7 @@ public void allocateUnassigned( } @Override - public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) {} + public void afterPrimariesBeforeReplicas(RoutingAllocation allocation, Predicate isRelevantShardPredicate) {} }; } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDeciderTests.java index 519184800d443..b9bf565ee58cd 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDeciderTests.java @@ -12,6 +12,8 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; +import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; @@ -25,7 +27,9 @@ import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; +import org.elasticsearch.snapshots.SnapshotsInProgressSerializationTests; import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; import java.util.Arrays; import java.util.List; @@ -197,6 +201,115 @@ public void testThrottleWhenSnapshotInProgress() { ); } + public void testYesWhenSnapshotInProgressButShardIsPausedDueToShutdown() { + + // need to have a shard in INIT state to avoid the fast-path + final var otherIndex = randomIdentifier(); + + final var clusterStateWithShutdownMetadata = SnapshotsInProgressSerializationTests.CLUSTER_STATE_FOR_NODE_SHUTDOWNS + .copyAndUpdateMetadata( + mdb -> mdb.putCustom( + NodesShutdownMetadata.TYPE, + new NodesShutdownMetadata( + Map.of( + nodeId, + SingleNodeShutdownMetadata.builder() + .setNodeId(nodeId) + .setType(SingleNodeShutdownMetadata.Type.REMOVE) + .setStartedAtMillis(randomNonNegativeLong()) + .setReason("test") + .build() + ) + ) + ) + ); + final var snapshotsInProgress = SnapshotsInProgress.EMPTY + // mark nodeID as shutting down for removal + .withUpdatedNodeIdsForRemoval(clusterStateWithShutdownMetadata) + // create a running snapshot with shardId paused + .withUpdatedEntriesForRepo( + repositoryName, + List.of( + SnapshotsInProgress.Entry.snapshot( + snapshot, + randomBoolean(), + randomBoolean(), + SnapshotsInProgress.State.STARTED, + Map.of( + shardId.getIndexName(), + new IndexId(shardId.getIndexName(), randomUUID()), + otherIndex, + new IndexId(otherIndex, randomUUID()) + ), + List.of(), + List.of(), + randomNonNegativeLong(), + randomNonNegativeLong(), + Map.of( + 
shardId, + new SnapshotsInProgress.ShardSnapshotStatus( + nodeId, + SnapshotsInProgress.ShardState.PAUSED_FOR_NODE_REMOVAL, + ShardGeneration.newGeneration(random()) + ), + new ShardId(otherIndex, randomUUID(), 0), + new SnapshotsInProgress.ShardSnapshotStatus( + nodeId, + SnapshotsInProgress.ShardState.INIT, + ShardGeneration.newGeneration(random()) + ) + ), + null, + Map.of(), + IndexVersion.current() + ) + ) + ); + + // if the node is marked for shutdown then the shard can move + + final var routingAllocationWithShutdownMetadata = new RoutingAllocation( + new AllocationDeciders(List.of(decider)), + ClusterState.builder(clusterStateWithShutdownMetadata).putCustom(SnapshotsInProgress.TYPE, snapshotsInProgress).build(), + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + randomNonNegativeLong() + ); + routingAllocationWithShutdownMetadata.setDebugMode(RoutingAllocation.DebugMode.ON); + + final var decisionWithShutdownMetadata = decider.canAllocate( + TestShardRouting.newShardRouting(shardId, nodeId, true, ShardRoutingState.STARTED), + null, + routingAllocationWithShutdownMetadata + ); + + assertEquals(Decision.Type.YES, decisionWithShutdownMetadata.type()); + assertEquals("the shard is not being snapshotted", decisionWithShutdownMetadata.getExplanation()); + + // if the node is not marked for shutdown then the shard is fixed in place + + final var routingAllocationWithoutShutdownMetadata = new RoutingAllocation( + new AllocationDeciders(List.of(decider)), + ClusterState.builder(ClusterName.DEFAULT).putCustom(SnapshotsInProgress.TYPE, snapshotsInProgress).build(), + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + randomNonNegativeLong() + ); + routingAllocationWithoutShutdownMetadata.setDebugMode(RoutingAllocation.DebugMode.ON); + + final var decisionWithoutShutdownMetadata = decider.canAllocate( + TestShardRouting.newShardRouting(shardId, nodeId, true, ShardRoutingState.STARTED), + null, + routingAllocationWithoutShutdownMetadata + ); + + assertEquals(Decision.Type.THROTTLE, decisionWithoutShutdownMetadata.type()); + assertThat( + decisionWithoutShutdownMetadata.getExplanation(), + Matchers.matchesRegex("waiting for snapshot .* of shard .* to complete on node .*") + ); + } + private ClusterState makeClusterState(ShardId shardId, SnapshotsInProgress.ShardState shardState) { return ClusterState.builder(ClusterName.DEFAULT) .putCustom(SnapshotsInProgress.TYPE, makeSnapshotsInProgress(shardId, shardState)) diff --git a/server/src/test/java/org/elasticsearch/common/bytes/ReleasableBytesReferenceStreamInputTests.java b/server/src/test/java/org/elasticsearch/common/bytes/ReleasableBytesReferenceStreamInputTests.java index dc5e9fa0ec336..ae47c55a33cf9 100644 --- a/server/src/test/java/org/elasticsearch/common/bytes/ReleasableBytesReferenceStreamInputTests.java +++ b/server/src/test/java/org/elasticsearch/common/bytes/ReleasableBytesReferenceStreamInputTests.java @@ -71,7 +71,7 @@ public void testBigIntArrayLivesAfterReleasableIsDecremented() throws IOExceptio BytesStreamOutput out = new BytesStreamOutput(); testData.writeTo(out); - ReleasableBytesReference ref = ReleasableBytesReference.wrap(out.bytes()); + ReleasableBytesReference ref = wrapAsReleasable(out.bytes()); try (IntArray in = IntArray.readFrom(ref.streamInput())) { ref.decRef(); @@ -90,7 +90,7 @@ public void testBigDoubleArrayLivesAfterReleasableIsDecremented() throws IOExcep BytesStreamOutput out = new BytesStreamOutput(); testData.writeTo(out); - ReleasableBytesReference ref = ReleasableBytesReference.wrap(out.bytes()); + 
ReleasableBytesReference ref = wrapAsReleasable(out.bytes()); try (DoubleArray in = DoubleArray.readFrom(ref.streamInput())) { ref.decRef(); @@ -109,7 +109,7 @@ public void testBigLongArrayLivesAfterReleasableIsDecremented() throws IOExcepti BytesStreamOutput out = new BytesStreamOutput(); testData.writeTo(out); - ReleasableBytesReference ref = ReleasableBytesReference.wrap(out.bytes()); + ReleasableBytesReference ref = wrapAsReleasable(out.bytes()); try (LongArray in = LongArray.readFrom(ref.streamInput())) { ref.decRef(); @@ -128,7 +128,7 @@ public void testBigByteArrayLivesAfterReleasableIsDecremented() throws IOExcepti BytesStreamOutput out = new BytesStreamOutput(); testData.writeTo(out); - ReleasableBytesReference ref = ReleasableBytesReference.wrap(out.bytes()); + ReleasableBytesReference ref = wrapAsReleasable(out.bytes()); try (ByteArray in = ByteArray.readFrom(ref.streamInput())) { ref.decRef(); @@ -140,4 +140,7 @@ public void testBigByteArrayLivesAfterReleasableIsDecremented() throws IOExcepti assertThat(ref.hasReferences(), equalTo(false)); } + public static ReleasableBytesReference wrapAsReleasable(BytesReference bytesReference) { + return new ReleasableBytesReference(bytesReference, () -> {}); + } } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoBoundingBoxTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoBoundingBoxTests.java index 5e20f954d079d..e7b1404306920 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoBoundingBoxTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoBoundingBoxTests.java @@ -29,10 +29,11 @@ public class GeoBoundingBoxTests extends ESTestCase { public void testInvalidParseInvalidWKT() throws IOException { XContentBuilder bboxBuilder = XContentFactory.jsonBuilder().startObject().field("wkt", "invalid").endObject(); - XContentParser parser = createParser(bboxBuilder); - parser.nextToken(); - ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> GeoBoundingBox.parseBoundingBox(parser)); - assertThat(e.getMessage(), equalTo("failed to parse WKT bounding box")); + try (XContentParser parser = createParser(bboxBuilder)) { + parser.nextToken(); + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> GeoBoundingBox.parseBoundingBox(parser)); + assertThat(e.getMessage(), equalTo("failed to parse WKT bounding box")); + } } public void testInvalidParsePoint() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/common/geo/SpatialPointTests.java b/server/src/test/java/org/elasticsearch/common/geo/SpatialPointTests.java new file mode 100644 index 0000000000000..bb26193eb5023 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/geo/SpatialPointTests.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + + package org.elasticsearch.common.geo; + + import org.apache.lucene.tests.geo.GeoTestUtil; + import org.elasticsearch.test.ESTestCase; + + import static org.hamcrest.Matchers.equalTo; + import static org.hamcrest.Matchers.greaterThan; + import static org.hamcrest.Matchers.lessThan; + import static org.hamcrest.Matchers.not; + + public class SpatialPointTests extends ESTestCase { + + public void testEqualsAndHashcode() { + for (int i = 0; i < 100; i++) { + SpatialPoint point = randomGeoPoint(); + GeoPoint geoPoint = new GeoPoint(point); + TestPoint testPoint = new TestPoint(point); + TestPoint testPoint2 = new TestPoint(point); + assertEqualsAndHashcode("Same point", point, point); + assertEqualsAndHashcode("Same geo-point", point, geoPoint); + assertNotEqualsAndHashcode("Same location, but different class", point, testPoint); + assertEqualsAndHashcode("Same location, same class", testPoint, testPoint2); + } + } + + public void testCompareTo() { + for (int i = 0; i < 100; i++) { + SpatialPoint point = randomValueOtherThanMany(p -> p.getX() < -170 || p.getX() > 170, SpatialPointTests::randomGeoPoint); + GeoPoint smaller = new GeoPoint(point.getY(), point.getX() - 1); + GeoPoint bigger = new GeoPoint(point.getY(), point.getX() + 1); + TestPoint testSmaller = new TestPoint(smaller); + TestPoint testBigger = new TestPoint(bigger); + assertThat(smaller + " smaller than " + point, smaller.compareTo(point), lessThan(0)); + assertThat(bigger + " bigger than " + point, bigger.compareTo(point), greaterThan(0)); + assertThat(testSmaller + " smaller than " + testBigger, testSmaller.compareTo(testBigger), lessThan(0)); + // TestPoint always compares greater than GeoPoint + assertThat(testSmaller + " bigger than " + point, testSmaller.compareTo(point), greaterThan(0)); + assertThat(testBigger + " bigger than " + point, testBigger.compareTo(point), greaterThan(0)); + } + } + + private void assertEqualsAndHashcode(String message, SpatialPoint a, SpatialPoint b) { + assertThat("Equals: " + message, a, equalTo(b)); + assertThat("Hashcode: " + message, a.hashCode(), equalTo(b.hashCode())); + assertThat("Compare: " + message, a.compareTo(b), equalTo(0)); + } + + private void assertNotEqualsAndHashcode(String message, SpatialPoint a, SpatialPoint b) { + assertThat("Equals: " + message, a, not(equalTo(b))); + assertThat("Hashcode: " + message, a.hashCode(), not(equalTo(b.hashCode()))); + assertThat("Compare: " + message, a.compareTo(b), not(equalTo(0))); + } + + private static GeoPoint randomGeoPoint() { + return new GeoPoint(GeoTestUtil.nextLatitude(), GeoTestUtil.nextLongitude()); + } + + /** + * This test class used to be trivial when SpatialPoint was a concrete class. + * If we ever revert back to a concrete class, we can simplify this test class. + * The only requirement is that it implements SpatialPoint, but has a different class name. 
+ */ + private static class TestPoint implements SpatialPoint { + double x; + double y; + + private TestPoint(SpatialPoint template) { + this.x = template.getX(); + this.y = template.getY(); + } + + @Override + public double getX() { + return x; + } + + @Override + public double getY() { + return y; + } + + @Override + public int hashCode() { + return 31 * 31 * getClass().getSimpleName().hashCode() + 31 * Double.hashCode(x) + Double.hashCode(y); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + SpatialPoint point = (SpatialPoint) obj; + return (Double.compare(point.getX(), x) == 0) && Double.compare(point.getY(), y) == 0; + } + + @Override + public String toString() { + return toWKT(); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java index 0e54a9a49aa00..f60a5a5fc601a 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java @@ -692,7 +692,7 @@ public void testReadMapByUsingWriteMapWithConsistentOrder() throws IOException { try (TestStreamOutput streamOut = new TestStreamOutput()) { streamOut.writeMapWithConsistentOrder(streamOutMap); StreamInput in = StreamInput.wrap(BytesReference.toBytes(streamOut.bytes())); - Map streamInMap = in.readMap(); + Map streamInMap = in.readGenericMap(); assertEquals(streamOutMap, streamInMap); } } diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutputTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutputTests.java index 3077944490d5a..fd54dd12ce189 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutputTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutputTests.java @@ -788,7 +788,7 @@ public void testReadMapByUsingWriteMapWithConsistentOrder() throws IOException { try (RecyclerBytesStreamOutput streamOut = new RecyclerBytesStreamOutput(recycler)) { streamOut.writeMapWithConsistentOrder(streamOutMap); StreamInput in = StreamInput.wrap(BytesReference.toBytes(streamOut.bytes())); - Map streamInMap = in.readMap(); + Map streamInMap = in.readGenericMap(); assertEquals(streamOutMap, streamInMap); } } diff --git a/server/src/test/java/org/elasticsearch/common/logging/HeaderWarningTests.java b/server/src/test/java/org/elasticsearch/common/logging/HeaderWarningTests.java index 491b3676911c8..997b076b328d9 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/HeaderWarningTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/HeaderWarningTests.java @@ -318,8 +318,8 @@ public void testAddComplexWarning() { + ".monitoring-beats-mb => [.monitoring-beats-8-*],.monitoring-ent-search-mb => [.monitoring-ent-search-8-*]," + ".monitoring-es-mb => [.monitoring-es-8-*],.monitoring-kibana-mb => [.monitoring-kibana-8-*]," + ".monitoring-logstash-mb => [.monitoring-logstash-8-*],.profiling-ilm-lock => [.profiling-ilm-lock*]," - + ".slm-history => [.slm-history-6*],.watch-history-16 => [.watcher-history-16*]," - + "behavioral_analytics-events-default => [behavioral_analytics-events-*],ilm-history => [ilm-history-6*]," + + ".slm-history => [.slm-history-7*],.watch-history-16 => [.watcher-history-16*]," + + 
"behavioral_analytics-events-default => [behavioral_analytics-events-*],ilm-history => [ilm-history-7*]," + "logs => [logs-*-*],metrics => [metrics-*-*],profiling-events => [profiling-events*],profiling-executables => " + "[profiling-executables*],profiling-metrics => [profiling-metrics*],profiling-returnpads-private => " + "[.profiling-returnpads-private*],profiling-sq-executables => [.profiling-sq-executables*]," diff --git a/server/src/test/java/org/elasticsearch/common/network/NetworkUtilsTests.java b/server/src/test/java/org/elasticsearch/common/network/NetworkUtilsTests.java index 2c6b4e0983b2b..30687e35a8ad9 100644 --- a/server/src/test/java/org/elasticsearch/common/network/NetworkUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/common/network/NetworkUtilsTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.common.network; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.hamcrest.OptionalMatchers; import java.net.InetAddress; import java.net.NetworkInterface; @@ -17,6 +16,8 @@ import java.util.Optional; import static org.elasticsearch.common.network.NetworkUtils.getInterfaces; +import static org.elasticsearch.test.LambdaMatchers.transformedMatch; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresentWith; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -80,8 +81,7 @@ public void testMaybeGetInterfaceByName() throws Exception { networkInterfaces, netIf.getName() ); - assertThat(maybeNetworkInterface, OptionalMatchers.isPresent()); - assertThat(maybeNetworkInterface.get().getName(), equalTo(netIf.getName())); + assertThat(maybeNetworkInterface, isPresentWith(transformedMatch(NetworkInterface::getName, equalTo(netIf.getName())))); } } diff --git a/server/src/test/java/org/elasticsearch/common/regex/RegexTests.java b/server/src/test/java/org/elasticsearch/common/regex/RegexTests.java index f7667a948c048..f010b233f40c2 100644 --- a/server/src/test/java/org/elasticsearch/common/regex/RegexTests.java +++ b/server/src/test/java/org/elasticsearch/common/regex/RegexTests.java @@ -16,6 +16,8 @@ import java.util.Random; import java.util.regex.Pattern; +import static org.elasticsearch.test.LambdaMatchers.falseWith; +import static org.elasticsearch.test.LambdaMatchers.trueWith; import static org.hamcrest.Matchers.equalTo; public class RegexTests extends ESTestCase { @@ -213,25 +215,25 @@ private void assertMatchesNone(Automaton automaton, String... 
strings) { } public void testSimpleMatcher() { - assertFalse(Regex.simpleMatcher((String[]) null).test("abc")); - assertFalse(Regex.simpleMatcher().test("abc")); - assertTrue(Regex.simpleMatcher("abc").test("abc")); - assertFalse(Regex.simpleMatcher("abc").test("abd")); - - assertTrue(Regex.simpleMatcher("abc", "xyz").test("abc")); - assertTrue(Regex.simpleMatcher("abc", "xyz").test("xyz")); - assertFalse(Regex.simpleMatcher("abc", "xyz").test("abd")); - assertFalse(Regex.simpleMatcher("abc", "xyz").test("xyy")); - - assertTrue(Regex.simpleMatcher("abc", "*").test("abc")); - assertTrue(Regex.simpleMatcher("abc", "*").test("abd")); - - assertTrue(Regex.simpleMatcher("a*c").test("abc")); - assertFalse(Regex.simpleMatcher("a*c").test("abd")); - - assertTrue(Regex.simpleMatcher("a*c", "x*z").test("abc")); - assertTrue(Regex.simpleMatcher("a*c", "x*z").test("xyz")); - assertFalse(Regex.simpleMatcher("a*c", "x*z").test("abd")); - assertFalse(Regex.simpleMatcher("a*c", "x*z").test("xyy")); + assertThat(Regex.simpleMatcher((String[]) null), falseWith("abc")); + assertThat(Regex.simpleMatcher(), falseWith("abc")); + assertThat(Regex.simpleMatcher("abc"), trueWith("abc")); + assertThat(Regex.simpleMatcher("abc"), falseWith("abd")); + + assertThat(Regex.simpleMatcher("abc", "xyz"), trueWith("abc")); + assertThat(Regex.simpleMatcher("abc", "xyz"), trueWith("xyz")); + assertThat(Regex.simpleMatcher("abc", "xyz"), falseWith("abd")); + assertThat(Regex.simpleMatcher("abc", "xyz"), falseWith("xyy")); + + assertThat(Regex.simpleMatcher("abc", "*"), trueWith("abc")); + assertThat(Regex.simpleMatcher("abc", "*"), trueWith("abd")); + + assertThat(Regex.simpleMatcher("a*c"), trueWith("abc")); + assertThat(Regex.simpleMatcher("a*c"), falseWith("abd")); + + assertThat(Regex.simpleMatcher("a*c", "x*z"), trueWith("abc")); + assertThat(Regex.simpleMatcher("a*c", "x*z"), trueWith("xyz")); + assertThat(Regex.simpleMatcher("a*c", "x*z"), falseWith("abd")); + assertThat(Regex.simpleMatcher("a*c", "x*z"), falseWith("xyy")); } } diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java index 965f305c3c23f..c94ab22352741 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java @@ -507,8 +507,10 @@ public void testToAndFromXContent() throws IOException { builder.startObject(); settings.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(Settings.FLAT_SETTINGS_PARAM, "" + flatSettings))); builder.endObject(); - XContentParser parser = createParser(builder); - Settings build = Settings.fromXContent(parser); + Settings build; + try (XContentParser parser = createParser(builder)) { + build = Settings.fromXContent(parser); + } assertEquals(5, build.size()); assertEquals(Arrays.asList("1", "2", "3"), build.getAsList("foo.bar.baz")); assertEquals(2, build.getAsInt("foo.foobar", 0).intValue()); diff --git a/server/src/test/java/org/elasticsearch/common/util/CopyOnFirstWriteMapTests.java b/server/src/test/java/org/elasticsearch/common/util/CopyOnFirstWriteMapTests.java index 41870862c437d..0c34bfa66524b 100644 --- a/server/src/test/java/org/elasticsearch/common/util/CopyOnFirstWriteMapTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/CopyOnFirstWriteMapTests.java @@ -21,7 +21,16 @@ public class CopyOnFirstWriteMapTests extends ESTestCase { public void 
testShouldNotCopyIfThereWereNoUpdates() { var source = Map.of("key", "value"); var copyOnFirstWrite = new CopyOnFirstWriteMap<>(source); - source.get("key"); + var copy = copyOnFirstWrite.toImmutableMap(); + + assertThat(copy, sameInstance(source)); + assertThat(copy, equalTo(source)); + } + + public void testShouldNotCopyWhenPuttingTheSameValue() { + var source = Map.of("key", "value"); + var copyOnFirstWrite = new CopyOnFirstWriteMap<>(source); + copyOnFirstWrite.put("key", "value"); var copy = copyOnFirstWrite.toImmutableMap(); assertThat(copy, sameInstance(source)); diff --git a/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java index 0cac71b6751ff..f6b310abac770 100644 --- a/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.gateway; import org.apache.lucene.index.CorruptIndexException; +import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -519,7 +520,14 @@ private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas( .routingTable(routingTableBuilder.build()) .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)) .build(); - return new RoutingAllocation(deciders, state.mutableRoutingNodes(), state, null, null, System.nanoTime()); + return new RoutingAllocation( + deciders, + state.mutableRoutingNodes(), + state, + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + System.nanoTime() + ); } private void assertClusterHealthStatus(RoutingAllocation allocation, ClusterHealthStatus expectedStatus) { diff --git a/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java b/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java index 06e83b8051bb6..306779e90ef2e 100644 --- a/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java @@ -206,7 +206,16 @@ public void testPreferCopyWithHighestMatchingOperations() { ); } + public void testNotCancellingRecoveryOnIrrelevantShard() { + // Re-use the setup from testCancelRecoveryIfFoundCopyWithNoopRetentionLease so that the only difference is the shard's relevance + runNoopRetentionLeaseTest(false); + } + public void testCancelRecoveryIfFoundCopyWithNoopRetentionLease() { + runNoopRetentionLeaseTest(true); + } + + private void runNoopRetentionLeaseTest(boolean isRelevantShard) { final UnassignedInfo unassignedInfo; final Set failedNodeIds; if (randomBoolean()) { @@ -237,13 +246,19 @@ public void testCancelRecoveryIfFoundCopyWithNoopRetentionLease() { ); testAllocator.addData(node2, "NO_MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.addData(node3, randomSyncId(), new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); - testAllocator.processExistingRecoveries(allocation); - assertThat(allocation.routingNodesChanged(), equalTo(true)); - List unassignedShards = shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED); - assertThat(unassignedShards, hasSize(1)); - assertThat(unassignedShards.get(0).shardId(), equalTo(shardId)); -
assertThat(unassignedShards.get(0).unassignedInfo().getNumFailedAllocations(), equalTo(0)); - assertThat(unassignedShards.get(0).unassignedInfo().getFailedNodeIds(), equalTo(failedNodeIds)); + testAllocator.processExistingRecoveries(allocation, shardRouting -> isRelevantShard); + + if (isRelevantShard) { + assertThat(allocation.routingNodesChanged(), equalTo(true)); + List unassignedShards = shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED); + assertThat(unassignedShards, hasSize(1)); + assertThat(unassignedShards.get(0).shardId(), equalTo(shardId)); + assertThat(unassignedShards.get(0).unassignedInfo().getNumFailedAllocations(), equalTo(0)); + assertThat(unassignedShards.get(0).unassignedInfo().getFailedNodeIds(), equalTo(failedNodeIds)); + } else { + assertThat(allocation.routingNodesChanged(), equalTo(false)); + assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED).size(), equalTo(0)); + } } public void testNotCancellingRecoveryIfCurrentRecoveryHasRetentionLease() { @@ -263,7 +278,7 @@ public void testNotCancellingRecoveryIfCurrentRecoveryHasRetentionLease() { ); testAllocator.addData(node2, randomSyncId(), new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.addData(node3, randomSyncId(), new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); - testAllocator.processExistingRecoveries(allocation); + testAllocator.processExistingRecoveries(allocation, shardRouting -> true); assertThat(allocation.routingNodesChanged(), equalTo(false)); assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED).size(), equalTo(0)); } @@ -278,7 +293,7 @@ public void testNotCancelIfPrimaryDoesNotHaveValidRetentionLease() { ); testAllocator.addData(node2, "NOT_MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.addData(node3, "NOT_MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); - testAllocator.processExistingRecoveries(allocation); + testAllocator.processExistingRecoveries(allocation, shardRouting -> true); assertThat(allocation.routingNodesChanged(), equalTo(false)); assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED).size(), equalTo(0)); } @@ -427,7 +442,7 @@ public void testCancelRecoveryBetterSyncId() { testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) .addData(node2, "NO_MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) .addData(node3, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); - testAllocator.processExistingRecoveries(allocation); + testAllocator.processExistingRecoveries(allocation, shardRouting -> true); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED).size(), equalTo(1)); assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); @@ -471,7 +486,7 @@ public void testNotCancellingRecoveryIfSyncedOnExistingRecovery() { ); testAllocator.addData(node2, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.addData(node3, randomSyncId(), new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); - 
testAllocator.processExistingRecoveries(allocation); + testAllocator.processExistingRecoveries(allocation, shardRouting -> true); assertThat(allocation.routingNodesChanged(), equalTo(false)); assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED).size(), equalTo(0)); } @@ -480,7 +495,7 @@ public void testNotCancellingRecovery() { RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders()); testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) .addData(node2, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); - testAllocator.processExistingRecoveries(allocation); + testAllocator.processExistingRecoveries(allocation, shardRouting -> true); assertThat(allocation.routingNodesChanged(), equalTo(false)); assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED).size(), equalTo(0)); } @@ -517,7 +532,7 @@ public void testDoNotCancelForBrokenNode() { ) .addData(node2, randomSyncId(), new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) .addData(node3, randomSyncId(), new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); - testAllocator.processExistingRecoveries(allocation); + testAllocator.processExistingRecoveries(allocation, shardRouting -> true); assertThat(allocation.routingNodesChanged(), equalTo(false)); assertThat(shardsWithState(allocation.routingNodes(), ShardRoutingState.UNASSIGNED), empty()); } diff --git a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java index 559d3fce9cebf..4f25e00f8c083 100644 --- a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java @@ -1013,6 +1013,7 @@ public void testStopForceClosesConnectionDuringRequest() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103782") public void testStopClosesChannelAfterRequest() throws Exception { var grace = LONG_GRACE_PERIOD_MS; try (var noTimeout = LogExpectation.unexpectedTimeout(grace); var transport = new TestHttpServerTransport(gracePeriod(grace))) { diff --git a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java index 409023afc4576..a22f17702b157 100644 --- a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java +++ b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java @@ -12,7 +12,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ListenableActionFuture; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -495,7 +495,7 @@ public void testTraceStopped() { executeRequest(Settings.EMPTY, "request-host"); - verify(tracer).setAttribute(argThat(id -> id.getRawId().startsWith("rest-")), eq("http.status_code"), eq(200L)); + verify(tracer).setAttribute(argThat(id -> id.getSpanId().startsWith("rest-")), eq("http.status_code"), eq(200L)); 
verify(tracer).stopTrace(any(RestRequest.class)); } @@ -570,7 +570,7 @@ public void close() { @TestLogging(reason = "Get HttpTracer to output trace logs", value = "org.elasticsearch.http.HttpTracer:TRACE") public void testHttpTracerSendResponseSuccess() { - final ListenableActionFuture sendResponseFuture = new ListenableActionFuture<>(); + final SubscribableListener sendResponseFuture = new SubscribableListener<>(); final HttpChannel httpChannel = new FakeRestRequest.FakeHttpChannel(InetSocketAddress.createUnresolved("127.0.0.1", 9200)) { @Override public void sendResponse(HttpResponse response, ActionListener listener) { diff --git a/server/src/test/java/org/elasticsearch/index/codec/PerFieldMapperCodecTests.java b/server/src/test/java/org/elasticsearch/index/codec/PerFieldMapperCodecTests.java index e2a2c72d3eae3..d3ad4dd8586d5 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/PerFieldMapperCodecTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/PerFieldMapperCodecTests.java @@ -15,11 +15,14 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.MapperTestUtils; +import org.elasticsearch.index.codec.bloomfilter.ES87BloomFilterPostingsFormat; +import org.elasticsearch.index.codec.postings.ES812PostingsFormat; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; public class PerFieldMapperCodecTests extends ESTestCase { @@ -27,23 +30,29 @@ public class PerFieldMapperCodecTests extends ESTestCase { public void testUseBloomFilter() throws IOException { PerFieldMapperCodec perFieldMapperCodec = createCodec(false, randomBoolean(), false); assertThat(perFieldMapperCodec.useBloomFilter("_id"), is(true)); + assertThat(perFieldMapperCodec.getPostingsFormatForField("_id"), instanceOf(ES87BloomFilterPostingsFormat.class)); assertThat(perFieldMapperCodec.useBloomFilter("another_field"), is(false)); + assertThat(perFieldMapperCodec.getPostingsFormatForField("another_field"), instanceOf(ES812PostingsFormat.class)); } public void testUseBloomFilterWithTimestampFieldEnabled() throws IOException { PerFieldMapperCodec perFieldMapperCodec = createCodec(true, true, false); assertThat(perFieldMapperCodec.useBloomFilter("_id"), is(true)); + assertThat(perFieldMapperCodec.getPostingsFormatForField("_id"), instanceOf(ES87BloomFilterPostingsFormat.class)); assertThat(perFieldMapperCodec.useBloomFilter("another_field"), is(false)); + assertThat(perFieldMapperCodec.getPostingsFormatForField("another_field"), instanceOf(ES812PostingsFormat.class)); } public void testUseBloomFilterWithTimestampFieldEnabled_noTimeSeriesMode() throws IOException { PerFieldMapperCodec perFieldMapperCodec = createCodec(true, false, false); assertThat(perFieldMapperCodec.useBloomFilter("_id"), is(false)); + assertThat(perFieldMapperCodec.getPostingsFormatForField("_id"), instanceOf(ES812PostingsFormat.class)); } public void testUseBloomFilterWithTimestampFieldEnabled_disableBloomFilter() throws IOException { PerFieldMapperCodec perFieldMapperCodec = createCodec(true, true, true); assertThat(perFieldMapperCodec.useBloomFilter("_id"), is(false)); + assertThat(perFieldMapperCodec.getPostingsFormatForField("_id"), instanceOf(ES812PostingsFormat.class)); assertWarnings( "[index.bloom_filter_for_id_field.enabled] setting was deprecated in Elasticsearch and will be removed in a future 
release." ); diff --git a/server/src/test/java/org/elasticsearch/index/codec/postings/ES812PostingsFormatTests.java b/server/src/test/java/org/elasticsearch/index/codec/postings/ES812PostingsFormatTests.java new file mode 100644 index 0000000000000..b11ab47102288 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/codec/postings/ES812PostingsFormatTests.java @@ -0,0 +1,138 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. + */ +package org.elasticsearch.index.codec.postings; + +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.CompetitiveImpactAccumulator; +import org.apache.lucene.codecs.lucene90.blocktree.FieldReader; +import org.apache.lucene.codecs.lucene90.blocktree.Stats; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.Impact; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.store.ByteArrayDataInput; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.tests.analysis.MockAnalyzer; +import org.apache.lucene.tests.index.BasePostingsFormatTestCase; +import org.apache.lucene.tests.util.TestUtil; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +public class ES812PostingsFormatTests extends BasePostingsFormatTestCase { + private final Codec codec = TestUtil.alwaysPostingsFormat(new ES812PostingsFormat()); + + @Override + protected Codec getCodec() { + return codec; + } + + /** Make sure the final sub-block(s) are not skipped. 
*/ + public void testFinalBlock() throws Exception { + Directory d = newDirectory(); + IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random()))); + for (int i = 0; i < 25; i++) { + Document doc = new Document(); + doc.add(newStringField("field", Character.toString((char) (97 + i)), Field.Store.NO)); + doc.add(newStringField("field", "z" + Character.toString((char) (97 + i)), Field.Store.NO)); + w.addDocument(doc); + } + w.forceMerge(1); + + DirectoryReader r = DirectoryReader.open(w); + assertEquals(1, r.leaves().size()); + FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field"); + // We should see exactly two blocks: one root block (prefix empty string) and one block for z* + // terms (prefix z): + Stats stats = field.getStats(); + assertEquals(0, stats.floorBlockCount); + assertEquals(2, stats.nonFloorBlockCount); + r.close(); + w.close(); + d.close(); + } + + public void testImpactSerialization() throws IOException { + // omit norms and omit freqs + doTestImpactSerialization(Collections.singletonList(new Impact(1, 1L))); + + // omit freqs + doTestImpactSerialization(Collections.singletonList(new Impact(1, 42L))); + // omit freqs with very large norms + doTestImpactSerialization(Collections.singletonList(new Impact(1, -100L))); + + // omit norms + doTestImpactSerialization(Collections.singletonList(new Impact(30, 1L))); + // omit norms with large freq + doTestImpactSerialization(Collections.singletonList(new Impact(500, 1L))); + + // freqs and norms, basic + doTestImpactSerialization( + Arrays.asList( + new Impact(1, 7L), + new Impact(3, 9L), + new Impact(7, 10L), + new Impact(15, 11L), + new Impact(20, 13L), + new Impact(28, 14L) + ) + ); + + // freqs and norms, high values + doTestImpactSerialization( + Arrays.asList( + new Impact(2, 2L), + new Impact(10, 10L), + new Impact(12, 50L), + new Impact(50, -100L), + new Impact(1000, -80L), + new Impact(1005, -3L) + ) + ); + } + + private void doTestImpactSerialization(List impacts) throws IOException { + CompetitiveImpactAccumulator acc = new CompetitiveImpactAccumulator(); + for (Impact impact : impacts) { + acc.add(impact.freq, impact.norm); + } + try (Directory dir = newDirectory()) { + try (IndexOutput out = dir.createOutput("foo", IOContext.DEFAULT)) { + ES812SkipWriter.writeImpacts(acc, out); + } + try (IndexInput in = dir.openInput("foo", IOContext.DEFAULT)) { + byte[] b = new byte[Math.toIntExact(in.length())]; + in.readBytes(b, 0, b.length); + List impacts2 = ES812ScoreSkipReader.readImpacts( + new ByteArrayDataInput(b), + new ES812ScoreSkipReader.MutableImpactList() + ); + assertEquals(impacts, impacts2); + } + } + } +} diff --git a/server/src/test/java/org/elasticsearch/index/codec/postings/ForUtilTests.java b/server/src/test/java/org/elasticsearch/index/codec/postings/ForUtilTests.java new file mode 100644 index 0000000000000..14e8d3344c3dc --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/codec/postings/ForUtilTests.java @@ -0,0 +1,99 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. + */ +package org.elasticsearch.index.codec.postings; + +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; + +import org.apache.lucene.store.ByteBuffersDirectory; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.tests.util.LuceneTestCase; +import org.apache.lucene.tests.util.TestUtil; +import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.packed.PackedInts; + +import java.io.IOException; +import java.util.Arrays; + +public class ForUtilTests extends LuceneTestCase { + + public void testEncodeDecode() throws IOException { + final int iterations = RandomNumbers.randomIntBetween(random(), 50, 1000); + final int[] values = new int[iterations * ForUtil.BLOCK_SIZE]; + + for (int i = 0; i < iterations; ++i) { + final int bpv = TestUtil.nextInt(random(), 1, 31); + for (int j = 0; j < ForUtil.BLOCK_SIZE; ++j) { + values[i * ForUtil.BLOCK_SIZE + j] = RandomNumbers.randomIntBetween(random(), 0, (int) PackedInts.maxValue(bpv)); + } + } + + final Directory d = new ByteBuffersDirectory(); + final long endPointer; + + { + // encode + IndexOutput out = d.createOutput("test.bin", IOContext.DEFAULT); + final ForUtil forUtil = new ForUtil(); + + for (int i = 0; i < iterations; ++i) { + long[] source = new long[ForUtil.BLOCK_SIZE]; + long or = 0; + for (int j = 0; j < ForUtil.BLOCK_SIZE; ++j) { + source[j] = values[i * ForUtil.BLOCK_SIZE + j]; + or |= source[j]; + } + final int bpv = PackedInts.bitsRequired(or); + out.writeByte((byte) bpv); + forUtil.encode(source, bpv, out); + } + endPointer = out.getFilePointer(); + out.close(); + } + + { + // decode + IndexInput in = d.openInput("test.bin", IOContext.READONCE); + final ForUtil forUtil = new ForUtil(); + for (int i = 0; i < iterations; ++i) { + final int bitsPerValue = in.readByte(); + final long currentFilePointer = in.getFilePointer(); + final long[] restored = new long[ForUtil.BLOCK_SIZE]; + forUtil.decode(bitsPerValue, in, restored); + int[] ints = new int[ForUtil.BLOCK_SIZE]; + for (int j = 0; j < ForUtil.BLOCK_SIZE; ++j) { + ints[j] = Math.toIntExact(restored[j]); + } + assertArrayEquals( + Arrays.toString(ints), + ArrayUtil.copyOfSubArray(values, i * ForUtil.BLOCK_SIZE, (i + 1) * ForUtil.BLOCK_SIZE), + ints + ); + assertEquals(forUtil.numBytes(bitsPerValue), in.getFilePointer() - currentFilePointer); + } + assertEquals(endPointer, in.getFilePointer()); + in.close(); + } + + d.close(); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 25d3298f82bd7..16bf27207c130 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -91,7 +91,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; 
import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; @@ -186,6 +185,7 @@ import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresent; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.sameInstance; import static org.hamcrest.Matchers.contains; @@ -691,8 +691,8 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog) throws I } @Override - public void flush(boolean force, boolean waitIfOngoing, ActionListener listener) throws EngineException { - super.flush(force, waitIfOngoing, listener); + protected void flushHoldingLock(boolean force, boolean waitIfOngoing, ActionListener listener) { + super.flushHoldingLock(force, waitIfOngoing, listener); postFlushSegmentInfoGen.set(getLastCommittedSegmentInfos().getGeneration()); assertThat(getPreCommitSegmentGeneration(), equalTo(preCommitGen.get())); } @@ -1347,7 +1347,7 @@ public void testSyncedFlushVanishesOnReplay() throws IOException { } void syncFlush(IndexWriter writer, InternalEngine engine, String syncId) throws IOException { - try (ReleasableLock ignored = engine.readLock.acquire()) { + try (var ignored = engine.acquireEnsureOpenRef()) { Map userData = new HashMap<>(); writer.getLiveCommitData().forEach(e -> userData.put(e.getKey(), e.getValue())); userData.put(Engine.SYNC_COMMIT_ID, syncId); @@ -7720,7 +7720,7 @@ private static void assertCommitGenerations(List commits, List commits, long generation) { var releasable = commits.keySet().stream().filter(c -> c.getGeneration() == generation).findFirst(); - assertThat(releasable.isPresent(), is(true)); + assertThat(releasable, isPresent()); Engine.IndexCommitRef indexCommitRef = commits.get(releasable.get()); try { indexCommitRef.close(); diff --git a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTestUtils.java b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTestUtils.java index 2c1daa09340d7..3185769bdab82 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTestUtils.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTestUtils.java @@ -61,11 +61,11 @@ public static void pruneTombstones(LiveVersionMap map, long maxTimestampToPrune, map.pruneTombstones(maxTimestampToPrune, maxSeqNoToPrune); } - static IndexVersionValue randomIndexVersionValue() { + public static IndexVersionValue randomIndexVersionValue() { return new IndexVersionValue(randomTranslogLocation(), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()); } - static Translog.Location randomTranslogLocation() { + public static Translog.Location randomTranslogLocation() { if (randomBoolean()) { return null; } else { @@ -93,6 +93,10 @@ public static boolean isSafeAccessRequired(LiveVersionMap map) { return map.isSafeAccessRequired(); } + public static void enforceSafeAccess(LiveVersionMap map) { + map.enforceSafeAccess(); + } + public static LiveVersionMapArchive getArchive(LiveVersionMap map) { return map.getArchive(); } diff --git a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java 
b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java index a9b42ccdef248..5ca7aadc35fa7 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java @@ -13,7 +13,9 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.Constants; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.test.ESTestCase; @@ -23,11 +25,16 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Supplier; +import java.util.stream.IntStream; +import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency; +import static org.elasticsearch.core.Tuple.tuple; import static org.elasticsearch.index.engine.LiveVersionMapTestUtils.randomIndexVersionValue; import static org.elasticsearch.index.engine.LiveVersionMapTestUtils.randomTranslogLocation; import static org.hamcrest.Matchers.empty; @@ -36,7 +43,6 @@ import static org.hamcrest.Matchers.nullValue; public class LiveVersionMapTests extends ESTestCase { - public void testRamBytesUsed() throws Exception { LiveVersionMap map = new LiveVersionMap(); for (int i = 0; i < 100000; ++i) { @@ -442,4 +448,51 @@ public void testRandomlyIndexDeleteAndRefresh() throws Exception { } } } + + public void testVersionLookupRamBytesUsed() { + var vl = new LiveVersionMap.VersionLookup(newConcurrentMapWithAggressiveConcurrency()); + assertEquals(0, vl.ramBytesUsed()); + Set existingKeys = new HashSet<>(); + Supplier> randomEntry = () -> { + var key = randomBoolean() || existingKeys.isEmpty() ? 
uid(randomIdentifier()) : randomFrom(existingKeys); + return tuple(key, randomIndexVersionValue()); + }; + IntStream.range(0, randomIntBetween(10, 100)).forEach(i -> { + switch (randomIntBetween(0, 2)) { + case 0: // put + var entry = randomEntry.get(); + var previousValue = vl.put(entry.v1(), entry.v2()); + if (existingKeys.contains(entry.v1())) { + assertNotNull(previousValue); + } else { + assertNull(previousValue); + existingKeys.add(entry.v1()); + } + break; + case 1: // remove + if (existingKeys.isEmpty() == false) { + var key = randomFrom(existingKeys); + assertNotNull(vl.remove(key)); + existingKeys.remove(key); + } + break; + case 2: // merge + var toMerge = new LiveVersionMap.VersionLookup(ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency()); + IntStream.range(0, randomIntBetween(1, 100)) + .mapToObj(n -> randomEntry.get()) + .forEach(kv -> toMerge.put(kv.v1(), kv.v2())); + vl.merge(toMerge); + existingKeys.addAll(toMerge.getMap().keySet()); + break; + default: + throw new IllegalStateException("branch value unexpected"); + } + }); + long actualRamBytesUsed = vl.getMap() + .entrySet() + .stream() + .mapToLong(entry -> LiveVersionMap.VersionLookup.mapEntryBytesUsed(entry.getKey(), entry.getValue())) + .sum(); + assertEquals(actualRamBytesUsed, vl.ramBytesUsed()); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java index 3364e2e828bf2..72583b9aeb19e 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java @@ -8,20 +8,12 @@ package org.elasticsearch.index.mapper; -import org.apache.lucene.document.StoredField; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.store.Directory; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.fieldvisitor.StoredFieldLoader; import org.elasticsearch.search.fetch.StoredFieldsSpec; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; import java.util.List; @@ -32,7 +24,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.nullValue; -public class BlockSourceReaderTests extends ESTestCase { +public class BlockSourceReaderTests extends MapperServiceTestCase { public void testSingle() throws IOException { withIndex( source -> source.field("field", "foo"), @@ -56,7 +48,9 @@ public void testEmptyArray() throws IOException { } private void loadBlock(LeafReaderContext ctx, Consumer test) throws IOException { - BlockLoader loader = new BlockSourceReader.BytesRefsBlockLoader(SourceValueFetcher.toString(Set.of("field"))); + ValueFetcher valueFetcher = SourceValueFetcher.toString(Set.of("field")); + BlockSourceReader.LeafIteratorLookup lookup = BlockSourceReader.lookupFromNorms("field"); + BlockLoader loader = new BlockSourceReader.BytesRefsBlockLoader(valueFetcher, lookup); assertThat(loader.columnAtATimeReader(ctx), nullValue()); BlockLoader.RowStrideReader reader = loader.rowStrideReader(ctx); assertThat(loader.rowStrideStoredFieldSpec(), 
equalTo(StoredFieldsSpec.NEEDS_SOURCE)); @@ -74,23 +68,13 @@ private void loadBlock(LeafReaderContext ctx, Consumer test) throws I private void withIndex(CheckedConsumer buildSource, CheckedConsumer test) throws IOException { - try ( - Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter( - random(), - directory, - newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE) - ) - ) { - XContentBuilder source = JsonXContent.contentBuilder(); - source.startObject(); - buildSource.accept(source); - source.endObject(); - writer.addDocument(List.of(new StoredField(SourceFieldMapper.NAME, BytesReference.bytes(source).toBytesRef()))); - try (IndexReader reader = writer.getReader()) { - assertThat(reader.leaves(), hasSize(1)); - test.accept(reader.leaves().get(0)); - } - } + MapperService mapperService = createMapperService(fieldMapping(b -> b.field("type", "text"))); + withLuceneIndex(mapperService, writer -> { + ParsedDocument parsed = mapperService.documentParser().parseDocument(source(buildSource), mapperService.mappingLookup()); + writer.addDocuments(parsed.docs()); + }, reader -> { + assertThat(reader.leaves(), hasSize(1)); + test.accept(reader.leaves().get(0)); + }); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java index 70e2fee7a003a..6d80be5167e52 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java @@ -264,7 +264,7 @@ public List invalidExample() throws IOException { } @Override - protected Function loadBlockExpected() { + protected Function loadBlockExpected(MapperService mapper, String loaderFieldName) { // Just assert that we expect a boolean. Otherwise no munging. 
return v -> (Boolean) v; } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java index d8f063ece35c0..053e4226b3d79 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java @@ -347,65 +347,66 @@ public void testDualingQueries() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { List values = randomList(0, 2, ESTestCase::randomBoolean); String source = "{\"foo\": " + values + "}"; - XContentParser parser = createParser(JsonXContent.jsonXContent, source); - SourceToParse sourceToParse = new SourceToParse("test", new BytesArray(source), XContentType.JSON); - DocumentParserContext ctx = new TestDocumentParserContext(MappingLookup.EMPTY, sourceToParse) { - @Override - public XContentParser parser() { - return parser; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) { + SourceToParse sourceToParse = new SourceToParse("test", new BytesArray(source), XContentType.JSON); + DocumentParserContext ctx = new TestDocumentParserContext(MappingLookup.EMPTY, sourceToParse) { + @Override + public XContentParser parser() { + return parser; + } + }; + ctx.doc().add(new StoredField("_source", new BytesRef(source))); + + ctx.parser().nextToken(); + ctx.parser().nextToken(); + ctx.parser().nextToken(); + while (ctx.parser().nextToken() != Token.END_ARRAY) { + ootb.parse(ctx); } - }; - ctx.doc().add(new StoredField("_source", new BytesRef(source))); - - ctx.parser().nextToken(); - ctx.parser().nextToken(); - ctx.parser().nextToken(); - while (ctx.parser().nextToken() != Token.END_ARRAY) { - ootb.parse(ctx); - } - iw.addDocument(ctx.doc()); - try (DirectoryReader reader = iw.getReader()) { - IndexSearcher searcher = newSearcher(reader); - assertSameCount( - searcher, - source, - "*", - simpleMappedFieldType().existsQuery(mockContext()), - ootb.fieldType().existsQuery(mockContext()) - ); - boolean term = randomBoolean(); - assertSameCount( - searcher, - source, - term, - simpleMappedFieldType().termQuery(term, mockContext()), - ootb.fieldType().termQuery(term, mockContext()) - ); - List terms = randomList(0, 3, ESTestCase::randomBoolean); - assertSameCount( - searcher, - source, - terms, - simpleMappedFieldType().termsQuery(terms, mockContext()), - ootb.fieldType().termsQuery(terms, mockContext()) - ); - boolean low; - boolean high; - if (randomBoolean()) { - low = high = randomBoolean(); - } else { - low = false; - high = true; + iw.addDocument(ctx.doc()); + try (DirectoryReader reader = iw.getReader()) { + IndexSearcher searcher = newSearcher(reader); + assertSameCount( + searcher, + source, + "*", + simpleMappedFieldType().existsQuery(mockContext()), + ootb.fieldType().existsQuery(mockContext()) + ); + boolean term = randomBoolean(); + assertSameCount( + searcher, + source, + term, + simpleMappedFieldType().termQuery(term, mockContext()), + ootb.fieldType().termQuery(term, mockContext()) + ); + List terms = randomList(0, 3, ESTestCase::randomBoolean); + assertSameCount( + searcher, + source, + terms, + simpleMappedFieldType().termsQuery(terms, mockContext()), + ootb.fieldType().termsQuery(terms, mockContext()) + ); + boolean low; + boolean high; + if (randomBoolean()) { + low = high = randomBoolean(); + } else { + low = false; + high = true; + 
} + boolean includeLow = randomBoolean(); + boolean includeHigh = randomBoolean(); + assertSameCount( + searcher, + source, + (includeLow ? "[" : "(") + low + "," + high + (includeHigh ? "]" : ")"), + simpleMappedFieldType().rangeQuery(low, high, includeLow, includeHigh, null, null, null, mockContext()), + ootb.fieldType().rangeQuery(low, high, includeLow, includeHigh, null, null, null, mockContext()) + ); } - boolean includeLow = randomBoolean(); - boolean includeHigh = randomBoolean(); - assertSameCount( - searcher, - source, - (includeLow ? "[" : "(") + low + "," + high + (includeHigh ? "]" : ")"), - simpleMappedFieldType().rangeQuery(low, high, includeLow, includeHigh, null, null, null, mockContext()), - ootb.fieldType().rangeQuery(low, high, includeLow, includeHigh, null, null, null, mockContext()) - ); } } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java index 1f473d0ade35b..229d16ab85aef 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java @@ -223,7 +223,10 @@ public void testTypeParsing() throws Exception { XContentBuilder builder = jsonBuilder().startObject(); fieldMapper.toXContent(builder, new ToXContent.MapParams(Map.of("include_defaults", "true"))).endObject(); builder.close(); - Map serializedMap = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)).map(); + Map serializedMap; + try (var parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + serializedMap = parser.map(); + } Map configMap = (Map) serializedMap.get("field"); assertThat(configMap.get("analyzer").toString(), is("simple")); assertThat(configMap.get("search_analyzer").toString(), is("standard")); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java index 9e9437aa6b9db..41628dac2faba 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java @@ -706,7 +706,7 @@ public void execute() { } @Override - protected Function loadBlockExpected() { + protected Function loadBlockExpected(MapperService mapper, String loaderFieldName) { return v -> ((Number) v).longValue(); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocCountFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocCountFieldMapperTests.java index 079a79938c310..c1fd872e89f45 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocCountFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocCountFieldMapperTests.java @@ -104,8 +104,10 @@ public void testSyntheticSourceMany() throws IOException { LeafStoredFieldLoader storedFieldLoader = StoredFieldLoader.empty().getLoader(leaf, docIds); for (int docId : docIds) { String source = sourceLoaderLeaf.source(storedFieldLoader, docId).internalSourceRef().utf8ToString(); - int doc = (int) JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, source).map().get("doc"); - assertThat("doc " + docId, source, equalTo("{\"_doc_count\":" + counts.get(doc) + ",\"doc\":" + doc + "}")); + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, 
source)) { + int doc = (int) parser.map().get("doc"); + assertThat("doc " + docId, source, equalTo("{\"_doc_count\":" + counts.get(doc) + ",\"doc\":" + doc + "}")); + } } } }); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DotExpandingXContentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DotExpandingXContentParserTests.java index c55ffaaa70a16..f089648bce27f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DotExpandingXContentParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DotExpandingXContentParserTests.java @@ -22,24 +22,32 @@ public class DotExpandingXContentParserTests extends ESTestCase { private void assertXContentMatches(String dotsExpanded, String withDots) throws IOException { - XContentParser inputParser = createParser(JsonXContent.jsonXContent, withDots); final ContentPath contentPath = new ContentPath(); - XContentParser expandedParser = DotExpandingXContentParser.expandDots(inputParser, contentPath); - expandedParser.allowDuplicateKeys(true); - - XContentBuilder actualOutput = XContentBuilder.builder(JsonXContent.jsonXContent).copyCurrentStructure(expandedParser); - assertEquals(dotsExpanded, Strings.toString(actualOutput)); - - XContentParser expectedParser = createParser(JsonXContent.jsonXContent, dotsExpanded); - expectedParser.allowDuplicateKeys(true); - XContentParser actualParser = DotExpandingXContentParser.expandDots(createParser(JsonXContent.jsonXContent, withDots), contentPath); - XContentParser.Token currentToken; - while ((currentToken = actualParser.nextToken()) != null) { - assertEquals(currentToken, expectedParser.nextToken()); - assertEquals(expectedParser.currentToken(), actualParser.currentToken()); - assertEquals(actualParser.currentToken().name(), expectedParser.currentName(), actualParser.currentName()); + try ( + XContentParser inputParser = createParser(JsonXContent.jsonXContent, withDots); + XContentParser expandedParser = DotExpandingXContentParser.expandDots(inputParser, contentPath) + ) { + expandedParser.allowDuplicateKeys(true); + + XContentBuilder actualOutput = XContentBuilder.builder(JsonXContent.jsonXContent).copyCurrentStructure(expandedParser); + assertEquals(dotsExpanded, Strings.toString(actualOutput)); + + try (XContentParser expectedParser = createParser(JsonXContent.jsonXContent, dotsExpanded)) { + expectedParser.allowDuplicateKeys(true); + try ( + var p = createParser(JsonXContent.jsonXContent, withDots); + XContentParser actualParser = DotExpandingXContentParser.expandDots(p, contentPath) + ) { + XContentParser.Token currentToken; + while ((currentToken = actualParser.nextToken()) != null) { + assertEquals(currentToken, expectedParser.nextToken()); + assertEquals(expectedParser.currentToken(), actualParser.currentToken()); + assertEquals(actualParser.currentToken().name(), expectedParser.currentName(), actualParser.currentName()); + } + assertNull(expectedParser.nextToken()); + } + } } - assertNull(expectedParser.nextToken()); } public void testEmbeddedObject() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java index f9c332d21a876..0e4945f7faea8 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java @@ -16,7 +16,6 @@ import java.io.IOException; import java.util.List; -import 
java.util.Map; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; @@ -55,10 +54,9 @@ public XContentParser parser() { parser.nextToken(); assertTrue(parser.currentToken().isValue()); DynamicFieldsBuilder.DYNAMIC_TRUE.createDynamicFieldFromValue(ctx, fieldname); - Map> dynamicMappers = ctx.getDynamicMappers(); + List dynamicMappers = ctx.getDynamicMappers(); assertEquals(1, dynamicMappers.size()); - Mapper mapper = dynamicMappers.get(fieldname).get(0).build(MapperBuilderContext.root(false, false)); - assertEquals(fieldname, mapper.name()); - assertEquals(expectedType, mapper.typeName()); + assertEquals(fieldname, dynamicMappers.get(0).name()); + assertEquals(expectedType, dynamicMappers.get(0).typeName()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FloatFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FloatFieldMapperTests.java index 3798129ccff29..511ce94a09eec 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FloatFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FloatFieldMapperTests.java @@ -56,7 +56,7 @@ protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) } @Override - protected Function loadBlockExpected() { + protected Function loadBlockExpected(MapperService mapper, String loaderFieldName) { return v -> { // The test converts the float into a string so we do too Number n = (Number) v; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java index 69cbb1d90b951..3da059803014f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java @@ -19,7 +19,10 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.core.Tuple; import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.utils.GeometryValidator; +import org.elasticsearch.geometry.utils.WellKnownBinary; import org.elasticsearch.geometry.utils.WellKnownText; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; @@ -604,15 +607,19 @@ protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) public SyntheticSourceExample example(int maxVals) { if (randomBoolean()) { Tuple v = generateValue(); - return new SyntheticSourceExample(v.v1(), decode(encode(v.v2())), encode(v.v2()), this::mapping); + return new SyntheticSourceExample(v.v1(), v.v2(), v.v2().toWKT(), this::mapping); } List> values = randomList(1, maxVals, this::generateValue); - List in = values.stream().map(Tuple::v1).toList(); - // The results are currently sorted in order of encoded values, so we need to sort the expected values too - List outList = values.stream().map(v -> encode(v.v2())).sorted().map(this::decode).toList(); + // For the synthetic source tests, the results are sorted in order of encoded values, but for row-stride reader + they are sorted in order of input, so we sort both input and expected here to support both types of tests + List> sorted = values.stream() + .sorted((a, b) -> Long.compare(encode(a.v2()), encode(b.v2()))) + .toList(); + List in = sorted.stream().map(Tuple::v1).toList(); + List outList = sorted.stream().map(v ->
encode(v.v2())).sorted().map(this::decode).toList(); Object out = outList.size() == 1 ? outList.get(0) : outList; - List outBlockList = outList.stream().map(this::encode).toList(); + List outBlockList = outList.stream().map(GeoPoint::toWKT).toList(); Object outBlock = outBlockList.size() == 1 ? outBlockList.get(0) : outBlockList; return new SyntheticSourceExample(in, out, outBlock, this::mapping); } @@ -627,7 +634,7 @@ private Tuple generateValue() { private GeoPoint randomGeoPoint() { Point point = GeometryTestUtils.randomPoint(false); - return new GeoPoint(point.getLat(), point.getLon()); + return decode(encode(new GeoPoint(point.getLat(), point.getLon()))); } private Object randomGeoPointInput(GeoPoint point) { @@ -698,9 +705,15 @@ protected IngestScriptSupport ingestScriptSupport() { throw new AssumptionViolatedException("not supported"); } + private boolean useDocValues = false; + @Override - protected Function loadBlockExpected() { - return v -> asJacksonNumberOutput(((Number) v).longValue()); + protected Function loadBlockExpected(MapperService mapper, String loaderFieldName) { + if (useDocValues) { + return v -> asJacksonNumberOutput(((Number) v).longValue()); + } else { + return v -> asWKT((BytesRef) v); + } } protected static Object asJacksonNumberOutput(long l) { @@ -711,4 +724,21 @@ protected static Object asJacksonNumberOutput(long l) { return l; } } + + protected static Object asWKT(BytesRef value) { + // Internally we use WKB in BytesRef, but for test assertions we want to use WKT for readability + Geometry geometry = WellKnownBinary.fromWKB(GeometryValidator.NOOP, false, value.bytes); + return WellKnownText.toWKT(geometry); + } + + @Override + protected boolean supportsColumnAtATimeReader(MapperService mapper, MappedFieldType ft) { + // Currently ESQL support for geo_point is limited to source values + return false; + } + + @Override + public void testBlockLoaderFromRowStrideReaderWithSyntheticSource() { + assumeTrue("Synthetic source not completely supported for geo_point", false); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldTypeTests.java index aa4dec379f085..8627a236d6401 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldTypeTests.java @@ -11,15 +11,19 @@ import org.apache.lucene.tests.geo.GeoTestUtil; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.SimpleFeatureFactory; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.utils.WellKnownBinary; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.script.ScriptCompiler; -import org.hamcrest.Matchers; import java.io.IOException; +import java.nio.ByteOrder; import java.util.ArrayList; import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; + public class GeoPointFieldTypeTests extends FieldTypeTestCase { public void testFetchSourceValue() throws IOException { @@ -36,31 +40,50 @@ public void testFetchSourceValue() throws IOException { Map otherJsonPoint = Map.of("type", "Point", "coordinates", List.of(30.0, 50.0)); String wktPoint = "POINT (42.0 27.1)"; String otherWktPoint = "POINT (30.0 50.0)"; + byte[] wkbPoint = WellKnownBinary.toWKB(new Point(42.0, 27.1), ByteOrder.LITTLE_ENDIAN); + byte[] otherWkbPoint = WellKnownBinary.toWKB(new Point(30.0, 50.0), ByteOrder.LITTLE_ENDIAN); //
Test a single point in [lon, lat] array format. Object sourceValue = List.of(42.0, 27.1); assertEquals(List.of(jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + List wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(1)); + assertThat(wkb.get(0), equalTo(wkbPoint)); // Test a single point in "lat, lon" string format. sourceValue = "27.1,42.0"; assertEquals(List.of(jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(1)); + assertThat(wkb.get(0), equalTo(wkbPoint)); // Test a list of points in [lon, lat] array format. sourceValue = List.of(List.of(42.0, 27.1), List.of(30.0, 50.0)); assertEquals(List.of(jsonPoint, otherJsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktPoint, otherWktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(2)); + assertThat(wkb.get(0), equalTo(wkbPoint)); + assertThat(wkb.get(1), equalTo(otherWkbPoint)); // Test a list of points in [lon, lat] array format with one malformed sourceValue = List.of(List.of(42.0, 27.1), List.of("a", "b"), List.of(30.0, 50.0)); assertEquals(List.of(jsonPoint, otherJsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktPoint, otherWktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(2)); + assertThat(wkb.get(0), equalTo(wkbPoint)); + assertThat(wkb.get(1), equalTo(otherWkbPoint)); // Test a single point in well-known text format.
sourceValue = "POINT (42.0 27.1)"; assertEquals(List.of(jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(1)); + assertThat(wkb.get(0), equalTo(wkbPoint)); // Test a malformed value sourceValue = "malformed"; @@ -71,9 +94,13 @@ public void testFetchSourceValue() throws IOException { if (ignoreMalformed) { assertEquals(List.of(jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(1)); + assertThat(wkb.get(0), equalTo(wkbPoint)); } else { assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, "wkt")); + assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, "wkb")); } // test single point in GeoJSON format @@ -110,13 +137,13 @@ public void testFetchVectorTile() throws IOException { final double lat = GeoTestUtil.nextLatitude(); final double lon = GeoTestUtil.nextLongitude(); List sourceValue = fetchSourceValue(mapper, List.of(lon, lat), mvtString); - assertThat(sourceValue.size(), Matchers.equalTo(1)); - assertThat(sourceValue.get(0), Matchers.equalTo(featureFactory.point(lon, lat))); + assertThat(sourceValue.size(), equalTo(1)); + assertThat(sourceValue.get(0), equalTo(featureFactory.point(lon, lat))); geoPoints.add(new GeoPoint(lat, lon)); values.add(List.of(lon, lat)); } List sourceValue = fetchSourceValue(mapper, values, mvtString); - assertThat(sourceValue.size(), Matchers.equalTo(1)); - assertThat(sourceValue.get(0), Matchers.equalTo(featureFactory.points(geoPoints))); + assertThat(sourceValue.size(), equalTo(1)); + assertThat(sourceValue.get(0), equalTo(featureFactory.points(geoPoints))); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/HalfFloatFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/HalfFloatFieldMapperTests.java index cc024efb5f307..a0545308c3928 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/HalfFloatFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/HalfFloatFieldMapperTests.java @@ -56,7 +56,7 @@ protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) } @Override - protected Function loadBlockExpected() { + protected Function loadBlockExpected(MapperService mapper, String loaderFieldName) { return v -> { // The test converts the float into a string so we do do Number n = (Number) v; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValuesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValuesTests.java index 60fe4c4a6d99f..b3684d420d728 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValuesTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValuesTests.java @@ -148,13 +148,13 @@ private static XContentParser ignoreMalformed(XContentType type, Object value) t private static StoredField ignoreMalformedStoredField(XContentType type, Object value) throws IOException { XContentBuilder b = XContentBuilder.builder(type.xContent()); b.startObject().field("name", value).endObject(); - XContentParser p = type.xContent().createParser(XContentParserConfiguration.EMPTY, 
BytesReference.bytes(b).streamInput()); - assertThat(p.nextToken(), equalTo(XContentParser.Token.START_OBJECT)); - assertThat(p.nextToken(), equalTo(XContentParser.Token.FIELD_NAME)); - assertThat(p.currentName(), equalTo("name")); - p.nextToken(); - - return IgnoreMalformedStoredValues.storedField("foo.name", p); + try (XContentParser p = type.xContent().createParser(XContentParserConfiguration.EMPTY, BytesReference.bytes(b).streamInput())) { + assertThat(p.nextToken(), equalTo(XContentParser.Token.START_OBJECT)); + assertThat(p.nextToken(), equalTo(XContentParser.Token.FIELD_NAME)); + assertThat(p.currentName(), equalTo("name")); + p.nextToken(); + return IgnoreMalformedStoredValues.storedField("foo.name", p); + } } private static XContentParser parserFrom(IgnoreMalformedStoredValues values, String fieldName) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java index ba9c2e6c4a299..7aa68a6949b7e 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java @@ -423,7 +423,7 @@ public void execute() { } @Override - protected Function loadBlockExpected() { + protected Function loadBlockExpected(MapperService mapper, String loaderFieldName) { return v -> InetAddresses.toAddrString(InetAddressPoint.decode(BytesRef.deepCopyOf((BytesRef) v).bytes)); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index d6e93fceb713e..983054df2fbe7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -650,7 +650,7 @@ protected boolean supportsIgnoreMalformed() { } @Override - protected Function loadBlockExpected() { + protected Function loadBlockExpected(MapperService mapper, String loaderFieldName) { return v -> ((BytesRef) v).utf8ToString(); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/LongFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/LongFieldMapperTests.java index f2d4431e5c79f..0e4502a813c15 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/LongFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/LongFieldMapperTests.java @@ -122,7 +122,7 @@ public void testFetchCoerced() throws IOException { } @Override - protected Function loadBlockExpected() { + protected Function loadBlockExpected(MapperService mapper, String loaderFieldName) { return n -> { Number number = ((Number) n); if (Integer.MIN_VALUE <= number.longValue() && number.longValue() <= Integer.MAX_VALUE) { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java index 7b91c84a05c53..2db0203cb9383 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java @@ -382,7 +382,7 @@ public void testAllowMultipleValuesField() throws IOException { } @Override - protected Function loadBlockExpected() { + protected Function loadBlockExpected(MapperService mapper, String loaderFieldName) { return n -> ((Number) n); // Just assert it's a 
number } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java index ec6a9ddd53e2c..3c77bf20b37d2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java @@ -30,20 +30,23 @@ public class ObjectMapperTests extends MapperServiceTestCase { public void testDifferentInnerObjectTokenFailure() throws Exception { DocumentMapper defaultMapper = createDocumentMapper(mapping(b -> {})); - Exception e = expectThrows(IllegalArgumentException.class, () -> defaultMapper.parse(new SourceToParse("1", new BytesArray(""" - { - "object": { - "array":[ - { - "object": { "value": "value" } - }, - { - "object":"value" - } - ] - }, - "value":"value" - }""".indent(1)), XContentType.JSON))); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> defaultMapper.parse(new SourceToParse("1", new BytesArray(""" + { + "object": { + "array":[ + { + "object": { "value": "value" } + }, + { + "object":"value" + } + ] + }, + "value":"value" + }""".indent(1)), XContentType.JSON)) + ); assertThat(e.getMessage(), containsString("can't merge a non object mapping [object.array.object] with an object mapping")); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java index f683cb60c87c3..5601290fed5c7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java @@ -73,7 +73,7 @@ public void testNoFormat() throws Exception { ) ); - assertThat(XContentFactory.xContentType(doc.source().toBytesRef().bytes), equalTo(XContentType.JSON)); + assertThat(XContentHelper.xContentType(doc.source()), equalTo(XContentType.JSON)); doc = documentMapper.parse( new SourceToParse( diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index 0460108e565ce..b7489a555be50 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -1186,10 +1186,25 @@ public List invalidExample() throws IOException { } @Override - protected Function loadBlockExpected() { + protected Function loadBlockExpected(MapperService mapper, String fieldName) { + if (nullLoaderExpected(mapper, fieldName)) { + return null; + } return v -> ((BytesRef) v).utf8ToString(); } + private boolean nullLoaderExpected(MapperService mapper, String fieldName) { + MappedFieldType type = mapper.fieldType(fieldName); + if (type instanceof TextFieldType t) { + if (t.isSyntheticSource() == false || t.canUseSyntheticSourceDelegateForQuerying() || t.isStored()) { + return false; + } + String parentField = mapper.mappingLookup().parentField(fieldName); + return parentField == null || nullLoaderExpected(mapper, parentField); + } + return false; + } + @Override protected IngestScriptSupport ingestScriptSupport() { throw new AssumptionViolatedException("not supported"); diff --git a/server/src/test/java/org/elasticsearch/index/query/SearchIndexNameMatcherTests.java b/server/src/test/java/org/elasticsearch/index/query/SearchIndexNameMatcherTests.java index bf66f4c93d6ef..b038fbb911f00 100644 
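The next two files in this diff (SearchIndexNameMatcherTests just below and IndicesModuleTests further down) replace `assertTrue(p.test(x))`/`assertFalse(p.test(x))` with the test framework's `LambdaMatchers.trueWith`/`falseWith`. The gain is diagnostic: a failing `assertThat` describes what was expected of the predicate instead of throwing a bare `AssertionError`. A rough, self-contained sketch of what such a matcher does (the real LambdaMatchers implementation may differ in detail):

```java
import java.util.function.Predicate;

import org.hamcrest.CustomTypeSafeMatcher;
import org.hamcrest.Matcher;

import static org.hamcrest.MatcherAssert.assertThat;

public class TrueWithSketch {
    // trueWith(x) matches any Predicate that returns true for x.
    static <T> Matcher<Predicate<T>> trueWith(T value) {
        return new CustomTypeSafeMatcher<Predicate<T>>("a predicate returning true for " + value) {
            @Override
            protected boolean matchesSafely(Predicate<T> predicate) {
                return predicate.test(value);
            }
        };
    }

    public static void main(String[] args) {
        Predicate<String> matcher = s -> s.startsWith("index");
        assertThat(matcher, trueWith("index1")); // a failure would print the matcher's description
    }
}
```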
diff --git a/server/src/test/java/org/elasticsearch/index/query/SearchIndexNameMatcherTests.java b/server/src/test/java/org/elasticsearch/index/query/SearchIndexNameMatcherTests.java
index bf66f4c93d6ef..b038fbb911f00 100644
--- a/server/src/test/java/org/elasticsearch/index/query/SearchIndexNameMatcherTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/SearchIndexNameMatcherTests.java
@@ -19,6 +19,8 @@
 import org.elasticsearch.test.ESTestCase;
 import org.junit.Before;
 
+import static org.elasticsearch.test.LambdaMatchers.falseWith;
+import static org.elasticsearch.test.LambdaMatchers.trueWith;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -46,31 +48,31 @@ private static IndexMetadata.Builder indexBuilder(String index) {
     }
 
     public void testLocalIndex() {
-        assertTrue(matcher.test("index1"));
-        assertTrue(matcher.test("ind*x1"));
-        assertFalse(matcher.test("index2"));
+        assertThat(matcher, trueWith("index1"));
+        assertThat(matcher, trueWith("ind*x1"));
+        assertThat(matcher, falseWith("index2"));
 
-        assertTrue(matcher.test("alias"));
-        assertTrue(matcher.test("*lias"));
+        assertThat(matcher, trueWith("alias"));
+        assertThat(matcher, trueWith("*lias"));
 
-        assertFalse(matcher.test("cluster:index1"));
+        assertThat(matcher, falseWith("cluster:index1"));
     }
 
     public void testRemoteIndex() {
-        assertTrue(remoteMatcher.test("cluster:index1"));
-        assertTrue(remoteMatcher.test("cluster:ind*x1"));
-        assertTrue(remoteMatcher.test("*luster:ind*x1"));
-        assertFalse(remoteMatcher.test("cluster:index2"));
+        assertThat(remoteMatcher, trueWith("cluster:index1"));
+        assertThat(remoteMatcher, trueWith("cluster:ind*x1"));
+        assertThat(remoteMatcher, trueWith("*luster:ind*x1"));
+        assertThat(remoteMatcher, falseWith("cluster:index2"));
 
-        assertTrue(remoteMatcher.test("cluster:alias"));
-        assertTrue(remoteMatcher.test("cluster:*lias"));
+        assertThat(remoteMatcher, trueWith("cluster:alias"));
+        assertThat(remoteMatcher, trueWith("cluster:*lias"));
 
-        assertFalse(remoteMatcher.test("index1"));
-        assertFalse(remoteMatcher.test("alias"));
+        assertThat(remoteMatcher, falseWith("index1"));
+        assertThat(remoteMatcher, falseWith("alias"));
 
-        assertFalse(remoteMatcher.test("*index1"));
-        assertFalse(remoteMatcher.test("*alias"));
-        assertFalse(remoteMatcher.test("cluster*"));
-        assertFalse(remoteMatcher.test("cluster*index1"));
+        assertThat(remoteMatcher, falseWith("*index1"));
+        assertThat(remoteMatcher, falseWith("*alias"));
+        assertThat(remoteMatcher, falseWith("cluster*"));
+        assertThat(remoteMatcher, falseWith("cluster*index1"));
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexReaderWrapperTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexReaderWrapperTests.java
index 4d94b3b1dd732..8ea98acdd6806 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexReaderWrapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexReaderWrapperTests.java
@@ -163,7 +163,7 @@ public void testAlwaysWrapWithFieldUsageTrackingDirectoryReader() throws IOException {
                 IndexSearcher.getDefaultSimilarity(),
                 IndexSearcher.getDefaultQueryCache(),
                 IndexSearcher.getDefaultQueryCachingPolicy(),
-                open::close
+                open
             ),
             mock(ShardFieldUsageTracker.FieldUsageStatsTrackingSession.class),
             wrapper
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index 4d6e316b4b7d9..e6d6de16cff2c 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -78,7 +78,6 @@
 import org.elasticsearch.index.engine.DocIdSeqNoAndSource;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.engine.EngineConfig;
-import org.elasticsearch.index.engine.EngineException;
 import org.elasticsearch.index.engine.EngineTestCase;
 import org.elasticsearch.index.engine.InternalEngine;
 import org.elasticsearch.index.engine.InternalEngineFactory;
@@ -2962,7 +2961,7 @@ public void testShardActiveDuringInternalRecovery() throws IOException {
         // Shard is still inactive since we haven't started recovering yet
         assertFalse(shard.isActive());
         shard.recoveryState().getIndex().setFileDetailsComplete();
-        shard.openEngineAndRecoverFromTranslog();
+        PlainActionFuture.get(shard::openEngineAndRecoverFromTranslog, 30, TimeUnit.SECONDS);
         // Shard should now be active since we did recover:
         assertTrue(shard.isActive());
         closeShards(shard);
@@ -4165,13 +4164,13 @@ public void testMultiplePeriodicFlushesCanBeTriggeredBeforeTheyAreDurable() throws Exception {
         var flushExecutedBarrier = new CyclicBarrier(2);
         var shard = newStartedShard(true, indexSettings, config -> new InternalEngine(config) {
             @Override
-            public void flush(boolean force, boolean waitIfOngoing, ActionListener<FlushResult> listener) throws EngineException {
+            protected void flushHoldingLock(boolean force, boolean waitIfOngoing, ActionListener<FlushResult> listener) {
                 if (shardStarted.get()) {
-                    super.flush(force, waitIfOngoing, ActionListener.noop());
+                    super.flushHoldingLock(force, waitIfOngoing, ActionListener.noop());
                     pendingListeners.add(listener);
                     safeAwait(flushExecutedBarrier);
                 } else {
-                    super.flush(force, waitIfOngoing, listener);
+                    super.flushHoldingLock(force, waitIfOngoing, listener);
                 }
             }
         });
diff --git a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java
index dfbfb737c9ab2..3eb4675d37e97 100644
--- a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java
+++ b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java
@@ -775,22 +775,22 @@ public void testStoreStats() throws IOException {
         final long localStoreSizeDelta = randomLongBetween(-initialStoreSize, initialStoreSize);
         final long reservedBytes = randomBoolean() ? StoreStats.UNKNOWN_RESERVED_BYTES : randomLongBetween(0L, Integer.MAX_VALUE);
         StoreStats stats = store.stats(reservedBytes, size -> size + localStoreSizeDelta);
-        assertEquals(initialStoreSize, stats.totalDataSetSize().getBytes());
-        assertEquals(initialStoreSize + localStoreSizeDelta, stats.getSize().getBytes());
-        assertEquals(reservedBytes, stats.getReservedSize().getBytes());
+        assertEquals(initialStoreSize, stats.totalDataSetSizeInBytes());
+        assertEquals(initialStoreSize + localStoreSizeDelta, stats.sizeInBytes());
+        assertEquals(reservedBytes, stats.reservedSizeInBytes());
 
         stats.add(null);
-        assertEquals(initialStoreSize, stats.totalDataSetSize().getBytes());
-        assertEquals(initialStoreSize + localStoreSizeDelta, stats.getSize().getBytes());
-        assertEquals(reservedBytes, stats.getReservedSize().getBytes());
+        assertEquals(initialStoreSize, stats.totalDataSetSizeInBytes());
+        assertEquals(initialStoreSize + localStoreSizeDelta, stats.sizeInBytes());
+        assertEquals(reservedBytes, stats.reservedSizeInBytes());
 
         final long otherStatsDataSetBytes = randomLongBetween(0L, Integer.MAX_VALUE);
         final long otherStatsLocalBytes = randomLongBetween(0L, Integer.MAX_VALUE);
         final long otherStatsReservedBytes = randomBoolean() ? StoreStats.UNKNOWN_RESERVED_BYTES : randomLongBetween(0L, Integer.MAX_VALUE);
         stats.add(new StoreStats(otherStatsLocalBytes, otherStatsDataSetBytes, otherStatsReservedBytes));
-        assertEquals(initialStoreSize + otherStatsDataSetBytes, stats.totalDataSetSize().getBytes());
-        assertEquals(initialStoreSize + otherStatsLocalBytes + localStoreSizeDelta, stats.getSize().getBytes());
-        assertEquals(Math.max(reservedBytes, 0L) + Math.max(otherStatsReservedBytes, 0L), stats.getReservedSize().getBytes());
+        assertEquals(initialStoreSize + otherStatsDataSetBytes, stats.totalDataSetSizeInBytes());
+        assertEquals(initialStoreSize + otherStatsLocalBytes + localStoreSizeDelta, stats.sizeInBytes());
+        assertEquals(Math.max(reservedBytes, 0L) + Math.max(otherStatsReservedBytes, 0L), stats.reservedSizeInBytes());
 
         Directory dir = store.directory();
         final long length;
@@ -805,8 +805,8 @@ public void testStoreStats() throws IOException {
 
         assertTrue(numNonExtraFiles(store) > 0);
         stats = store.stats(0L, size -> size + localStoreSizeDelta);
-        assertEquals(initialStoreSize + length, stats.totalDataSetSize().getBytes());
-        assertEquals(initialStoreSize + localStoreSizeDelta + length, stats.getSizeInBytes());
+        assertEquals(initialStoreSize + length, stats.totalDataSetSizeInBytes());
+        assertEquals(initialStoreSize + localStoreSizeDelta + length, stats.sizeInBytes());
 
         deleteContent(store.directory());
         IOUtils.close(store);
diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java
index 2ce85a598541e..1648e38a3f0b9 100644
--- a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java
@@ -45,6 +45,8 @@
 import java.util.function.Function;
 import java.util.function.Predicate;
 
+import static org.elasticsearch.test.LambdaMatchers.falseWith;
+import static org.elasticsearch.test.LambdaMatchers.trueWith;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.instanceOf;
@@ -239,7 +241,7 @@ public void testFieldNamesIsLastWithPlugins() {
     }
 
     public void testGetFieldFilter() {
-        List<MapperPlugin> mapperPlugins = Arrays.asList(new MapperPlugin() {
+        List<MapperPlugin> mapperPlugins = List.of(new MapperPlugin() {
         }, new MapperPlugin() {
             @Override
             public Function<String, Predicate<String>> getFieldFilter() {
@@ -262,16 +264,16 @@ public Function<String, Predicate<String>> getFieldFilter() {
         Function<String, Predicate<String>> fieldFilter = mapperRegistry.getFieldFilter();
         assertNotSame(MapperPlugin.NOOP_FIELD_FILTER, fieldFilter);
 
-        assertFalse(fieldFilter.apply("hidden_index").test(randomAlphaOfLengthBetween(3, 5)));
-        assertTrue(fieldFilter.apply(randomAlphaOfLengthBetween(3, 5)).test(randomAlphaOfLengthBetween(3, 5)));
+        assertThat(fieldFilter.apply("hidden_index"), falseWith(randomAlphaOfLengthBetween(3, 5)));
+        assertThat(fieldFilter.apply(randomAlphaOfLengthBetween(3, 5)), trueWith(randomAlphaOfLengthBetween(3, 5)));
 
-        assertFalse(fieldFilter.apply(randomAlphaOfLengthBetween(3, 5)).test("hidden_field"));
-        assertFalse(fieldFilter.apply("filtered").test(randomAlphaOfLengthBetween(3, 5)));
-        assertFalse(fieldFilter.apply("filtered").test("hidden_field"));
-        assertTrue(fieldFilter.apply("filtered").test("visible"));
-        assertFalse(fieldFilter.apply("hidden_index").test("visible"));
-        assertTrue(fieldFilter.apply(randomAlphaOfLengthBetween(3, 5)).test("visible"));
-        assertFalse(fieldFilter.apply("hidden_index").test("hidden_field"));
+        assertThat(fieldFilter.apply(randomAlphaOfLengthBetween(3, 5)), falseWith("hidden_field"));
+        assertThat(fieldFilter.apply("filtered"), falseWith(randomAlphaOfLengthBetween(3, 5)));
+        assertThat(fieldFilter.apply("filtered"), falseWith("hidden_field"));
+        assertThat(fieldFilter.apply("filtered"), trueWith("visible"));
+        assertThat(fieldFilter.apply("hidden_index"), falseWith("visible"));
+        assertThat(fieldFilter.apply(randomAlphaOfLengthBetween(3, 5)), trueWith("visible"));
+        assertThat(fieldFilter.apply("hidden_index"), falseWith("hidden_field"));
     }
 
     public void testDefaultFieldFilterIsNoOp() {
diff --git a/server/src/test/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceTests.java b/server/src/test/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceTests.java
index 925fadd511a79..108ce6b63cec8 100644
--- a/server/src/test/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceTests.java
@@ -9,8 +9,8 @@
 package org.elasticsearch.indices;
 
 import org.elasticsearch.Version;
-import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction;
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
+import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterName;
@@ -53,6 +53,7 @@
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasSize;
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.same;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -258,7 +259,7 @@ public void testManagerSubmitsPutRequest() {
 
         manager.clusterChanged(event(markShardsAvailable(createClusterState(Strings.toString(getMappings("1.0.0", 4))))));
 
-        verify(client, times(1)).execute(any(PutMappingAction.class), any(PutMappingRequest.class), any());
+        verify(client, times(1)).execute(same(TransportPutMappingAction.TYPE), any(PutMappingRequest.class), any());
     }
 
     /**
diff --git a/server/src/test/java/org/elasticsearch/indices/TermsLookupTests.java b/server/src/test/java/org/elasticsearch/indices/TermsLookupTests.java
index dc2b7614fb52c..8f4bb9d9e3c5d 100644
--- a/server/src/test/java/org/elasticsearch/indices/TermsLookupTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/TermsLookupTests.java
@@ -63,14 +63,15 @@ public void testSerialization() throws IOException {
     }
 
     public void testXContentParsing() throws IOException {
-        XContentParser parser = createParser(JsonXContent.jsonXContent, """
-            { "index" : "index", "id" : "id", "path" : "path", "routing" : "routing" }""");
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, """
+            { "index" : "index", "id" : "id", "path" : "path", "routing" : "routing" }""")) {
 
-        TermsLookup tl = TermsLookup.parseTermsLookup(parser);
-        assertEquals("index", tl.index());
-        assertEquals("id", tl.id());
-        assertEquals("path", tl.path());
-        assertEquals("routing", tl.routing());
+            TermsLookup tl = TermsLookup.parseTermsLookup(parser);
+            assertEquals("index", tl.index());
+            assertEquals("id", tl.id());
+            assertEquals("path", tl.path());
+            assertEquals("routing", tl.routing());
+        }
     }
 
     public static TermsLookup randomTermsLookup() {
diff --git a/server/src/test/java/org/elasticsearch/indices/analysis/IncorrectSetupStablePluginsTests.java b/server/src/test/java/org/elasticsearch/indices/analysis/IncorrectSetupStablePluginsTests.java
index a48762859479f..0069895f1783f 100644
--- a/server/src/test/java/org/elasticsearch/indices/analysis/IncorrectSetupStablePluginsTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/analysis/IncorrectSetupStablePluginsTests.java
@@ -94,7 +94,7 @@ public void testIncorrectlyAnnotatedConstructor() throws IOException {
                 Map.of("noInjectCharFilter", new PluginInfo("noInjectCharFilter", NoInjectCharFilter.class.getName(), classLoader))
             )
         );
-        assertThat(e.getMessage(), equalTo("Missing @Inject annotation for constructor with settings."));
+        assertThat(e.getMessage(), equalTo("Missing @org.elasticsearch.plugin.Inject annotation for constructor with settings."));
     }
 
     @NamedComponent("multipleConstructors")
diff --git a/server/src/test/java/org/elasticsearch/indices/analysis/wrappers/StableApiWrappersTests.java b/server/src/test/java/org/elasticsearch/indices/analysis/wrappers/StableApiWrappersTests.java
index 3ddf8feb543fb..a2e594d5ae4e0 100644
--- a/server/src/test/java/org/elasticsearch/indices/analysis/wrappers/StableApiWrappersTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/analysis/wrappers/StableApiWrappersTests.java
@@ -75,7 +75,10 @@ public void testStablePluginHasNoArgConstructor() throws IOException {
             IllegalStateException.class,
             () -> oldTokenFilter.get(null, mock(Environment.class), null, null)
         );
-        assertThat(illegalStateException.getMessage(), equalTo("Missing @Inject annotation for constructor with settings."));
+        assertThat(
+            illegalStateException.getMessage(),
+            equalTo("Missing @org.elasticsearch.plugin.Inject annotation for constructor with settings.")
+        );
     }
 
     public void testAnalyzerFactoryDelegation() throws IOException {
diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java
index c20c9615573d6..15f6d0ed377fa 100644
--- a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java
@@ -80,7 +80,7 @@ public void checkParentLimit(long newBytesReserved, String label) throws CircuitBreakingException {
         };
         final BreakerSettings settings = new BreakerSettings(CircuitBreaker.REQUEST, (BYTES_PER_THREAD * NUM_THREADS) - 1, 1.0);
         final ChildMemoryCircuitBreaker breaker = new ChildMemoryCircuitBreaker(
-            CircuitBreakerMetrics.NOOP.getParentTripCountTotal(),
+            CircuitBreakerMetrics.NOOP.getTripCount(),
             settings,
             logger,
             (HierarchyCircuitBreakerService) service,
@@ -158,7 +158,7 @@ public void checkParentLimit(long newBytesReserved, String label) throws CircuitBreakingException {
         };
         final BreakerSettings settings = new BreakerSettings(CircuitBreaker.REQUEST, childLimit, 1.0);
         final ChildMemoryCircuitBreaker breaker = new ChildMemoryCircuitBreaker(
-            CircuitBreakerMetrics.NOOP.getParentTripCountTotal(),
+            CircuitBreakerMetrics.NOOP.getTripCount(),
             settings,
             logger,
             (HierarchyCircuitBreakerService) service,
@@ -346,6 +346,7 @@ public void testParentTriggersG1GCBeforeBreaking() throws InterruptedException, BrokenBarrierException, TimeoutException {
         AtomicReference<Consumer<Boolean>> onOverLimit = new AtomicReference<>(leader -> {});
         AtomicLong time = new AtomicLong(randomLongBetween(Long.MIN_VALUE / 2, Long.MAX_VALUE / 2));
         long interval = randomLongBetween(1, 1000);
+        long fullGCInterval = randomLongBetween(500, 2000);
         final HierarchyCircuitBreakerService service = new HierarchyCircuitBreakerService(
             CircuitBreakerMetrics.NOOP,
             clusterSettings,
@@ -357,6 +358,8 @@ public void testParentTriggersG1GCBeforeBreaking() throws InterruptedException, BrokenBarrierException, TimeoutException {
             HierarchyCircuitBreakerService.createYoungGcCountSupplier(),
             time::get,
             interval,
+            fullGCInterval,
+            TimeValue.timeValueSeconds(30),
             TimeValue.timeValueSeconds(30)
         ) {
@@ -481,6 +484,7 @@ public void testG1OverLimitStrategyBreakOnMemory() {
         AtomicInteger leaderTriggerCount = new AtomicInteger();
         AtomicInteger nonLeaderTriggerCount = new AtomicInteger();
         long interval = randomLongBetween(1, 1000);
+        long fullGCInterval = randomLongBetween(500, 2000);
         AtomicLong memoryUsage = new AtomicLong();
 
         HierarchyCircuitBreakerService.G1OverLimitStrategy strategy = new HierarchyCircuitBreakerService.G1OverLimitStrategy(
@@ -489,6 +493,8 @@ public void testG1OverLimitStrategyBreakOnMemory() {
             () -> 0,
             time::get,
             interval,
+            fullGCInterval,
+            TimeValue.timeValueSeconds(30),
             TimeValue.timeValueSeconds(30)
         ) {
             @Override
@@ -535,6 +541,7 @@ public void testG1OverLimitStrategyBreakOnGcCount() {
         AtomicInteger leaderTriggerCount = new AtomicInteger();
         AtomicInteger nonLeaderTriggerCount = new AtomicInteger();
         long interval = randomLongBetween(1, 1000);
+        long fullGCInterval = randomLongBetween(500, 2000);
         AtomicLong memoryUsageCounter = new AtomicLong();
         AtomicLong gcCounter = new AtomicLong();
         LongSupplier memoryUsageSupplier = () -> {
@@ -547,6 +554,8 @@ public void testG1OverLimitStrategyBreakOnGcCount() {
             gcCounter::incrementAndGet,
             time::get,
             interval,
+            fullGCInterval,
+            TimeValue.timeValueSeconds(30),
             TimeValue.timeValueSeconds(30)
         ) {
@@ -569,13 +578,15 @@ void overLimitTriggered(boolean leader) {
         assertThat(strategy.overLimit(input), sameInstance(input));
         assertThat(leaderTriggerCount.get(), equalTo(1));
         assertThat(gcCounter.get(), equalTo(2L));
-        assertThat(memoryUsageCounter.get(), equalTo(2L)); // 1 before gc count break and 1 to get resulting memory usage.
+        // 1 before gc count break, 1 for full GC check and 1 to get resulting memory usage.
+        assertThat(memoryUsageCounter.get(), equalTo(3L));
     }
 
     public void testG1OverLimitStrategyThrottling() throws InterruptedException, BrokenBarrierException, TimeoutException {
         AtomicLong time = new AtomicLong(randomLongBetween(Long.MIN_VALUE / 2, Long.MAX_VALUE / 2));
         AtomicInteger leaderTriggerCount = new AtomicInteger();
         long interval = randomLongBetween(1, 1000);
+        long fullGCInterval = randomLongBetween(500, 2000);
         AtomicLong memoryUsage = new AtomicLong();
 
         HierarchyCircuitBreakerService.G1OverLimitStrategy strategy = new HierarchyCircuitBreakerService.G1OverLimitStrategy(
             JvmInfo.jvmInfo(),
@@ -583,6 +594,8 @@ public void testG1OverLimitStrategyThrottling() throws InterruptedException, BrokenBarrierException, TimeoutException {
             () -> 0,
             time::get,
             interval,
+            fullGCInterval,
+            TimeValue.timeValueSeconds(30),
             TimeValue.timeValueSeconds(30)
         ) {
@@ -661,6 +674,8 @@ public void testG1LockTimeout() throws Exception {
             gcCounter::incrementAndGet,
             () -> 0,
             1,
+            1,
+            TimeValue.timeValueMillis(randomFrom(0, 5, 10)),
             TimeValue.timeValueMillis(randomFrom(0, 5, 10))
         ) {
diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java
index 961fe2dc15efe..2cbe1202520df 100644
--- a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java
@@ -31,6 +31,7 @@
 import java.util.function.Function;
 import java.util.stream.Stream;
 
+import static org.elasticsearch.common.breaker.ChildMemoryCircuitBreaker.CIRCUIT_BREAKER_TYPE_ATTRIBUTE;
 import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING;
 import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING;
 import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING;
@@ -49,53 +50,8 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
 
     public static class TestCircuitBreakerTelemetryPlugin extends TestTelemetryPlugin {
         protected final MeterRegistry meter = new RecordingMeterRegistry() {
-            private final LongCounter inFlightRequests = new RecordingInstruments.RecordingLongCounter(
-                CircuitBreakerMetrics.ES_BREAKER_PARENT_TRIP_COUNT_TOTAL,
-                recorder
-            ) {
-                @Override
-                public void incrementBy(long inc) {
-                    throw new UnsupportedOperationException();
-                }
-
-                @Override
-                public void incrementBy(long inc, Map<String, Object> attributes) {
-                    throw new UnsupportedOperationException();
-                }
-            };
-
-            private final LongCounter fielddata = new RecordingInstruments.RecordingLongCounter(
-                CircuitBreakerMetrics.ES_BREAKER_FIELD_DATA_TRIP_COUNT_TOTAL,
-                recorder
-            ) {
-                @Override
-                public void incrementBy(long inc) {
-                    throw new UnsupportedOperationException();
-                }
-
-                @Override
-                public void incrementBy(long inc, Map<String, Object> attributes) {
-                    throw new UnsupportedOperationException();
-                }
-            };
-
-            private final LongCounter request = new RecordingInstruments.RecordingLongCounter(
-                CircuitBreakerMetrics.ES_BREAKER_REQUEST_TRIP_COUNT_TOTAL,
-                recorder
-            ) {
-                @Override
-                public void incrementBy(long inc) {
-                    throw new UnsupportedOperationException();
-                }
-
-                @Override
-                public void incrementBy(long inc, Map<String, Object> attributes) {
-                    throw new UnsupportedOperationException();
-                }
-            };
-
-            private final LongCounter parent = new RecordingInstruments.RecordingLongCounter(
-                CircuitBreakerMetrics.ES_BREAKER_PARENT_TRIP_COUNT_TOTAL,
+            private final LongCounter tripCount = new RecordingInstruments.RecordingLongCounter(
+                CircuitBreakerMetrics.ES_BREAKER_TRIP_COUNT_TOTAL,
                 recorder
             ) {
                 @Override
@@ -111,14 +67,8 @@ public void incrementBy(long inc, Map<String, Object> attributes) {
 
             @Override
             protected LongCounter buildLongCounter(String name, String description, String unit) {
-                if (name.equals(inFlightRequests.getName())) {
-                    return inFlightRequests;
-                } else if (name.equals(request.getName())) {
-                    return request;
-                } else if (name.equals(fielddata.getName())) {
-                    return fielddata;
-                } else if (name.equals(parent.getName())) {
-                    return parent;
+                if (name.equals(tripCount.getName())) {
+                    return tripCount;
                 }
                 throw new IllegalArgumentException("Unknown counter metric name [" + name + "]");
             }
@@ -136,15 +86,7 @@ public LongCounter getLongCounter(String name) {
             }
 
             private void assertCircuitBreakerName(final String name) {
-                assertThat(
-                    name,
-                    Matchers.oneOf(
-                        CircuitBreakerMetrics.ES_BREAKER_FIELD_DATA_TRIP_COUNT_TOTAL,
-                        CircuitBreakerMetrics.ES_BREAKER_IN_FLIGHT_REQUESTS_TRIP_COUNT_TOTAL,
-                        CircuitBreakerMetrics.ES_BREAKER_PARENT_TRIP_COUNT_TOTAL,
-                        CircuitBreakerMetrics.ES_BREAKER_REQUEST_TRIP_COUNT_TOTAL
-                    )
-                );
+                assertThat(name, Matchers.oneOf(CircuitBreakerMetrics.ES_BREAKER_TRIP_COUNT_TOTAL));
             }
         };
     }
@@ -193,6 +135,7 @@ public void testCircuitBreakerTripCountMetric() {
         final Measurement measurement = allMeasurements.get(0);
         assertThat(1L, Matchers.equalTo(measurement.getLong()));
         assertThat(1L, Matchers.equalTo(measurement.value()));
+        assertThat(Map.of(CIRCUIT_BREAKER_TYPE_ATTRIBUTE, "inflight_requests"), Matchers.equalTo(measurement.attributes()));
         assertThat(true, Matchers.equalTo(measurement.isLong()));
         return;
     }
@@ -205,13 +148,9 @@ private List<Measurement> getMeasurements(String dataNodeName) {
             .toList()
             .get(0);
         return Measurement.combine(
-            Stream.of(
-                dataNodeTelemetryPlugin.getLongCounterMeasurement(CircuitBreakerMetrics.ES_BREAKER_IN_FLIGHT_REQUESTS_TRIP_COUNT_TOTAL)
-                    .stream(),
-                dataNodeTelemetryPlugin.getLongCounterMeasurement(CircuitBreakerMetrics.ES_BREAKER_FIELD_DATA_TRIP_COUNT_TOTAL).stream(),
-                dataNodeTelemetryPlugin.getLongCounterMeasurement(CircuitBreakerMetrics.ES_BREAKER_REQUEST_TRIP_COUNT_TOTAL).stream(),
-                dataNodeTelemetryPlugin.getLongCounterMeasurement(CircuitBreakerMetrics.ES_BREAKER_PARENT_TRIP_COUNT_TOTAL).stream()
-            ).flatMap(Function.identity()).toList()
+            Stream.of(dataNodeTelemetryPlugin.getLongCounterMeasurement(CircuitBreakerMetrics.ES_BREAKER_TRIP_COUNT_TOTAL).stream())
+                .flatMap(Function.identity())
+                .toList()
         );
     }
diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
index fd285ba8b239f..82fb694db6c66 100644
--- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
@@ -784,10 +784,8 @@ void phase2(
         };
         PlainActionFuture<RecoveryResponse> future = new PlainActionFuture<>();
-        expectThrows(IndexShardRelocatedException.class, () -> {
-            handler.recoverToTarget(future);
-            future.actionGet();
-        });
+        handler.recoverToTarget(future);
+        expectThrows(IndexShardRelocatedException.class, future);
         assertFalse(phase1Called.get());
         assertFalse(prepareTargetForTranslogCalled.get());
         assertFalse(phase2Called.get());
diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java
index 166b830d14301..71e90e8f4cc06 100644
--- a/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java
+++ b/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java
@@ -339,7 +339,7 @@ public void testSimpleSetFieldValue() {
     }
 
     public void testSetFieldValueNullValue() {
-        ingestDocument.setFieldValue("new_field", null);
+        ingestDocument.setFieldValue("new_field", (Object) null);
         assertThat(ingestDocument.getSourceAndMetadata().containsKey("new_field"), equalTo(true));
         assertThat(ingestDocument.getSourceAndMetadata().get("new_field"), nullValue());
     }
diff --git a/server/src/test/java/org/elasticsearch/ingest/ValueSourceTests.java b/server/src/test/java/org/elasticsearch/ingest/ValueSourceTests.java
index 84b7db7301597..0f18d04d8ac9c 100644
--- a/server/src/test/java/org/elasticsearch/ingest/ValueSourceTests.java
+++ b/server/src/test/java/org/elasticsearch/ingest/ValueSourceTests.java
@@ -45,7 +45,7 @@ public void testCopyDoesNotChangeProvidedMap() {
         IngestDocument ingestDocument = TestIngestDocument.emptyIngestDocument();
 
         ingestDocument.setFieldValue(
-            new TestTemplateService.MockTemplateScript.Factory("field1"),
+            ingestDocument.renderTemplate(new TestTemplateService.MockTemplateScript.Factory("field1")),
             ValueSource.wrap(myPreciousMap, TestTemplateService.instance())
         );
         ingestDocument.removeField("field1.field2");
@@ -60,7 +60,7 @@ public void testCopyDoesNotChangeProvidedList() {
         IngestDocument ingestDocument = TestIngestDocument.emptyIngestDocument();
 
         ingestDocument.setFieldValue(
-            new TestTemplateService.MockTemplateScript.Factory("field1"),
+            ingestDocument.renderTemplate(new TestTemplateService.MockTemplateScript.Factory("field1")),
             ValueSource.wrap(myPreciousList, TestTemplateService.instance())
         );
         ingestDocument.removeField("field1.0");
diff --git a/server/src/test/java/org/elasticsearch/plugins/ExtensionLoaderTests.java b/server/src/test/java/org/elasticsearch/plugins/ExtensionLoaderTests.java
index 35f65ebedf9b9..b0415104f4422 100644
--- a/server/src/test/java/org/elasticsearch/plugins/ExtensionLoaderTests.java
+++ b/server/src/test/java/org/elasticsearch/plugins/ExtensionLoaderTests.java
@@ -20,8 +20,10 @@
 import java.util.HashMap;
 import java.util.Locale;
 import java.util.Map;
+import java.util.Optional;
 import java.util.ServiceLoader;
 
+import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.not;
@@ -68,21 +70,16 @@ public int getValue() {
             """, name, value);
     }
 
-    public void testNoProviderNullFallback() {
-        TestService service = ExtensionLoader.loadSingleton(ServiceLoader.load(TestService.class), () -> null);
-        assertThat(service, nullValue());
-    }
-
     public void testNoProvider() {
-        TestService service = ExtensionLoader.loadSingleton(ServiceLoader.load(TestService.class), () -> () -> 2);
-        assertThat(service, not(nullValue()));
-        assertThat(service.getValue(), equalTo(2));
+        Optional<TestService> service = ExtensionLoader.loadSingleton(ServiceLoader.load(TestService.class));
+        assertThat(service, isEmpty());
     }
 
     public void testOneProvider() throws Exception {
         Map<String, CharSequence> sources = Map.of("p.FooService", defineProvider("FooService", 1));
         try (var loader = buildProviderJar(sources)) {
-            TestService service = ExtensionLoader.loadSingleton(ServiceLoader.load(TestService.class, loader.classloader()), () -> null);
+            TestService service = ExtensionLoader.loadSingleton(ServiceLoader.load(TestService.class, loader.classloader()))
+                .orElseThrow(AssertionError::new);
             assertThat(service, not(nullValue()));
             assertThat(service.getValue(), equalTo(1));
         }
@@ -98,7 +95,7 @@ public void testManyProviders() throws Exception {
         try (var loader = buildProviderJar(sources)) {
             var e = expectThrows(
                 IllegalStateException.class,
-                () -> ExtensionLoader.loadSingleton(ServiceLoader.load(TestService.class, loader.classloader()), () -> null)
+                () -> ExtensionLoader.loadSingleton(ServiceLoader.load(TestService.class, loader.classloader()))
             );
             assertThat(e.getMessage(), containsString("More than one extension found"));
             assertThat(e.getMessage(), containsString("TestService"));
diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java
index 000dc1a33ed91..7ed4d975fe3be 100644
--- a/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java
+++ b/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java
@@ -24,6 +24,7 @@
 import java.util.Map;
 import java.util.function.Consumer;
 
+import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty;
 import static org.hamcrest.Matchers.contains;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.empty;
@@ -183,14 +184,14 @@ public void testReadFromPropertiesJvmMissingClassname() throws Exception {
 
     public void testReadFromPropertiesModulenameFallback() throws Exception {
         PluginDescriptor info = mockInternalDescriptor("modulename", null);
-        assertThat(info.getModuleName().isPresent(), is(false));
+        assertThat(info.getModuleName(), isEmpty());
         assertThat(info.isModular(), is(false));
         assertThat(info.getExtendedPlugins(), empty());
     }
 
     public void testReadFromPropertiesModulenameEmpty() throws Exception {
         PluginDescriptor info = mockInternalDescriptor("modulename", " ");
-        assertThat(info.getModuleName().isPresent(), is(false));
+        assertThat(info.getModuleName(), isEmpty());
         assertThat(info.isModular(), is(false));
         assertThat(info.getExtendedPlugins(), empty());
     }
diff --git a/server/src/test/java/org/elasticsearch/repositories/IndexIdTests.java b/server/src/test/java/org/elasticsearch/repositories/IndexIdTests.java
index 580fdaf6f7f7d..37ff5521c201f 100644
--- a/server/src/test/java/org/elasticsearch/repositories/IndexIdTests.java
+++ b/server/src/test/java/org/elasticsearch/repositories/IndexIdTests.java
@@ -48,17 +48,20 @@ public void testXContent() throws IOException {
         IndexId indexId = new IndexId(randomAlphaOfLength(8), UUIDs.randomBase64UUID());
         XContentBuilder builder = JsonXContent.contentBuilder();
         indexId.toXContent(builder, ToXContent.EMPTY_PARAMS);
-        XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder));
-        assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
-        String name = null;
-        String id = null;
-        while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
-            final String currentFieldName = parser.currentName();
-            parser.nextToken();
-            if (currentFieldName.equals(IndexId.NAME)) {
-                name = parser.text();
-            } else if (currentFieldName.equals(IndexId.ID)) {
-                id = parser.text();
+        String name;
+        String id;
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) {
+            assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
+            name = null;
+            id = null;
+            while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
+                final String currentFieldName = parser.currentName();
+                parser.nextToken();
+                if (currentFieldName.equals(IndexId.NAME)) {
+                    name = parser.text();
+                } else if (currentFieldName.equals(IndexId.ID)) {
+                    id = parser.text();
+                }
             }
         }
         assertNotNull(name);
diff --git a/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java b/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java
index 0211397fdeee8..b5c6b28693b3a 100644
--- a/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java
+++ b/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java
@@ -27,7 +27,6 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
 
 import static org.hamcrest.core.StringContains.containsString;
 import static org.hamcrest.object.HasToString.hasToString;
@@ -49,13 +48,13 @@ public void tearDown() throws Exception {
         threadPool.shutdown();
     }
 
-    public void testOneUnconsumedParameters() throws Exception {
-        final AtomicBoolean executed = new AtomicBoolean();
+    public void testOneUnconsumedParameters() {
+        final var restChannelConsumer = new TestRestChannelConsumer();
         BaseRestHandler handler = new BaseRestHandler() {
             @Override
-            protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
+            protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) {
                 request.param("consumed");
-                return channel -> executed.set(true);
+                return restChannelConsumer;
             }
 
             @Override
@@ -79,16 +78,17 @@ public List<Route> routes() {
             () -> handler.handleRequest(request, channel, mockClient)
        );
         assertThat(e, hasToString(containsString("request [/] contains unrecognized parameter: [unconsumed]")));
-        assertFalse(executed.get());
+        assertFalse(restChannelConsumer.executed);
+        assertTrue(restChannelConsumer.closed);
     }
 
-    public void testMultipleUnconsumedParameters() throws Exception {
-        final AtomicBoolean executed = new AtomicBoolean();
+    public void testMultipleUnconsumedParameters() {
+        final var restChannelConsumer = new TestRestChannelConsumer();
         BaseRestHandler handler = new BaseRestHandler() {
             @Override
-            protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
+            protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) {
                 request.param("consumed");
-                return channel -> executed.set(true);
+                return restChannelConsumer;
            }
 
             @Override
@@ -113,20 +113,21 @@ public List<Route> routes() {
             () -> handler.handleRequest(request, channel, mockClient)
         );
         assertThat(e, hasToString(containsString("request [/] contains unrecognized parameters: [unconsumed-first], [unconsumed-second]")));
-        assertFalse(executed.get());
+        assertFalse(restChannelConsumer.executed);
+        assertTrue(restChannelConsumer.closed);
     }
 
     public void testUnconsumedParametersDidYouMean() throws Exception {
-        final AtomicBoolean executed = new AtomicBoolean();
+        final var restChannelConsumer = new TestRestChannelConsumer();
         BaseRestHandler handler = new BaseRestHandler() {
             @Override
-            protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
+            protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) {
                 request.param("consumed");
                 request.param("field");
                 request.param("tokenizer");
                 request.param("very_close_to_parameter_1");
                 request.param("very_close_to_parameter_2");
-                return channel -> executed.set(true);
+                return restChannelConsumer;
             }
 
             @Override
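Context for the `TestRestChannelConsumer` used throughout this file (its definition appears at the end of this file's diff): judging by the override pair shown there, a `RestChannelConsumer` is both a consumer of the channel and a releasable resource, so the rewritten tests assert two things separately: whether the consumer was executed, and that it was closed even when parameter validation rejects the request. A simplified, self-contained sketch of that dual shape (the names and types below are illustrative stand-ins, not the actual Elasticsearch interfaces):

```java
import java.util.function.Consumer;

// String stands in for RestChannel; close() mirrors Releasable (no checked exception).
interface ChannelConsumer extends Consumer<String>, AutoCloseable {
    @Override
    void close();
}

class RecordingChannelConsumer implements ChannelConsumer {
    boolean executed; // did the handler run the consumer?
    boolean closed;   // did the handler release it, even on a validation failure?

    @Override
    public void accept(String channel) {
        executed = true;
    }

    @Override
    public void close() {
        closed = true;
    }
}
```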
@@ -171,16 +172,17 @@ public List<Route> routes() {
                 )
             )
         );
-        assertFalse(executed.get());
+        assertFalse(restChannelConsumer.executed);
+        assertTrue(restChannelConsumer.closed);
     }
 
     public void testUnconsumedResponseParameters() throws Exception {
-        final AtomicBoolean executed = new AtomicBoolean();
+        final var restChannelConsumer = new TestRestChannelConsumer();
         BaseRestHandler handler = new BaseRestHandler() {
             @Override
-            protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
+            protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) {
                 request.param("consumed");
-                return channel -> executed.set(true);
+                return restChannelConsumer;
             }
 
             @Override
@@ -205,15 +207,16 @@ public List<Route> routes() {
         RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build();
         RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1);
         handler.handleRequest(request, channel, mockClient);
-        assertTrue(executed.get());
+        assertTrue(restChannelConsumer.executed);
+        assertTrue(restChannelConsumer.closed);
     }
 
     public void testDefaultResponseParameters() throws Exception {
-        final AtomicBoolean executed = new AtomicBoolean();
+        final var restChannelConsumer = new TestRestChannelConsumer();
         BaseRestHandler handler = new BaseRestHandler() {
             @Override
-            protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
-                return channel -> executed.set(true);
+            protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) {
+                return restChannelConsumer;
             }
 
             @Override
@@ -235,15 +238,16 @@ public List<Route> routes() {
         RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build();
         RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1);
         handler.handleRequest(request, channel, mockClient);
-        assertTrue(executed.get());
+        assertTrue(restChannelConsumer.executed);
+        assertTrue(restChannelConsumer.closed);
     }
 
     public void testCatResponseParameters() throws Exception {
-        final AtomicBoolean executed = new AtomicBoolean();
+        final var restChannelConsumer = new TestRestChannelConsumer();
         AbstractCatAction handler = new AbstractCatAction() {
             @Override
             protected RestChannelConsumer doCatRequest(RestRequest request, NodeClient client) {
-                return channel -> executed.set(true);
+                return restChannelConsumer;
             }
 
             @Override
@@ -279,16 +283,17 @@ public List<Route> routes() {
         RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build();
         RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1);
         handler.handleRequest(request, channel, mockClient);
-        assertTrue(executed.get());
+        assertTrue(restChannelConsumer.executed);
+        assertTrue(restChannelConsumer.closed);
     }
 
     public void testConsumedBody() throws Exception {
-        final AtomicBoolean executed = new AtomicBoolean();
+        final var restChannelConsumer = new TestRestChannelConsumer();
         final BaseRestHandler handler = new BaseRestHandler() {
             @Override
             protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
                 request.content();
-                return channel -> executed.set(true);
+                return restChannelConsumer;
             }
 
             @Override
@@ -309,16 +314,17 @@ public List<Route> routes() {
             ).build();
             final RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1);
             handler.handleRequest(request, channel, mockClient);
-            assertTrue(executed.get());
+            assertTrue(restChannelConsumer.executed);
+            assertTrue(restChannelConsumer.closed);
         }
     }
 
     public void testUnconsumedNoBody() throws Exception {
-        final AtomicBoolean executed = new AtomicBoolean();
+        final var restChannelConsumer = new TestRestChannelConsumer();
         final BaseRestHandler handler = new BaseRestHandler() {
             @Override
             protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
-                return channel -> executed.set(true);
+                return restChannelConsumer;
             }
 
             @Override
@@ -335,15 +341,16 @@ public List<Route> routes() {
         final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).build();
         final RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1);
         handler.handleRequest(request, channel, mockClient);
-        assertTrue(executed.get());
+        assertTrue(restChannelConsumer.executed);
+        assertTrue(restChannelConsumer.closed);
     }
 
     public void testUnconsumedBody() throws IOException {
-        final AtomicBoolean executed = new AtomicBoolean();
+        final var restChannelConsumer = new TestRestChannelConsumer();
         final BaseRestHandler handler = new BaseRestHandler() {
             @Override
             protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
-                return channel -> executed.set(true);
+                return restChannelConsumer;
             }
 
             @Override
@@ -368,7 +375,25 @@ public List<Route> routes() {
                 () -> handler.handleRequest(request, channel, mockClient)
             );
             assertThat(e, hasToString(containsString("request [GET /] does not support having a body")));
-            assertFalse(executed.get());
+            assertFalse(restChannelConsumer.executed);
+            assertTrue(restChannelConsumer.closed);
         }
     }
+
+    private static class TestRestChannelConsumer implements BaseRestHandler.RestChannelConsumer {
+        boolean executed;
+        boolean closed;
+
+        @Override
+        public void accept(RestChannel restChannel) {
+            assertFalse(executed);
+            executed = true;
+        }
+
+        @Override
+        public void close() {
+            assertFalse(closed);
+            closed = true;
+        }
+    }
diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java
index 718ba4a0f0e2f..00c65437579ec 100644
--- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java
+++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java
@@ -473,11 +473,19 @@ public boolean supportsContentStream() {
 
     public void testDispatchWithContentStream() {
         final String mediaType = randomFrom("application/json", "application/smile");
-        String content = randomAlphaOfLength((int) Math.round(BREAKER_LIMIT.getBytes() / inFlightRequestsBreaker.getOverhead()));
         final List<String> contentTypeHeader = Collections.singletonList(mediaType);
+        XContentType contentType = RestRequest.parseContentType(contentTypeHeader);
+        byte[] content = randomByteArrayOfLength((int) Math.round(BREAKER_LIMIT.getBytes() / inFlightRequestsBreaker.getOverhead()));
+        if (contentType == XContentType.SMILE) {
+            // fake smile bytes to make parser happy
+            content[0] = (byte) ':';
+            content[1] = (byte) ')';
+            content[2] = (byte) '\n';
+            content[3] = 0;
+        }
         RestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent(
             new BytesArray(content),
-            RestRequest.parseContentType(contentTypeHeader)
+            contentType
         ).withPath("/foo").withHeaders(Collections.singletonMap("Content-Type", contentTypeHeader)).build();
         if (randomBoolean()) {
             fakeRestRequest = new RestRequest(fakeRestRequest);
@@ -818,12 +826,20 @@ public void testDispatchCompatibleRequestToNewlyAddedHandler() {
     }
 
     private FakeRestRequest requestWithContent(String mediaType) {
-        String content = randomAlphaOfLength((int) Math.round(BREAKER_LIMIT.getBytes() / inFlightRequestsBreaker.getOverhead()));
+        byte[] content = randomByteArrayOfLength((int) Math.round(BREAKER_LIMIT.getBytes() / inFlightRequestsBreaker.getOverhead()));
         final List<String> mediaTypeList = Collections.singletonList(mediaType);
-        return new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent(
-            new BytesArray(content),
-            RestRequest.parseContentType(mediaTypeList)
-        ).withPath("/foo").withHeaders(Map.of("Content-Type", mediaTypeList, "Accept", mediaTypeList)).build();
+        XContentType contentType = RestRequest.parseContentType(mediaTypeList);
+        if (contentType == XContentType.SMILE || contentType == XContentType.VND_SMILE) {
+            // fake smile bytes to make parser happy
+            content[0] = (byte) ':';
+            content[1] = (byte) ')';
+            content[2] = (byte) '\n';
+            content[3] = 0;
+        }
+        return new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent(new BytesArray(content), contentType)
+            .withPath("/foo")
+            .withHeaders(Map.of("Content-Type", mediaTypeList, "Accept", mediaTypeList))
+            .build();
     }
 
     public void testCurrentVersionVNDMediaTypeIsNotUsingCompatibility() {
diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsActionTests.java
index bb26a616e0998..331148e1a0023 100644
--- a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsActionTests.java
+++ b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsActionTests.java
@@ -8,7 +8,6 @@
 
 package org.elasticsearch.rest.action.admin.cluster;
 
-import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequest;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.xcontent.XContentParserConfiguration;
@@ -22,16 +21,16 @@ public void testParserWithPassword() throws Exception {
         final String request = """
             {"secure_settings_password": "secure_settings_password_string"}""";
         try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, request)) {
-            NodesReloadSecureSettingsRequest reloadSecureSettingsRequest = RestReloadSecureSettingsAction.PARSER.parse(parser, null);
-            assertEquals("secure_settings_password_string", reloadSecureSettingsRequest.getSecureSettingsPassword().toString());
+            RestReloadSecureSettingsAction.ParsedRequestBody parsedRequestBody = RestReloadSecureSettingsAction.PARSER.parse(parser, null);
+            assertEquals("secure_settings_password_string", parsedRequestBody.secureSettingsPassword.toString());
         }
     }
 
     public void testParserWithoutPassword() throws Exception {
         final String request = "{}";
         try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, request)) {
-            NodesReloadSecureSettingsRequest reloadSecureSettingsRequest = RestReloadSecureSettingsAction.PARSER.parse(parser, null);
-            assertThat(reloadSecureSettingsRequest.getSecureSettingsPassword(), nullValue());
+            RestReloadSecureSettingsAction.ParsedRequestBody parsedRequestBody = RestReloadSecureSettingsAction.PARSER.parse(parser, null);
+            assertThat(parsedRequestBody.secureSettingsPassword, nullValue());
         }
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java
index a531a74d956ee..9aa358123d282 100644
--- a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java
+++ b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java
@@ -395,8 +395,6 @@ public void testClearQueryCancellationsOnClose() throws IOException {
         when(indexShard.getThreadPool()).thenReturn(threadPool);
 
         IndexService indexService = mock(IndexService.class);
-        MapperService mapperService = mock(MapperService.class);
-        when(indexService.mapperService()).thenReturn(mapperService);
 
         try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
 
diff --git a/server/src/test/java/org/elasticsearch/search/SearchHitTests.java b/server/src/test/java/org/elasticsearch/search/SearchHitTests.java
index d08abe5065984..e9bf6f83f5bbc 100644
--- a/server/src/test/java/org/elasticsearch/search/SearchHitTests.java
+++ b/server/src/test/java/org/elasticsearch/search/SearchHitTests.java
@@ -143,7 +143,7 @@ public static SearchHit createTestItem(XContentType xContentType, boolean withOptionalInnerHits, boolean withShardTarget) {
 
     @Override
     protected Writeable.Reader<SearchHit> instanceReader() {
-        return SearchHit::new;
+        return SearchHit::readFrom;
     }
 
     @Override
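The SearchServiceTests change just below swaps `new Semaphore(Integer.MAX_VALUE)` for a small fixed permit count. The idiom: every async indexing request takes one permit before it starts and releases it on completion, whether it succeeded or failed (hence the switch to `ActionListener.running`), so the final `semaphore.acquire(permitCount)` blocks until every in-flight request has finished, and the bounded count also caps how many can be in flight at once. A minimal, self-contained sketch of the drain idiom (all names illustrative):

```java
import java.util.concurrent.Semaphore;

public class SemaphoreDrainSketch {
    public static void main(String[] args) throws InterruptedException {
        final int permitCount = 100; // also bounds the number of in-flight tasks
        Semaphore semaphore = new Semaphore(permitCount);
        for (int i = 0; i < 10; i++) {
            semaphore.acquire(); // one permit per in-flight task
            new Thread(semaphore::release).start(); // the completion handler releases it
        }
        semaphore.acquire(permitCount); // returns only after every task has released
        System.out.println("all in-flight work drained");
    }
}
```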
- try { - semaphore.acquire(); - } catch (InterruptedException e) { - throw new AssertionError(e); - } - prepareIndex("index").setSource("field", "value") - .setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())) - .execute(new ActionListener() { - @Override - public void onResponse(DocWriteResponse indexResponse) { - semaphore.release(); - } - - @Override - public void onFailure(Exception e) { - semaphore.release(); - } - }); + final Thread thread = new Thread(() -> { + startGun.countDown(); + while (running.get()) { + if (randomBoolean()) { + service.afterIndexRemoved(indexService.index(), indexService.getIndexSettings(), DELETED); + } else { + service.beforeIndexShardCreated(routing, indexService.getIndexSettings().getSettings()); + } + if (randomBoolean()) { + // here we trigger some refreshes to ensure the IR go out of scope such that we hit ACE if we access a search + // context in a non-sane way. + try { + semaphore.acquire(); + } catch (InterruptedException e) { + throw new AssertionError(e); } + prepareIndex("index").setSource("field", "value") + .setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())) + .execute(ActionListener.running(semaphore::release)); } } - }; + }); thread.start(); startGun.await(); try { @@ -417,7 +405,7 @@ public void onFailure(Exception e) { } finally { running.set(false); thread.join(); - semaphore.acquire(Integer.MAX_VALUE); + semaphore.acquire(permitCount); } assertEquals(0, service.getActiveContexts()); diff --git a/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java new file mode 100644 index 0000000000000..f435a9da382fb --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
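The `Semaphore` change in `SearchServiceTests` above replaces `Integer.MAX_VALUE` permits with a fixed `permitCount`, which both bounds the number of in-flight indexing requests and makes the final drain cheap and overflow-safe; the accompanying `ActionListener.running(semaphore::release)` collapses the old anonymous listener because the release must happen on both the success and failure paths. A self-contained sketch of the pattern (plain `java.util.concurrent`, not the test's classes):

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

public class BoundedPermits {
    public static void main(String[] args) throws InterruptedException {
        final int permitCount = 100;
        Semaphore semaphore = new Semaphore(permitCount);
        ExecutorService executor = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 1_000; i++) {
            semaphore.acquire();                  // at most permitCount tasks in flight
            executor.execute(semaphore::release); // each task returns its permit when done
        }
        // Reclaiming every permit blocks until all outstanding tasks have released
        // theirs, which is what semaphore.acquire(permitCount) does in the test's
        // finally block before asserting there are no active contexts left.
        semaphore.acquire(permitCount);
        System.out.println("all tasks drained");
        executor.shutdown();
    }
}
```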
+ */ + +package org.elasticsearch.search.TelemetryMetrics; + +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.TestTelemetryPlugin; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Collection; +import java.util.List; + +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.DFS_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FETCH_ID_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FETCH_ID_SCROLL_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FREE_CONTEXT_SCROLL_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_ID_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_SCROLL_ACTION_METRIC; +import static org.elasticsearch.index.query.QueryBuilders.simpleQueryStringQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertScrollResponsesAndHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 1) +public class SearchTransportTelemetryTests extends ESIntegTestCase { + @Override + protected Collection> nodePlugins() { + return List.of(TestTelemetryPlugin.class); + } + + @Override + protected int minimumNumberOfShards() { + return 2; + } + + @Override + protected int maximumNumberOfShards() { + return 7; + } + + @Override + protected int maximumNumberOfReplicas() { + return 0; + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103781") + public void testSearchTransportMetricsDfsQueryThenFetch() throws InterruptedException { + var indexName = "test1"; + createIndex(indexName); + indexRandom(true, false, prepareIndex(indexName).setId("1").setSource("body", "foo")); + + assertSearchHitsWithoutFailures( + prepareSearch(indexName).setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setQuery(simpleQueryStringQuery("foo")), + "1" + ); + assertEquals(getNumShards(indexName).numPrimaries, getNumberOfMeasurements(DFS_ACTION_METRIC)); + assertEquals(getNumShards(indexName).numPrimaries, getNumberOfMeasurements(QUERY_ID_ACTION_METRIC)); + assertNotEquals(0, getNumberOfMeasurements(FETCH_ID_ACTION_METRIC)); + resetMeter(); + } + + public void testSearchTransportMetricsQueryThenFetch() throws InterruptedException { + var indexName = "test2"; + createIndex(indexName); + indexRandom(true, false, prepareIndex(indexName).setId("1").setSource("body", "foo")); + + assertSearchHitsWithoutFailures( + prepareSearch(indexName).setSearchType(SearchType.QUERY_THEN_FETCH).setQuery(simpleQueryStringQuery("foo")), + "1" + ); + assertEquals(getNumShards(indexName).numPrimaries, getNumberOfMeasurements(QUERY_ACTION_METRIC)); + assertNotEquals(0, getNumberOfMeasurements(FETCH_ID_ACTION_METRIC)); + resetMeter(); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103810") + public void testSearchTransportMetricsScroll() throws InterruptedException { + var indexName = "test3"; + createIndex(indexName); + indexRandom( + true, + false, + 
prepareIndex(indexName).setId("1").setSource("body", "foo"), + prepareIndex(indexName).setId("2").setSource("body", "foo") + ); // getNumShards(indexName).numPrimaries + + assertScrollResponsesAndHitCount( + TimeValue.timeValueSeconds(60), + prepareSearch(indexName).setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setSize(1).setQuery(simpleQueryStringQuery("foo")), + 2, + (respNum, response) -> { + if (respNum == 1) { + assertEquals(getNumShards(indexName).numPrimaries, getNumberOfMeasurements(DFS_ACTION_METRIC)); + assertEquals(getNumShards(indexName).numPrimaries, getNumberOfMeasurements(QUERY_ID_ACTION_METRIC)); + assertNotEquals(0, getNumberOfMeasurements(FETCH_ID_ACTION_METRIC)); + resetMeter(); + } else if (respNum == 2) { + assertEquals(getNumShards(indexName).numPrimaries, getNumberOfMeasurements(QUERY_SCROLL_ACTION_METRIC)); + assertNotEquals(0, getNumberOfMeasurements(FETCH_ID_SCROLL_ACTION_METRIC)); + } else { + resetMeter(); + } + } + ); + + assertEquals(getNumShards(indexName).numPrimaries, getNumberOfMeasurements(FREE_CONTEXT_SCROLL_ACTION_METRIC)); + resetMeter(); + } + + private void resetMeter() { + getTestTelemetryPlugin().resetMeter(); + } + + private TestTelemetryPlugin getTestTelemetryPlugin() { + return internalCluster().getDataNodeInstance(PluginsService.class).filterPlugins(TestTelemetryPlugin.class).toList().get(0); + } + + private long getNumberOfMeasurements(String attributeValue) { + final List measurements = getTestTelemetryPlugin().getLongHistogramMeasurement( + org.elasticsearch.action.search.SearchTransportAPMMetrics.SEARCH_ACTION_LATENCY_BASE_METRIC + ); + return measurements.stream() + .filter( + m -> m.attributes().get(org.elasticsearch.action.search.SearchTransportAPMMetrics.ACTION_ATTRIBUTE_NAME) == attributeValue + ) + .count(); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java b/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java index a4e52af5f43c2..d1a7e93efb075 100644 --- a/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java +++ b/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java @@ -40,6 +40,7 @@ import org.apache.lucene.search.KnnFloatVectorQuery; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; import org.apache.lucene.search.Scorable; @@ -515,6 +516,81 @@ public boolean isCacheable(LeafReaderContext ctx) { } } + public void testTimeoutOnRewriteStandalone() throws IOException { + try (Directory dir = newDirectory()) { + indexDocs(dir); + ThreadPoolExecutor executor = null; + try (DirectoryReader directoryReader = DirectoryReader.open(dir)) { + if (randomBoolean()) { + executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(randomIntBetween(2, 5)); + } + ContextIndexSearcher contextIndexSearcher = new ContextIndexSearcher( + directoryReader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + true, + executor, + executor == null ? 
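One nit in the new telemetry test above: `getNumberOfMeasurements` compares the action attribute with `==`, which only holds while both sides are the same interned constant. A value-based sketch of the same helper (same plugin APIs as the test, with `Objects.equals` swapped in; `getTestTelemetryPlugin()` is the accessor defined alongside it):

```java
import java.util.List;
import java.util.Objects;

import org.elasticsearch.action.search.SearchTransportAPMMetrics;
import org.elasticsearch.telemetry.Measurement;

// Sketch: count histogram measurements whose ACTION attribute equals the
// requested value, without relying on reference identity of the strings.
private long getNumberOfMeasurements(String attributeValue) {
    List<Measurement> measurements = getTestTelemetryPlugin().getLongHistogramMeasurement(
        SearchTransportAPMMetrics.SEARCH_ACTION_LATENCY_BASE_METRIC
    );
    return measurements.stream()
        .filter(m -> Objects.equals(m.attributes().get(SearchTransportAPMMetrics.ACTION_ATTRIBUTE_NAME), attributeValue))
        .count();
}
```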
-1 : executor.getMaximumPoolSize(), + 1 + ); + TestQuery testQuery = new TestQuery() { + @Override + public Query rewrite(IndexSearcher indexSearcher) { + contextIndexSearcher.throwTimeExceededException(); + assert false; + return null; + } + }; + Query rewrite = contextIndexSearcher.rewrite(testQuery); + assertThat(rewrite, instanceOf(MatchNoDocsQuery.class)); + assertEquals("MatchNoDocsQuery(\"rewrite timed out\")", rewrite.toString()); + assertTrue(contextIndexSearcher.timeExceeded()); + } finally { + if (executor != null) { + terminate(executor); + } + } + } + } + + public void testTimeoutOnRewriteDuringSearch() throws IOException { + try (Directory dir = newDirectory()) { + indexDocs(dir); + ThreadPoolExecutor executor = null; + try (DirectoryReader directoryReader = DirectoryReader.open(dir)) { + if (randomBoolean()) { + executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(randomIntBetween(2, 5)); + } + ContextIndexSearcher contextIndexSearcher = new ContextIndexSearcher( + directoryReader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + true, + executor, + executor == null ? -1 : executor.getMaximumPoolSize(), + 1 + ); + TestQuery testQuery = new TestQuery() { + @Override + public Query rewrite(IndexSearcher indexSearcher) { + contextIndexSearcher.throwTimeExceededException(); + assert false; + return null; + } + }; + Integer hitCount = contextIndexSearcher.search(testQuery, new TotalHitCountCollectorManager()); + assertEquals(0, hitCount.intValue()); + assertTrue(contextIndexSearcher.timeExceeded()); + } finally { + if (executor != null) { + terminate(executor); + } + } + } + } + private static class TestQuery extends Query { @Override public String toString(String field) { diff --git a/server/src/test/java/org/elasticsearch/search/query/ThrowingQueryBuilder.java b/server/src/test/java/org/elasticsearch/search/query/ThrowingQueryBuilder.java index cd3d195030c55..f42ca49dc14b9 100644 --- a/server/src/test/java/org/elasticsearch/search/query/ThrowingQueryBuilder.java +++ b/server/src/test/java/org/elasticsearch/search/query/ThrowingQueryBuilder.java @@ -67,7 +67,7 @@ public ThrowingQueryBuilder(StreamInput in) throws IOException { this.randomUID = in.readLong(); this.failure = in.readException(); this.shardId = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { this.index = in.readOptionalString(); } else { this.index = null; @@ -79,7 +79,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeLong(randomUID); out.writeException(failure); out.writeVInt(shardId); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeOptionalString(index); } } diff --git a/server/src/test/java/org/elasticsearch/search/suggest/completion/QueryContextTestCase.java b/server/src/test/java/org/elasticsearch/search/suggest/completion/QueryContextTestCase.java index 48711a665c39e..8395fcce918d9 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/completion/QueryContextTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/completion/QueryContextTestCase.java @@ -34,11 +34,12 @@ public void testToXContext() throws IOException { QC toXContent = createTestModel(); XContentBuilder builder = XContentFactory.jsonBuilder(); toXContent.toXContent(builder, 
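Both new `ContextIndexSearcher` tests pin down the same contract: when the time budget expires during `rewrite`, the searcher swallows its internal timeout signal and degrades to a query matching nothing, rather than surfacing an exception to the caller. A standalone Lucene sketch of that degradation (illustrative names; the real check lives inside `ContextIndexSearcher`, and the `BooleanSupplier` stands in for its internal time-exceeded state):

```java
import java.io.IOException;
import java.util.function.BooleanSupplier;

import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;

// Sketch: rewrite under a time budget. On timeout, degrade to MatchNoDocsQuery;
// the reason string is preserved in toString(), which is what the test asserts.
static Query rewriteWithTimeout(IndexSearcher searcher, Query query, BooleanSupplier timedOut) throws IOException {
    if (timedOut.getAsBoolean()) {
        return new MatchNoDocsQuery("rewrite timed out");
    }
    return searcher.rewrite(query);
}
```

This also explains the `testTimeoutOnRewriteDuringSearch` assertion: a `MatchNoDocsQuery` flows through collection normally and simply produces a hit count of zero.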
ToXContent.EMPTY_PARAMS); - XContentParser parser = createParser(builder); - parser.nextToken(); - QC fromXContext = fromXContent(parser); - assertEquals(toXContent, fromXContext); - assertEquals(toXContent.hashCode(), fromXContext.hashCode()); + try (XContentParser parser = createParser(builder)) { + parser.nextToken(); + QC fromXContext = fromXContent(parser); + assertEquals(toXContent, fromXContext); + assertEquals(toXContent.hashCode(), fromXContext.hashCode()); + } } } } diff --git a/server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java b/server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java index a782560c672e9..03ea135625458 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java @@ -108,7 +108,7 @@ public int hashCode() { @Override public List> getQueryVectorBuilders() { return List.of( - new QueryVectorBuilderSpec<>(TestQueryVectorBuilder.NAME, TestQueryVectorBuilder::new, TestQueryVectorBuilder.PARSER::apply) + new QueryVectorBuilderSpec<>(TestQueryVectorBuilder.NAME, TestQueryVectorBuilder::new, TestQueryVectorBuilder.PARSER) ); } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index ad09c58b65cba..1df74c787eec4 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -42,11 +42,8 @@ import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; -import org.elasticsearch.action.admin.indices.mapping.put.AutoPutMappingAction; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.TransportAutoPutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkRequest; @@ -59,6 +56,7 @@ import org.elasticsearch.action.search.SearchPhaseController; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchTransportAPMMetrics; import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.ActionFilters; @@ -175,6 +173,7 @@ import org.elasticsearch.search.SearchService; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.FetchPhase; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -1979,7 +1978,7 @@ protected void assertSnapshotOrGenericThread() { threadPool ); actions.put( - PutMappingAction.INSTANCE, + TransportPutMappingAction.TYPE, new TransportPutMappingAction( transportService, clusterService, @@ -1992,7 +1991,7 
@@ protected void assertSnapshotOrGenericThread() { ) ); actions.put( - AutoPutMappingAction.INSTANCE, + TransportAutoPutMappingAction.TYPE, new TransportAutoPutMappingAction( transportService, clusterService, @@ -2018,7 +2017,8 @@ protected void assertSnapshotOrGenericThread() { actionFilters, indexNameExpressionResolver, namedWriteableRegistry, - EmptySystemIndices.INSTANCE.getExecutorSelector() + EmptySystemIndices.INSTANCE.getExecutorSelector(), + new SearchTransportAPMMetrics(TelemetryProvider.NOOP.getMeterRegistry()) ) ); actions.put( @@ -2099,7 +2099,7 @@ protected void assertSnapshotOrGenericThread() { ) ); actions.put( - IndicesShardStoresAction.INSTANCE, + TransportIndicesShardStoresAction.TYPE, new TransportIndicesShardStoresAction( transportService, clusterService, diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java index 893242ccaa308..873a35aa49107 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java @@ -8,13 +8,19 @@ package org.elasticsearch.snapshots; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState.Custom; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.Entry; import org.elasticsearch.cluster.SnapshotsInProgress.ShardState; import org.elasticsearch.cluster.SnapshotsInProgress.State; +import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; +import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; @@ -38,6 +44,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -46,6 +53,10 @@ public class SnapshotsInProgressSerializationTests extends SimpleDiffableWireSerializationTestCase { + public static final ClusterState CLUSTER_STATE_FOR_NODE_SHUTDOWNS = ClusterState.builder(ClusterName.DEFAULT) + .putCompatibilityVersions("local", new CompatibilityVersions(TransportVersion.current(), Map.of())) + .build(); + @Override protected Custom createTestInstance() { int numberOfSnapshots = randomInt(10); @@ -53,9 +64,39 @@ protected Custom createTestInstance() { for (int i = 0; i < numberOfSnapshots; i++) { snapshotsInProgress = snapshotsInProgress.withAddedEntry(randomSnapshot()); } + + final var nodeIdsForRemoval = randomList(3, ESTestCase::randomUUID); + if (nodeIdsForRemoval.isEmpty() == false) { + snapshotsInProgress = snapshotsInProgress.withUpdatedNodeIdsForRemoval( + getClusterStateWithNodeShutdownMetadata(nodeIdsForRemoval) + ); + } + return snapshotsInProgress; } + private ClusterState getClusterStateWithNodeShutdownMetadata(List nodeIdsForRemoval) { + return CLUSTER_STATE_FOR_NODE_SHUTDOWNS.copyAndUpdateMetadata( + mdb -> mdb.putCustom( + NodesShutdownMetadata.TYPE, + new NodesShutdownMetadata( + nodeIdsForRemoval.stream() + .collect( + 
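`TransportSearchAction` now takes a `SearchTransportAPMMetrics` argument, so any harness that wires the action by hand (as `SnapshotResiliencyTests` does above) needs a metrics instance; the no-op telemetry provider keeps that cheap. A sketch of just the new dependency, using only the types shown in the hunk:

```java
import org.elasticsearch.action.search.SearchTransportAPMMetrics;
import org.elasticsearch.telemetry.TelemetryProvider;

// Sketch: tests that don't assert on APM metrics can satisfy the new
// constructor parameter with the no-op meter registry instead of a mock.
SearchTransportAPMMetrics metrics = new SearchTransportAPMMetrics(TelemetryProvider.NOOP.getMeterRegistry());
```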
Collectors.toMap( + Function.identity(), + nodeId -> SingleNodeShutdownMetadata.builder() + .setType(SingleNodeShutdownMetadata.Type.REMOVE) + .setNodeId(nodeId) + .setStartedAtMillis(randomNonNegativeLong()) + .setReason(getTestName()) + .build() + ) + ) + ) + ) + ); + } + private Entry randomSnapshot() { Snapshot snapshot = new Snapshot("repo-" + randomInt(5), new SnapshotId(randomAlphaOfLength(10), randomAlphaOfLength(10))); boolean includeGlobalState = randomBoolean(); @@ -170,20 +211,30 @@ protected NamedWriteableRegistry getNamedWriteableRegistry() { @Override protected Custom mutateInstance(Custom instance) { final SnapshotsInProgress snapshotsInProgress = (SnapshotsInProgress) instance; - if (snapshotsInProgress.isEmpty()) { - // add or remove an entry - return snapshotsInProgress.withAddedEntry(randomSnapshot()); + if (randomBoolean()) { + if (snapshotsInProgress.isEmpty()) { + // add an entry + return snapshotsInProgress.withAddedEntry(randomSnapshot()); + } else { + // mutate or remove an entry + final String repo = randomFrom( + snapshotsInProgress.asStream().map(SnapshotsInProgress.Entry::repository).collect(Collectors.toSet()) + ); + final List forRepo = snapshotsInProgress.forRepo(repo); + int index = randomIntBetween(0, forRepo.size() - 1); + Entry entry = forRepo.get(index); + final List updatedEntries = new ArrayList<>(forRepo); + if (randomBoolean()) { + updatedEntries.set(index, mutateEntry(entry)); + } else { + updatedEntries.remove(index); + } + return snapshotsInProgress.withUpdatedEntriesForRepo(repo, updatedEntries); + } } else { - // mutate an entry - final String repo = randomFrom( - snapshotsInProgress.asStream().map(SnapshotsInProgress.Entry::repository).collect(Collectors.toSet()) + return snapshotsInProgress.withUpdatedNodeIdsForRemoval( + getClusterStateWithNodeShutdownMetadata(randomList(1, 3, ESTestCase::randomUUID)) ); - final List forRepo = snapshotsInProgress.forRepo(repo); - int index = randomIntBetween(0, forRepo.size() - 1); - Entry entry = forRepo.get(index); - final List updatedEntries = new ArrayList<>(forRepo); - updatedEntries.set(index, mutateEntry(entry)); - return snapshotsInProgress.withUpdatedEntriesForRepo(repo, updatedEntries); } } @@ -414,9 +465,27 @@ public void testXContent() throws IOException { null, IndexVersion.current() ) - ); + ) + .withUpdatedNodeIdsForRemoval( + CLUSTER_STATE_FOR_NODE_SHUTDOWNS.copyAndUpdateMetadata( + b -> b.putCustom( + NodesShutdownMetadata.TYPE, + new NodesShutdownMetadata( + Map.of( + "node-id", + SingleNodeShutdownMetadata.builder() + .setNodeId("node-id") + .setType(SingleNodeShutdownMetadata.Type.REMOVE) + .setStartedAtMillis(randomNonNegativeLong()) + .setReason("test") + .build() + ) + ) + ) + ) + ); - AbstractChunkedSerializingTestCase.assertChunkCount(sip, instance -> Math.toIntExact(instance.asStream().count() + 2)); + AbstractChunkedSerializingTestCase.assertChunkCount(sip, instance -> Math.toIntExact(instance.asStream().count() + 5)); final var json = Strings.toString(sip, false, true); assertThat( json, @@ -467,7 +536,8 @@ public void testXContent() throws IOException { "feature_states": [], "data_streams": [] } - ] + ], + "node_ids_for_removal":["node-id"] }""")), // or the shards might be in the other order: equalTo(XContentHelper.stripWhitespace(""" @@ -516,7 +586,8 @@ public void testXContent() throws IOException { "feature_states": [], "data_streams": [] } - ] + ], + "node_ids_for_removal":["node-id"] }""")) ) ); @@ -532,6 +603,6 @@ public static State randomState(Map st.completed() || st 
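The `getClusterStateWithNodeShutdownMetadata` helper above uses the `Collectors.toMap(identity, valueFn)` shape to turn a list of node ids into an id-to-metadata map. A runnable sketch of that shape, with strings standing in for `SingleNodeShutdownMetadata`:

```java
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class ToMapSketch {
    public static void main(String[] args) {
        // Each element becomes its own key; the value function builds the
        // per-node payload (here a plain string instead of shutdown metadata).
        List<String> nodeIds = List.of("node-a", "node-b", "node-c");
        Map<String, String> shutdowns = nodeIds.stream()
            .collect(Collectors.toMap(Function.identity(), id -> "REMOVE shutdown for " + id));
        System.out.println(shutdowns);
    }
}
```

Note that `Collectors.toMap` throws on duplicate keys, which is safe here because the test generates the ids with `randomUUID`.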
== ShardState.ABORTED)) { return State.ABORTED; } - return randomFrom(State.STARTED, State.INIT); + return State.STARTED; } } diff --git a/server/src/test/java/org/elasticsearch/transport/InboundAggregatorTests.java b/server/src/test/java/org/elasticsearch/transport/InboundAggregatorTests.java index fcea966832553..150345297a99f 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundAggregatorTests.java @@ -25,6 +25,7 @@ import java.util.Collections; import java.util.function.Predicate; +import static org.elasticsearch.common.bytes.ReleasableBytesReferenceStreamInputTests.wrapAsReleasable; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.notNullValue; @@ -63,20 +64,20 @@ public void testInboundAggregation() throws IOException { BytesArray bytes = new BytesArray(randomByteArrayOfLength(10)); ArrayList references = new ArrayList<>(); if (randomBoolean()) { - final ReleasableBytesReference content = ReleasableBytesReference.wrap(bytes); + final ReleasableBytesReference content = wrapAsReleasable(bytes); references.add(content); aggregator.aggregate(content); content.close(); } else { - final ReleasableBytesReference content1 = ReleasableBytesReference.wrap(bytes.slice(0, 3)); + final ReleasableBytesReference content1 = wrapAsReleasable(bytes.slice(0, 3)); references.add(content1); aggregator.aggregate(content1); content1.close(); - final ReleasableBytesReference content2 = ReleasableBytesReference.wrap(bytes.slice(3, 3)); + final ReleasableBytesReference content2 = wrapAsReleasable(bytes.slice(3, 3)); references.add(content2); aggregator.aggregate(content2); content2.close(); - final ReleasableBytesReference content3 = ReleasableBytesReference.wrap(bytes.slice(6, 4)); + final ReleasableBytesReference content3 = wrapAsReleasable(bytes.slice(6, 4)); references.add(content3); aggregator.aggregate(content3); content3.close(); @@ -108,7 +109,7 @@ public void testInboundUnknownAction() throws IOException { aggregator.headerReceived(header); BytesArray bytes = new BytesArray(randomByteArrayOfLength(10)); - final ReleasableBytesReference content = ReleasableBytesReference.wrap(bytes); + final ReleasableBytesReference content = wrapAsReleasable(bytes); aggregator.aggregate(content); content.close(); assertFalse(content.hasReferences()); @@ -137,7 +138,7 @@ public void testCircuitBreak() throws IOException { aggregator.headerReceived(breakableHeader); BytesArray bytes = new BytesArray(randomByteArrayOfLength(10)); - final ReleasableBytesReference content1 = ReleasableBytesReference.wrap(bytes); + final ReleasableBytesReference content1 = wrapAsReleasable(bytes); aggregator.aggregate(content1); content1.close(); @@ -161,7 +162,7 @@ public void testCircuitBreak() throws IOException { // Initiate Message aggregator.headerReceived(unbreakableHeader); - final ReleasableBytesReference content2 = ReleasableBytesReference.wrap(bytes); + final ReleasableBytesReference content2 = wrapAsReleasable(bytes); aggregator.aggregate(content2); content2.close(); @@ -180,7 +181,7 @@ public void testCircuitBreak() throws IOException { // Initiate Message aggregator.headerReceived(handshakeHeader); - final ReleasableBytesReference content3 = ReleasableBytesReference.wrap(bytes); + final ReleasableBytesReference content3 = wrapAsReleasable(bytes); aggregator.aggregate(content3); content3.close(); @@ -203,16 +204,16 @@ public void 
testCloseWillCloseContent() { BytesArray bytes = new BytesArray(randomByteArrayOfLength(10)); ArrayList references = new ArrayList<>(); if (randomBoolean()) { - final ReleasableBytesReference content = ReleasableBytesReference.wrap(bytes); + final ReleasableBytesReference content = wrapAsReleasable(bytes); references.add(content); aggregator.aggregate(content); content.close(); } else { - final ReleasableBytesReference content1 = ReleasableBytesReference.wrap(bytes.slice(0, 5)); + final ReleasableBytesReference content1 = wrapAsReleasable(bytes.slice(0, 5)); references.add(content1); aggregator.aggregate(content1); content1.close(); - final ReleasableBytesReference content2 = ReleasableBytesReference.wrap(bytes.slice(5, 5)); + final ReleasableBytesReference content2 = wrapAsReleasable(bytes.slice(5, 5)); references.add(content2); aggregator.aggregate(content2); content2.close(); @@ -243,7 +244,7 @@ public void testFinishAggregationWillFinishHeader() throws IOException { streamOutput.writeString(actionName); streamOutput.write(randomByteArrayOfLength(10)); - final ReleasableBytesReference content = ReleasableBytesReference.wrap(streamOutput.bytes()); + final ReleasableBytesReference content = wrapAsReleasable(streamOutput.bytes()); aggregator.aggregate(content); content.close(); diff --git a/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java b/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java index f243a894a8f17..ffcd3a3386f1a 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.util.ArrayList; +import static org.elasticsearch.common.bytes.ReleasableBytesReferenceStreamInputTests.wrapAsReleasable; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasItems; import static org.hamcrest.Matchers.instanceOf; @@ -82,7 +83,7 @@ public void testDecode() throws IOException { InboundDecoder decoder = new InboundDecoder(recycler); final ArrayList fragments = new ArrayList<>(); - final ReleasableBytesReference releasable1 = ReleasableBytesReference.wrap(totalBytes); + final ReleasableBytesReference releasable1 = wrapAsReleasable(totalBytes); int bytesConsumed = decoder.decode(releasable1, fragments::add); assertEquals(totalHeaderSize, bytesConsumed); assertTrue(releasable1.hasReferences()); @@ -104,7 +105,7 @@ public void testDecode() throws IOException { fragments.clear(); final BytesReference bytes2 = totalBytes.slice(bytesConsumed, totalBytes.length() - bytesConsumed); - final ReleasableBytesReference releasable2 = ReleasableBytesReference.wrap(bytes2); + final ReleasableBytesReference releasable2 = wrapAsReleasable(bytes2); int bytesConsumed2 = decoder.decode(releasable2, fragments::add); assertEquals(totalBytes.length() - totalHeaderSize, bytesConsumed2); @@ -146,7 +147,7 @@ public void testDecodePreHeaderSizeVariableInt() throws IOException { InboundDecoder decoder = new InboundDecoder(recycler); final ArrayList fragments = new ArrayList<>(); - final ReleasableBytesReference releasable1 = ReleasableBytesReference.wrap(totalBytes); + final ReleasableBytesReference releasable1 = wrapAsReleasable(totalBytes); int bytesConsumed = decoder.decode(releasable1, fragments::add); assertEquals(partialHeaderSize, bytesConsumed); assertTrue(releasable1.hasReferences()); @@ -165,7 +166,7 @@ public void testDecodePreHeaderSizeVariableInt() throws IOException 
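The repeated `ReleasableBytesReference.wrap(...)` to `wrapAsReleasable(...)` swap above pulls a shared helper out of `ReleasableBytesReferenceStreamInputTests`; a plausible reading is that the helper attaches a real reference count, so assertions such as `assertFalse(content.hasReferences())` can actually observe the close. A plain-Java sketch of that idea (conceptual only, not the ES classes):

```java
import java.util.concurrent.atomic.AtomicInteger;

// Conceptual sketch: a wrapper whose hasReferences() genuinely flips after the
// final close(), which is what release-semantics assertions in these tests need.
final class RefCountedBuffer {
    private final byte[] bytes;
    private final AtomicInteger refs = new AtomicInteger(1);

    RefCountedBuffer(byte[] bytes) {
        this.bytes = bytes;
    }

    boolean hasReferences() {
        return refs.get() > 0;
    }

    void close() {
        if (refs.decrementAndGet() < 0) {
            throw new IllegalStateException("closed more times than retained");
        }
    }
}
```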
{ fragments.clear(); final BytesReference bytes2 = totalBytes.slice(bytesConsumed, totalBytes.length() - bytesConsumed); - final ReleasableBytesReference releasable2 = ReleasableBytesReference.wrap(bytes2); + final ReleasableBytesReference releasable2 = wrapAsReleasable(bytes2); int bytesConsumed2 = decoder.decode(releasable2, fragments::add); if (compressionScheme == null) { assertEquals(2, fragments.size()); @@ -203,7 +204,7 @@ public void testDecodeHandshakeCompatibility() throws IOException { InboundDecoder decoder = new InboundDecoder(recycler); final ArrayList fragments = new ArrayList<>(); - final ReleasableBytesReference releasable1 = ReleasableBytesReference.wrap(bytes); + final ReleasableBytesReference releasable1 = wrapAsReleasable(bytes); int bytesConsumed = decoder.decode(releasable1, fragments::add); assertEquals(totalHeaderSize, bytesConsumed); assertTrue(releasable1.hasReferences()); @@ -249,14 +250,14 @@ public void testClientChannelTypeFailsDecodingRequests() throws Exception { try (InboundDecoder clientDecoder = new InboundDecoder(recycler, ChannelType.CLIENT)) { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> clientDecoder.decode(ReleasableBytesReference.wrap(bytes), ignored -> {}) + () -> clientDecoder.decode(wrapAsReleasable(bytes), ignored -> {}) ); assertThat(e.getMessage(), containsString("client channels do not accept inbound requests, only responses")); } // the same message will be decoded by a server or mixed decoder try (InboundDecoder decoder = new InboundDecoder(recycler, randomFrom(ChannelType.SERVER, ChannelType.MIX))) { final ArrayList fragments = new ArrayList<>(); - int bytesConsumed = decoder.decode(ReleasableBytesReference.wrap(bytes), fragments::add); + int bytesConsumed = decoder.decode(wrapAsReleasable(bytes), fragments::add); int totalHeaderSize = TcpHeader.headerSize(TransportVersion.current()) + bytes.getInt( TcpHeader.VARIABLE_HEADER_SIZE_POSITION ); @@ -291,14 +292,14 @@ public void testServerChannelTypeFailsDecodingResponses() throws Exception { try (RecyclerBytesStreamOutput os = new RecyclerBytesStreamOutput(recycler)) { final BytesReference bytes = message.serialize(os); try (InboundDecoder decoder = new InboundDecoder(recycler, ChannelType.SERVER)) { - final ReleasableBytesReference releasable1 = ReleasableBytesReference.wrap(bytes); + final ReleasableBytesReference releasable1 = wrapAsReleasable(bytes); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> decoder.decode(releasable1, ignored -> {})); assertThat(e.getMessage(), containsString("server channels do not accept inbound responses, only requests")); } // the same message will be decoded by a client or mixed decoder try (InboundDecoder decoder = new InboundDecoder(recycler, randomFrom(ChannelType.CLIENT, ChannelType.MIX))) { final ArrayList fragments = new ArrayList<>(); - int bytesConsumed = decoder.decode(ReleasableBytesReference.wrap(bytes), fragments::add); + int bytesConsumed = decoder.decode(wrapAsReleasable(bytes), fragments::add); int totalHeaderSize = TcpHeader.headerSize(TransportVersion.current()) + bytes.getInt( TcpHeader.VARIABLE_HEADER_SIZE_POSITION ); @@ -350,7 +351,7 @@ public void testCompressedDecode() throws IOException { InboundDecoder decoder = new InboundDecoder(recycler); final ArrayList fragments = new ArrayList<>(); - final ReleasableBytesReference releasable1 = ReleasableBytesReference.wrap(totalBytes); + final ReleasableBytesReference releasable1 = wrapAsReleasable(totalBytes); int 
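The two channel-type tests above encode a simple invariant: a client channel only ever receives responses, a server channel only requests, and MIX accepts both. A compact sketch of the direction check being pinned down (illustrative code mirroring the asserted error messages, not the decoder's internals):

```java
// Sketch: decode-time direction validation per channel type.
enum ChannelType { SERVER, CLIENT, MIX }

static void checkDirection(ChannelType type, boolean inboundIsRequest) {
    if (inboundIsRequest && type == ChannelType.CLIENT) {
        throw new IllegalArgumentException("client channels do not accept inbound requests, only responses");
    }
    if (inboundIsRequest == false && type == ChannelType.SERVER) {
        throw new IllegalArgumentException("server channels do not accept inbound responses, only requests");
    }
}
```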
bytesConsumed = decoder.decode(releasable1, fragments::add); assertEquals(totalHeaderSize, bytesConsumed); assertTrue(releasable1.hasReferences()); @@ -372,7 +373,7 @@ public void testCompressedDecode() throws IOException { fragments.clear(); final BytesReference bytes2 = totalBytes.slice(bytesConsumed, totalBytes.length() - bytesConsumed); - final ReleasableBytesReference releasable2 = ReleasableBytesReference.wrap(bytes2); + final ReleasableBytesReference releasable2 = wrapAsReleasable(bytes2); int bytesConsumed2 = decoder.decode(releasable2, fragments::add); assertEquals(totalBytes.length() - totalHeaderSize, bytesConsumed2); @@ -414,7 +415,7 @@ public void testCompressedDecodeHandshakeCompatibility() throws IOException { InboundDecoder decoder = new InboundDecoder(recycler); final ArrayList fragments = new ArrayList<>(); - final ReleasableBytesReference releasable1 = ReleasableBytesReference.wrap(bytes); + final ReleasableBytesReference releasable1 = wrapAsReleasable(bytes); int bytesConsumed = decoder.decode(releasable1, fragments::add); assertEquals(totalHeaderSize, bytesConsumed); assertTrue(releasable1.hasReferences()); @@ -451,7 +452,7 @@ public void testVersionIncompatibilityDecodeException() throws IOException { InboundDecoder decoder = new InboundDecoder(recycler); final ArrayList fragments = new ArrayList<>(); - try (ReleasableBytesReference r = ReleasableBytesReference.wrap(bytes)) { + try (ReleasableBytesReference r = wrapAsReleasable(bytes)) { releasable1 = r; expectThrows(IllegalStateException.class, () -> decoder.decode(releasable1, fragments::add)); } diff --git a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java index 6ace25021348c..39d5d768f81ab 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java @@ -198,7 +198,6 @@ public TestResponse read(StreamInput in) throws IOException { TransportChannel transportChannel = channelCaptor.get(); assertEquals(TransportVersion.current(), transportChannel.getVersion()); - assertEquals("transport", transportChannel.getChannelType()); assertEquals(requestValue, requestCaptor.get().value); String responseValue = randomAlphaOfLength(10); diff --git a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java index ead43d0bac05e..b3c7c5adac95d 100644 --- a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java @@ -130,7 +130,11 @@ public void testProxyStrategyWillOpenExpectedNumberOfConnectionsToAddress() { ); int numOfConnections = randomIntBetween(4, 8); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -188,7 +192,11 @@ public void testProxyStrategyWillOpenNewConnectionsOnDisconnect() throws Excepti AtomicBoolean useAddress1 = new AtomicBoolean(true); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + 
RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -263,7 +271,11 @@ public void testConnectFailsWithIncompatibleNodes() { ); int numOfConnections = randomIntBetween(4, 8); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -328,7 +340,11 @@ public void testConnectFailsWithNonRetryableException() { ); int numOfConnections = randomIntBetween(4, 8); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -388,7 +404,11 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro AtomicBoolean useAddress1 = new AtomicBoolean(true); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -459,7 +479,11 @@ public void testProxyStrategyWillResolveAddressesEachConnect() throws Exception ); int numOfConnections = randomIntBetween(4, 8); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -511,7 +535,11 @@ public void onNodeConnected(DiscoveryNode node, Transport.Connection connection) }); try ( - var remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + var remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); var strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -554,7 +582,11 @@ public void testProxyStrategyWillNeedToBeRebuiltIfNumOfSocketsOrAddressesOrServe ); int numOfConnections = randomIntBetween(4, 8); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -672,7 +704,11 @@ public void testServerNameAttributes() { ); int numOfConnections = randomIntBetween(4, 8); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new 
RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index d4f03f1027838..947b894124137 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -45,7 +45,6 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; @@ -62,6 +61,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Objects; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; @@ -153,19 +153,16 @@ public static MockTransportService startTransport( 1F ); } else { - searchHits = new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN); + searchHits = SearchHits.empty(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN); } - InternalSearchResponse response = new InternalSearchResponse( + SearchResponse searchResponse = new SearchResponse( searchHits, InternalAggregations.EMPTY, null, - null, false, null, - 1 - ); - SearchResponse searchResponse = new SearchResponse( - response, + null, + 1, null, 1, 1, @@ -252,7 +249,14 @@ public void run() { AtomicReference exceptionReference = new AtomicReference<>(); String clusterAlias = "test-cluster"; Settings settings = buildRandomSettings(clusterAlias, addresses(seedNode)); - try (RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, randomBoolean())) { + try ( + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + randomFrom(RemoteClusterCredentialsManager.EMPTY, buildCredentialsManager(clusterAlias)) + ) + ) { ActionListener listener = ActionListener.wrap(x -> { listenerCalled.countDown(); fail("expected exception"); @@ -322,7 +326,14 @@ public void testCloseWhileConcurrentlyConnecting() throws IOException, Interrupt service.acceptIncomingRequests(); String clusterAlias = "test-cluster"; Settings settings = buildRandomSettings(clusterAlias, seedNodes); - try (RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, false)) { + try ( + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + RemoteClusterCredentialsManager.EMPTY + ) + ) { int numThreads = randomIntBetween(4, 10); Thread[] threads = new Thread[numThreads]; CyclicBarrier barrier = new CyclicBarrier(numThreads + 1); @@ -470,7 +481,12 @@ private void doTestGetConnectionInfo(boolean hasClusterCredentials) throws Excep settings = Settings.builder().put(settings).setSecureSettings(secureSettings).build(); } try ( - RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, hasClusterCredentials) + RemoteClusterConnection connection = new RemoteClusterConnection( 
+ settings, + clusterAlias, + service, + hasClusterCredentials ? buildCredentialsManager(clusterAlias) : RemoteClusterCredentialsManager.EMPTY + ) ) { // test no nodes connected RemoteConnectionInfo remoteConnectionInfo = assertSerialization(connection.getConnectionInfo()); @@ -662,7 +678,12 @@ private void doTestCollectNodes(boolean hasClusterCredentials) throws Exception } try ( - RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, hasClusterCredentials) + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + hasClusterCredentials ? buildCredentialsManager(clusterAlias) : RemoteClusterCredentialsManager.EMPTY + ) ) { CountDownLatch responseLatch = new CountDownLatch(1); AtomicReference> reference = new AtomicReference<>(); @@ -713,7 +734,14 @@ public void testNoChannelsExceptREG() throws Exception { String clusterAlias = "test-cluster"; Settings settings = buildRandomSettings(clusterAlias, addresses(seedNode)); - try (RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, false)) { + try ( + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + RemoteClusterCredentialsManager.EMPTY + ) + ) { PlainActionFuture plainActionFuture = new PlainActionFuture<>(); connection.ensureConnected(plainActionFuture); plainActionFuture.get(10, TimeUnit.SECONDS); @@ -779,7 +807,14 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted String clusterAlias = "test-cluster"; Settings settings = buildRandomSettings(clusterAlias, seedNodes); - try (RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, randomBoolean())) { + try ( + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + randomFrom(RemoteClusterCredentialsManager.EMPTY, buildCredentialsManager(clusterAlias)) + ) + ) { final int numGetThreads = randomIntBetween(4, 10); final Thread[] getThreads = new Thread[numGetThreads]; final int numModifyingThreads = randomIntBetween(4, 10); @@ -873,7 +908,14 @@ public void testGetConnection() throws Exception { service.acceptIncomingRequests(); String clusterAlias = "test-cluster"; Settings settings = buildRandomSettings(clusterAlias, addresses(seedNode)); - try (RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, false)) { + try ( + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + RemoteClusterCredentialsManager.EMPTY + ) + ) { PlainActionFuture.get(fut -> connection.ensureConnected(fut.map(x -> null))); for (int i = 0; i < 10; i++) { // always a direct connection as the remote node is already connected @@ -921,4 +963,13 @@ private static Settings buildSniffSettings(String clusterAlias, List see ); return builder.build(); } + + private static RemoteClusterCredentialsManager buildCredentialsManager(String clusterAlias) { + Objects.requireNonNull(clusterAlias); + final Settings.Builder builder = Settings.builder(); + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("cluster.remote." 
+ clusterAlias + ".credentials", randomAlphaOfLength(20)); + builder.setSecureSettings(secureSettings); + return new RemoteClusterCredentialsManager(builder.build()); + } } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterCredentialsManagerTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterCredentialsManagerTests.java new file mode 100644 index 0000000000000..f02148a40e47e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterCredentialsManagerTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class RemoteClusterCredentialsManagerTests extends ESTestCase { + public void testResolveRemoteClusterCredentials() { + final String clusterAlias = randomAlphaOfLength(9); + final String otherClusterAlias = randomAlphaOfLength(10); + + final String secret = randomAlphaOfLength(20); + final Settings settings = buildSettingsWithCredentials(clusterAlias, secret); + RemoteClusterCredentialsManager credentialsManager = new RemoteClusterCredentialsManager(settings); + assertThat(credentialsManager.resolveCredentials(clusterAlias).toString(), equalTo(secret)); + assertThat(credentialsManager.hasCredentials(otherClusterAlias), is(false)); + + final String updatedSecret = randomAlphaOfLength(21); + credentialsManager.updateClusterCredentials(buildSettingsWithCredentials(clusterAlias, updatedSecret)); + assertThat(credentialsManager.resolveCredentials(clusterAlias).toString(), equalTo(updatedSecret)); + + credentialsManager.updateClusterCredentials(Settings.EMPTY); + assertThat(credentialsManager.hasCredentials(clusterAlias), is(false)); + } + + private Settings buildSettingsWithCredentials(String clusterAlias, String secret) { + final Settings.Builder builder = Settings.builder(); + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("cluster.remote." 
+ clusterAlias + ".credentials", secret); + return builder.setSecureSettings(secureSettings).build(); + } +} diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java index 839138d3c7c34..d8ddd7c356b33 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -23,9 +24,11 @@ import java.io.IOException; import java.net.InetAddress; import java.util.HashSet; +import java.util.Optional; import java.util.Set; import java.util.concurrent.ExecutionException; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresentWith; import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_HANDSHAKE_ACTION_NAME; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -34,6 +37,7 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class RemoteConnectionManagerTests extends ESTestCase { @@ -49,6 +53,7 @@ public void setUp() throws Exception { transport = mock(Transport.class); remoteConnectionManager = new RemoteConnectionManager( "remote-cluster", + RemoteClusterCredentialsManager.EMPTY, new ClusterConnectionManager(Settings.EMPTY, transport, new ThreadContext(Settings.EMPTY)) ); @@ -120,10 +125,13 @@ public void testResolveRemoteClusterAlias() throws ExecutionException, Interrupt public void testRewriteHandshakeAction() throws IOException { final Transport.Connection connection = mock(Transport.Connection.class); + final String clusterAlias = randomAlphaOfLengthBetween(3, 8); + final RemoteClusterCredentialsManager credentialsResolver = mock(RemoteClusterCredentialsManager.class); + when(credentialsResolver.resolveCredentials(clusterAlias)).thenReturn(new SecureString(randomAlphaOfLength(42))); final Transport.Connection wrappedConnection = RemoteConnectionManager.wrapConnectionWithRemoteClusterInfo( connection, - randomAlphaOfLengthBetween(3, 8), - RemoteClusterPortSettings.REMOTE_CLUSTER_PROFILE + clusterAlias, + credentialsResolver ); final long requestId = randomLong(); final TransportRequest request = mock(TransportRequest.class); @@ -142,6 +150,25 @@ public void testRewriteHandshakeAction() throws IOException { verify(connection).sendRequest(requestId, anotherAction, request, options); } + public void testWrapAndResolveConnectionRoundTrip() { + final Transport.Connection connection = mock(Transport.Connection.class); + final String clusterAlias = randomAlphaOfLengthBetween(3, 8); + final RemoteClusterCredentialsManager credentialsResolver = mock(RemoteClusterCredentialsManager.class); + final SecureString credentials = new SecureString(randomAlphaOfLength(42)); + // second credential will never be resolved + when(credentialsResolver.resolveCredentials(clusterAlias)).thenReturn(credentials, (SecureString) null); + final Transport.Connection wrappedConnection 
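Taken together, `RemoteClusterCredentialsManagerTests` above exercises the manager's whole surface: it is keyed by the secure setting `cluster.remote.<alias>.credentials`, resolves to a `SecureString`, and can be live-updated (including removal) from fresh `Settings`. A usage sketch with a hypothetical alias, using only APIs shown in the tests:

```java
import org.elasticsearch.common.settings.MockSecureSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.transport.RemoteClusterCredentialsManager;

// Sketch: build settings carrying a credential for alias "my_remote"
// (MockSecureSettings stands in for the real keystore in tests).
MockSecureSettings secureSettings = new MockSecureSettings();
secureSettings.setString("cluster.remote.my_remote.credentials", "my-api-key");
Settings settings = Settings.builder().setSecureSettings(secureSettings).build();

RemoteClusterCredentialsManager manager = new RemoteClusterCredentialsManager(settings);
assert manager.hasCredentials("my_remote");
assert manager.hasCredentials("other_remote") == false;

// Removing the credential is just an update with settings that lack the key.
manager.updateClusterCredentials(Settings.EMPTY);
assert manager.hasCredentials("my_remote") == false;
```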
= RemoteConnectionManager.wrapConnectionWithRemoteClusterInfo( + connection, + clusterAlias, + credentialsResolver + ); + + final Optional actual = RemoteConnectionManager + .resolveRemoteClusterAliasWithCredentials(wrappedConnection); + + assertThat(actual, isPresentWith(new RemoteConnectionManager.RemoteClusterAliasWithCredentials(clusterAlias, credentials))); + } + private static class TestRemoteConnection extends CloseableConnection { private final DiscoveryNode node; diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java index 5d461e906a266..ca9986ba5eb1f 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java @@ -26,7 +26,11 @@ public void testStrategyChangeMeansThatStrategyMustBeRebuilt() { mock(Transport.class), threadContext ); - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager("cluster-alias", connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + "cluster-alias", + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); FakeConnectionStrategy first = new FakeConnectionStrategy( "cluster-alias", mock(TransportService.class), @@ -46,7 +50,11 @@ public void testSameStrategyChangeMeansThatStrategyDoesNotNeedToBeRebuilt() { mock(Transport.class), threadContext ); - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager("cluster-alias", connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + "cluster-alias", + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); FakeConnectionStrategy first = new FakeConnectionStrategy( "cluster-alias", mock(TransportService.class), @@ -69,7 +77,11 @@ public void testChangeInConnectionProfileMeansTheStrategyMustBeRebuilt() { assertEquals(TimeValue.MINUS_ONE, connectionManager.getConnectionProfile().getPingInterval()); assertEquals(Compression.Enabled.INDEXING_DATA, connectionManager.getConnectionProfile().getCompressionEnabled()); assertEquals(Compression.Scheme.LZ4, connectionManager.getConnectionProfile().getCompressionScheme()); - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager("cluster-alias", connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + "cluster-alias", + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); FakeConnectionStrategy first = new FakeConnectionStrategy( "cluster-alias", mock(TransportService.class), diff --git a/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java index 3c955258d45c8..121e3ec1d35de 100644 --- a/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java @@ -55,6 +55,8 @@ import java.util.function.Predicate; import java.util.function.Supplier; +import static org.elasticsearch.test.LambdaMatchers.falseWith; +import static org.elasticsearch.test.LambdaMatchers.trueWith; import static org.hamcrest.CoreMatchers.startsWith; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.endsWith; @@ -192,7 +194,11 @@ public void 
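The round-trip test's stubbing above is worth a note: Mockito's varargs `thenReturn` answers consecutive calls in order, so returning `(SecureString) null` second proves the wrapped connection captured its credentials once, at wrap time, rather than re-resolving on every access (hence the diff's "second credential will never be resolved" comment). The shape of that stub, with an illustrative alias and secret:

```java
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.transport.RemoteClusterCredentialsManager;

// Sketch: consecutive stubbing. The first resolveCredentials call returns the
// secret; every later call returns null, so the assertion on the round-tripped
// credentials would fail if resolution happened a second time.
RemoteClusterCredentialsManager credentialsResolver = mock(RemoteClusterCredentialsManager.class);
SecureString credentials = new SecureString("s3cr3t".toCharArray());
when(credentialsResolver.resolveCredentials("my_remote")).thenReturn(credentials, (SecureString) null);
```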
testSniffStrategyWillConnectToAndDiscoverNodes() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + hasClusterCredentials ? new RemoteClusterCredentialsManager(clientSettings) : RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -262,7 +268,11 @@ public void testSniffStrategyWillResolveDiscoveryNodesEachConnect() throws Excep threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -336,7 +346,11 @@ public void testSniffStrategyWillConnectToMaxAllowedNodesAndOpenNewConnectionsOn threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -424,7 +438,11 @@ public void testDiscoverWithSingleIncompatibleSeedNode() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -486,7 +504,11 @@ public void testConnectFailsWithIncompatibleNodes() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -549,7 +571,11 @@ public void testFilterNodesWithNodePredicate() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -617,7 +643,11 @@ public void testConnectFailsIfNoConnectionsOpened() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -694,7 +724,11 @@ public void 
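Each strategy test in this file repeats the same three-argument construction, so the shape is worth spelling out once. A hypothetical helper, shown only to summarize the pattern (it is not part of the change):

```java
// Settings-backed credentials when the test exercises them, EMPTY otherwise.
class RemoteConnectionManagerFactory {
    static RemoteConnectionManager create(
        String clusterAlias,
        Settings clientSettings,          // may carry cluster.remote.<alias>.credentials
        boolean hasClusterCredentials,
        ClusterConnectionManager delegate
    ) {
        RemoteClusterCredentialsManager credentials = hasClusterCredentials
            ? new RemoteClusterCredentialsManager(clientSettings)
            : RemoteClusterCredentialsManager.EMPTY;
        return new RemoteConnectionManager(clusterAlias, credentials, delegate);
    }
}
```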
testClusterNameValidationPreventConnectingToDifferentClusters() thro threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -783,7 +817,11 @@ public void testMultipleCallsToConnectEnsuresConnection() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -895,7 +933,11 @@ public void testConfiguredProxyAddressModeWillReplaceNodeAddress() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -964,7 +1006,11 @@ public void testSniffStrategyWillNeedToBeRebuiltIfNumOfConnectionsOrSeedsOrProxy threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -1019,7 +1065,7 @@ public void testGetNodePredicateNodeRoles() { Predicate nodePredicate = SniffConnectionStrategy.getNodePredicate(Settings.EMPTY); { DiscoveryNode all = DiscoveryNodeUtils.create("id", address); - assertTrue(nodePredicate.test(all)); + assertThat(nodePredicate, trueWith(all)); } { DiscoveryNode dataMaster = DiscoveryNodeUtils.create( @@ -1028,7 +1074,7 @@ public void testGetNodePredicateNodeRoles() { Collections.emptyMap(), Set.of(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.MASTER_ROLE) ); - assertTrue(nodePredicate.test(dataMaster)); + assertThat(nodePredicate, trueWith(dataMaster)); } { DiscoveryNode dedicatedMaster = DiscoveryNodeUtils.create( @@ -1037,7 +1083,7 @@ public void testGetNodePredicateNodeRoles() { Collections.emptyMap(), Set.of(DiscoveryNodeRole.MASTER_ROLE) ); - assertFalse(nodePredicate.test(dedicatedMaster)); + assertThat(nodePredicate, falseWith(dedicatedMaster)); } { DiscoveryNode dedicatedIngest = DiscoveryNodeUtils.create( @@ -1046,7 +1092,7 @@ public void testGetNodePredicateNodeRoles() { Collections.emptyMap(), Set.of(DiscoveryNodeRole.INGEST_ROLE) ); - assertTrue(nodePredicate.test(dedicatedIngest)); + assertThat(nodePredicate, trueWith(dedicatedIngest)); } { DiscoveryNode masterIngest = DiscoveryNodeUtils.create( @@ -1055,7 +1101,7 @@ public void testGetNodePredicateNodeRoles() { Collections.emptyMap(), Set.of(DiscoveryNodeRole.INGEST_ROLE, DiscoveryNodeRole.MASTER_ROLE) ); - assertTrue(nodePredicate.test(masterIngest)); + assertThat(nodePredicate, trueWith(masterIngest)); } { DiscoveryNode 
dedicatedData = DiscoveryNodeUtils.create( @@ -1064,7 +1110,7 @@ public void testGetNodePredicateNodeRoles() { Collections.emptyMap(), Set.of(DiscoveryNodeRole.DATA_ROLE) ); - assertTrue(nodePredicate.test(dedicatedData)); + assertThat(nodePredicate, trueWith(dedicatedData)); } { DiscoveryNode ingestData = DiscoveryNodeUtils.create( @@ -1073,11 +1119,11 @@ public void testGetNodePredicateNodeRoles() { Collections.emptyMap(), Set.of(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.INGEST_ROLE) ); - assertTrue(nodePredicate.test(ingestData)); + assertThat(nodePredicate, trueWith(ingestData)); } { DiscoveryNode coordOnly = DiscoveryNodeUtils.create("id", address, Collections.emptyMap(), Set.of()); - assertTrue(nodePredicate.test(coordOnly)); + assertThat(nodePredicate, trueWith(coordOnly)); } } @@ -1099,18 +1145,18 @@ public void testGetNodePredicateNodeAttrs() { Predicate nodePredicate = SniffConnectionStrategy.getNodePredicate(settings); { DiscoveryNode nonGatewayNode = DiscoveryNodeUtils.create("id", address, Collections.singletonMap("gateway", "false"), roles); - assertFalse(nodePredicate.test(nonGatewayNode)); - assertTrue(SniffConnectionStrategy.getNodePredicate(Settings.EMPTY).test(nonGatewayNode)); + assertThat(nodePredicate, falseWith(nonGatewayNode)); + assertThat(SniffConnectionStrategy.getNodePredicate(Settings.EMPTY), trueWith(nonGatewayNode)); } { DiscoveryNode gatewayNode = DiscoveryNodeUtils.create("id", address, Collections.singletonMap("gateway", "true"), roles); - assertTrue(nodePredicate.test(gatewayNode)); - assertTrue(SniffConnectionStrategy.getNodePredicate(Settings.EMPTY).test(gatewayNode)); + assertThat(nodePredicate, trueWith(gatewayNode)); + assertThat(SniffConnectionStrategy.getNodePredicate(Settings.EMPTY), trueWith(gatewayNode)); } { DiscoveryNode noAttrNode = DiscoveryNodeUtils.create("id", address, Collections.emptyMap(), roles); - assertFalse(nodePredicate.test(noAttrNode)); - assertTrue(SniffConnectionStrategy.getNodePredicate(Settings.EMPTY).test(noAttrNode)); + assertThat(nodePredicate, falseWith(noAttrNode)); + assertThat(SniffConnectionStrategy.getNodePredicate(Settings.EMPTY), trueWith(noAttrNode)); } } @@ -1127,7 +1173,7 @@ public void testGetNodePredicatesCombination() { Collections.singletonMap("gateway", "true"), dedicatedMasterRoles ); - assertFalse(nodePredicate.test(node)); + assertThat(nodePredicate, falseWith(node)); } { DiscoveryNode node = DiscoveryNodeUtils.create( @@ -1136,7 +1182,7 @@ public void testGetNodePredicatesCombination() { Collections.singletonMap("gateway", "false"), dedicatedMasterRoles ); - assertFalse(nodePredicate.test(node)); + assertThat(nodePredicate, falseWith(node)); } { DiscoveryNode node = DiscoveryNodeUtils.create( @@ -1145,11 +1191,11 @@ public void testGetNodePredicatesCombination() { Collections.singletonMap("gateway", "false"), dedicatedMasterRoles ); - assertFalse(nodePredicate.test(node)); + assertThat(nodePredicate, falseWith(node)); } { DiscoveryNode node = DiscoveryNodeUtils.create("id", address, Collections.singletonMap("gateway", "true"), allRoles); - assertTrue(nodePredicate.test(node)); + assertThat(nodePredicate, trueWith(node)); } } diff --git a/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java b/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java index fb0eb314a1e33..221e57d913dc7 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java +++ 
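The conversions above from `assertTrue(predicate.test(x))` to `assertThat(predicate, trueWith(x))` are about failure messages: the matcher form reports a description and the offending input instead of a bare boolean. A simplified, hypothetical re-implementation of a `trueWith`-style matcher, for readers unfamiliar with the test framework's `LambdaMatchers`:

```java
import org.hamcrest.Description;
import org.hamcrest.TypeSafeMatcher;
import java.util.function.Predicate;

// Matches a Predicate that returns true for the given input value.
final class TrueWith<T> extends TypeSafeMatcher<Predicate<T>> {
    private final T value;

    TrueWith(T value) {
        this.value = value;
    }

    @Override
    protected boolean matchesSafely(Predicate<T> predicate) {
        return predicate.test(value);
    }

    @Override
    public void describeTo(Description description) {
        description.appendText("a predicate returning true for ").appendValue(value);
    }
}
```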
b/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java @@ -518,11 +518,6 @@ public String getProfileName() { return in.getProfileName(); } - @Override - public String getChannelType() { - return in.getChannelType(); - } - @Override public void sendResponse(TransportResponse response) throws IOException { onResponse.accept(response); diff --git a/server/src/test/java/org/elasticsearch/transport/TransportInfoTests.java b/server/src/test/java/org/elasticsearch/transport/TransportInfoTests.java index 8e23f0e3984b9..261a4ba339c18 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportInfoTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportInfoTests.java @@ -70,7 +70,10 @@ private void assertPublishAddress(TransportInfo httpInfo, String expected) throw httpInfo.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - Map transportMap = (Map) createParser(builder).map().get(TransportInfo.Fields.TRANSPORT); + Map transportMap; + try (var parser = createParser(builder)) { + transportMap = (Map) parser.map().get(TransportInfo.Fields.TRANSPORT); + } Map profilesMap = (Map) transportMap.get("profiles"); assertEquals(expected, transportMap.get(TransportInfo.Fields.PUBLISH_ADDRESS)); assertEquals(expected, ((Map) profilesMap.get("test_profile")).get(TransportInfo.Fields.PUBLISH_ADDRESS)); diff --git a/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java b/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java index 25ce8254a59c2..38c34f3668844 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java @@ -438,11 +438,6 @@ public String getProfileName() { return channel.getProfileName(); } - @Override - public String getChannelType() { - return channel.getChannelType(); - } - @Override public void sendResponse(TransportResponse response) throws IOException { assertThat(response, instanceOf(TransportService.HandshakeResponse.class)); diff --git a/settings.gradle b/settings.gradle index 90422913ef441..ce35c873f176e 100644 --- a/settings.gradle +++ b/settings.gradle @@ -100,7 +100,6 @@ List projects = [ 'test:fixtures:testcontainer-utils', 'test:fixtures:geoip-fixture', 'test:fixtures:url-fixture', - 'test:fixtures:nginx-fixture', 'test:logger-usage', 'test:test-clusters', 'test:x-content', diff --git a/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/MetricsApmIT.java b/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/MetricsApmIT.java index 93d08fbccd376..ddcd667b9cbe7 100644 --- a/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/MetricsApmIT.java +++ b/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/MetricsApmIT.java @@ -60,19 +60,19 @@ protected String getTestRestCluster() { public void testApmIntegration() throws Exception { Map>> sampleAssertions = new HashMap<>( Map.ofEntries( - assertion(TestMeterUsages.VERY_LONG_NAME, m -> (Double) m.get("value"), closeTo(1.0, 0.001)), - assertion("testLongCounter", m -> (Double) m.get("value"), closeTo(1.0, 0.001)), - assertion("testAsyncDoubleCounter", m -> (Double) m.get("value"), closeTo(1.0, 0.001)), - assertion("testAsyncLongCounter", m -> (Integer) m.get("value"), equalTo(1)), - 
assertion("testDoubleGauge", m -> (Double) m.get("value"), closeTo(1.0, 0.001)), - assertion("testLongGauge", m -> (Integer) m.get("value"), equalTo(1)), + assertion("es.test.long_counter.total", m -> (Double) m.get("value"), closeTo(1.0, 0.001)), + assertion("es.test.double_counter.total", m -> (Double) m.get("value"), closeTo(1.0, 0.001)), + assertion("es.test.async_double_counter.total", m -> (Double) m.get("value"), closeTo(1.0, 0.001)), + assertion("es.test.async_long_counter.total", m -> (Integer) m.get("value"), equalTo(1)), + assertion("es.test.double_gauge.current", m -> (Double) m.get("value"), closeTo(1.0, 0.001)), + assertion("es.test.long_gauge.current", m -> (Integer) m.get("value"), equalTo(1)), assertion( - "testDoubleHistogram", + "es.test.double_histogram.histogram", m -> ((Collection) m.get("counts")).stream().mapToInt(Integer::intValue).sum(), equalTo(2) ), assertion( - "testLongHistogram", + "es.test.long_histogram.histogram", m -> ((Collection) m.get("counts")).stream().mapToInt(Integer::intValue).sum(), equalTo(2) ) diff --git a/test/external-modules/apm-integration/src/main/java/org/elasticsearch/test/apmintegration/TestMeterUsages.java b/test/external-modules/apm-integration/src/main/java/org/elasticsearch/test/apmintegration/TestMeterUsages.java index 8a71738a0b420..9c23ce371e044 100644 --- a/test/external-modules/apm-integration/src/main/java/org/elasticsearch/test/apmintegration/TestMeterUsages.java +++ b/test/external-modules/apm-integration/src/main/java/org/elasticsearch/test/apmintegration/TestMeterUsages.java @@ -26,18 +26,17 @@ public class TestMeterUsages { private final LongHistogram longHistogram; private final AtomicReference doubleWithAttributes = new AtomicReference<>(); private final AtomicReference longWithAttributes = new AtomicReference<>(); - public static String VERY_LONG_NAME = "a1234567890123456789012345678901234567890123456789012345678901234567890"; public TestMeterUsages(MeterRegistry meterRegistry) { - this.doubleCounter = meterRegistry.registerDoubleCounter(VERY_LONG_NAME, "test", "unit"); - this.longCounter = meterRegistry.registerDoubleCounter("testLongCounter", "test", "unit"); - this.doubleHistogram = meterRegistry.registerDoubleHistogram("testDoubleHistogram", "test", "unit"); - this.longHistogram = meterRegistry.registerLongHistogram("testLongHistogram", "test", "unit"); - meterRegistry.registerDoubleGauge("testDoubleGauge", "test", "unit", doubleWithAttributes::get); - meterRegistry.registerLongGauge("testLongGauge", "test", "unit", longWithAttributes::get); - - meterRegistry.registerLongAsyncCounter("testAsyncLongCounter", "test", "unit", longWithAttributes::get); - meterRegistry.registerDoubleAsyncCounter("testAsyncDoubleCounter", "test", "unit", doubleWithAttributes::get); + this.doubleCounter = meterRegistry.registerDoubleCounter("es.test.long_counter.total", "test", "unit"); + this.longCounter = meterRegistry.registerDoubleCounter("es.test.double_counter.total", "test", "unit"); + this.doubleHistogram = meterRegistry.registerDoubleHistogram("es.test.double_histogram.histogram", "test", "unit"); + this.longHistogram = meterRegistry.registerLongHistogram("es.test.long_histogram.histogram", "test", "unit"); + meterRegistry.registerDoubleGauge("es.test.double_gauge.current", "test", "unit", doubleWithAttributes::get); + meterRegistry.registerLongGauge("es.test.long_gauge.current", "test", "unit", longWithAttributes::get); + + meterRegistry.registerLongAsyncCounter("es.test.async_long_counter.total", "test", "unit", 
longWithAttributes::get); + meterRegistry.registerDoubleAsyncCounter("es.test.async_double_counter.total", "test", "unit", doubleWithAttributes::get); } public void testUponRequest() { diff --git a/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/ErrorQueryBuilder.java b/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/ErrorQueryBuilder.java index 1a424c54821e8..888c34513695b 100644 --- a/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/ErrorQueryBuilder.java +++ b/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/ErrorQueryBuilder.java @@ -134,7 +134,7 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { }); static { - PARSER.declareObjectArray(constructorArg(), (p, c) -> IndexError.PARSER.parse(p, c), new ParseField("indices")); + PARSER.declareObjectArray(constructorArg(), IndexError.PARSER, new ParseField("indices")); PARSER.declareFloat(ConstructingObjectParser.optionalConstructorArg(), BOOST_FIELD); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), NAME_FIELD); } diff --git a/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/IndexError.java b/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/IndexError.java index 19284152efab6..92b05ec9bf649 100644 --- a/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/IndexError.java +++ b/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/IndexError.java @@ -53,7 +53,7 @@ public IndexError(StreamInput in) throws IOException { this.shardIds = in.readBoolean() ? in.readIntArray() : null; this.errorType = in.readEnum(ERROR_TYPE.class); this.message = in.readString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_051)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { this.stallTimeSeconds = in.readVInt(); } else { this.stallTimeSeconds = 0; @@ -69,7 +69,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeEnum(errorType); out.writeString(message); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_051)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeVInt(stallTimeSeconds); } } diff --git a/test/external-modules/latency-simulating-directory/src/internalClusterTest/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingBlobStoreRepositoryTests.java b/test/external-modules/latency-simulating-directory/src/internalClusterTest/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingBlobStoreRepositoryTests.java index 77e151ed22f32..6d49971df4f0d 100644 --- a/test/external-modules/latency-simulating-directory/src/internalClusterTest/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingBlobStoreRepositoryTests.java +++ b/test/external-modules/latency-simulating-directory/src/internalClusterTest/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingBlobStoreRepositoryTests.java @@ -36,6 +36,7 @@ import java.util.concurrent.atomic.LongAdder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -136,9 +137,9 @@ public void testRetrieveSnapshots() throws Exception { 
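The `V_8_500_051` to `V_8_500_061` change above follows the standard wire-compatibility pattern: reader and writer gate the optional field on the same `TransportVersion`, and the reader supplies a default when an older peer omits it. The invariant to preserve when bumping such a constant is symmetry, sketched here with the field from the hunk:

```java
// Both sides must test the same version constant; if only one side is
// bumped, mixed-version clusters mis-frame the stream.
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) {
    out.writeVInt(stallTimeSeconds);   // writer: only for peers that can read it
}
// ...
this.stallTimeSeconds = in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)
    ? in.readVInt()
    : 0;                               // reader: default for older peers
```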
assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); logger.info("--> run a search"); - var searchResponse = client.prepareSearch("test-idx").setQuery(QueryBuilders.termQuery("text", "sometext")).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); - assertThat(COUNTS.intValue(), greaterThan(0)); + assertResponse(client.prepareSearch("test-idx").setQuery(QueryBuilders.termQuery("text", "sometext")), searchResponse -> { + assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); + assertThat(COUNTS.intValue(), greaterThan(0)); + }); } } diff --git a/test/external-modules/seek-tracking-directory/src/internalClusterTest/java/org/elasticsearch/test/seektracker/SeekTrackerPluginIT.java index 54e6583d5f483..7d1e4c4c3d0de 100644 --- a/test/external-modules/seek-tracking-directory/src/internalClusterTest/java/org/elasticsearch/test/seektracker/SeekTrackerPluginIT.java +++ b/test/external-modules/seek-tracking-directory/src/internalClusterTest/java/org/elasticsearch/test/seektracker/SeekTrackerPluginIT.java @@ -45,7 +45,7 @@ public void testSeekTrackerPlugin() throws InterruptedException { } indexRandom(true, docs); - prepareSearch("index").setQuery(QueryBuilders.termQuery("field", "term2")).get(); + prepareSearch("index").setQuery(QueryBuilders.termQuery("field", "term2")).get().decRef(); SeekStatsResponse response = client().execute(SeekTrackerPlugin.SEEK_STATS_ACTION, new SeekStatsRequest("index")).actionGet(); List<ShardSeekStats> shardSeekStats = response.getSeekStats().get("index"); diff --git a/test/fixtures/minio-fixture/build.gradle index 9a71387d7c6b7..3c97315dc07ce 100644 --- a/test/fixtures/minio-fixture/build.gradle +++ b/test/fixtures/minio-fixture/build.gradle @@ -11,26 +11,20 @@ apply plugin: 'elasticsearch.cache-test-fixtures' description = 'Fixture for MinIO Storage service' configurations.all { - transitive = false + exclude group: 'org.hamcrest', module: 'hamcrest-core' } - dependencies { - testImplementation project(':test:framework') - + testImplementation (project(':test:framework')) api "junit:junit:${versions.junit}" api project(':test:fixtures:testcontainer-utils') - api "org.testcontainers:testcontainers:${versions.testcontainer}" - implementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" - implementation "org.slf4j:slf4j-api:${versions.slf4j}" - implementation "com.github.docker-java:docker-java-api:${versions.dockerJava}" - - runtimeOnly "com.github.docker-java:docker-java-transport-zerodep:${versions.dockerJava}" - runtimeOnly "com.github.docker-java:docker-java-transport:${versions.dockerJava}" - runtimeOnly "com.github.docker-java:docker-java-core:${versions.dockerJava}" - runtimeOnly "org.apache.commons:commons-compress:${versions.commonsCompress}" - runtimeOnly "org.rnorth.duct-tape:duct-tape:${versions.ductTape}" + api("org.testcontainers:testcontainers:${versions.testcontainer}") { + transitive = false + } + api("com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"){ + transitive = false + } // ensure we have proper logging when used in tests runtimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" - runtimeOnly "org.hamcrest:hamcrest:${versions.hamcrest}" + runtimeOnly("org.hamcrest:hamcrest:${versions.hamcrest}") } diff --git
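The two hunks above deal with the same lifecycle issue: `SearchResponse` is ref-counted, so even a response a test never inspects must be released. The two idioms, spelled out (the query is illustrative):

```java
// Idiom 1: let the assertion helper manage the response lifecycle.
assertResponse(prepareSearch("index").setQuery(query), response -> {
    assertThat(response.getHits().getTotalHits().value, greaterThan(0L));
});

// Idiom 2: the response is only needed for its side effects, so release it
// immediately; this is the long form of .get().decRef() above.
SearchResponse response = prepareSearch("index").setQuery(query).get();
response.decRef();
```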
a/test/fixtures/nginx-fixture/Dockerfile b/test/fixtures/nginx-fixture/Dockerfile deleted file mode 100644 index 01bad77c488c8..0000000000000 --- a/test/fixtures/nginx-fixture/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM nginx -COPY nginx.conf /etc/nginx/nginx.conf diff --git a/test/fixtures/nginx-fixture/build.gradle b/test/fixtures/nginx-fixture/build.gradle deleted file mode 100644 index 438473f70a6f2..0000000000000 --- a/test/fixtures/nginx-fixture/build.gradle +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -apply plugin: 'elasticsearch.test.fixtures' - -description = 'Fixture for an external http service' - -// These directories are shared between the URL repository and the FS repository in integration tests -project.ext { - fsRepositoryDir = file("${testFixturesDir}/fs-repository") -} - -tasks.named("preProcessFixture").configure { - doLast { - // tests expect to have an empty repo - project.ext.fsRepositoryDir.mkdirs() - } -} diff --git a/test/fixtures/nginx-fixture/docker-compose.yml b/test/fixtures/nginx-fixture/docker-compose.yml deleted file mode 100644 index bf6ab56bb0c9a..0000000000000 --- a/test/fixtures/nginx-fixture/docker-compose.yml +++ /dev/null @@ -1,9 +0,0 @@ -version: '3' -services: - nginx-fixture: - build: - context: . - volumes: - - ./testfixtures_shared/fs-repository:/data - ports: - - "80" diff --git a/test/fixtures/nginx-fixture/nginx.conf b/test/fixtures/nginx-fixture/nginx.conf deleted file mode 100644 index 9b199b2dc48b7..0000000000000 --- a/test/fixtures/nginx-fixture/nginx.conf +++ /dev/null @@ -1,10 +0,0 @@ -events {} - -http { - server { - listen 80 default_server; - listen [::]:80 default_server; - - root /data; - } -} diff --git a/test/fixtures/testcontainer-utils/build.gradle b/test/fixtures/testcontainer-utils/build.gradle index 80886d99087c9..3766722abcd65 100644 --- a/test/fixtures/testcontainer-utils/build.gradle +++ b/test/fixtures/testcontainer-utils/build.gradle @@ -1,6 +1,5 @@ apply plugin: 'elasticsearch.java' - configurations.all { transitive = false } @@ -10,6 +9,14 @@ dependencies { api "junit:junit:${versions.junit}" api "org.testcontainers:testcontainers:${versions.testcontainer}" implementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + api "com.github.docker-java:docker-java-api:${versions.dockerJava}" implementation "org.slf4j:slf4j-api:${versions.slf4j}" - implementation "com.github.docker-java:docker-java-api:${versions.dockerJava}" + runtimeOnly "com.github.docker-java:docker-java-transport-zerodep:${versions.dockerJava}" + runtimeOnly "com.github.docker-java:docker-java-transport:${versions.dockerJava}" + runtimeOnly "com.github.docker-java:docker-java-core:${versions.dockerJava}" + runtimeOnly "org.apache.commons:commons-compress:${versions.commonsCompress}" + runtimeOnly "org.rnorth.duct-tape:duct-tape:${versions.ductTape}" + runtimeOnly "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + runtimeOnly "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + } diff --git a/test/fixtures/testcontainer-utils/src/main/java/org/elasticsearch/test/fixtures/testcontainers/TestContainersThreadFilter.java 
b/test/fixtures/testcontainer-utils/src/main/java/org/elasticsearch/test/fixtures/testcontainers/TestContainersThreadFilter.java index f36a3264bffbb..d825330120eec 100644 --- a/test/fixtures/testcontainer-utils/src/main/java/org/elasticsearch/test/fixtures/testcontainers/TestContainersThreadFilter.java +++ b/test/fixtures/testcontainer-utils/src/main/java/org/elasticsearch/test/fixtures/testcontainers/TestContainersThreadFilter.java @@ -19,6 +19,6 @@ public class TestContainersThreadFilter implements ThreadFilter { public boolean reject(Thread t) { return t.getName().startsWith("testcontainers-") || t.getName().startsWith("ducttape") - || t.getName().startsWith("ForkJoinPool.commonPool-worker-1"); + || t.getName().startsWith("ForkJoinPool.commonPool-worker-"); } } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java index e1949d78e86c2..7848f0ef4a625 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java @@ -55,6 +55,7 @@ import java.util.Map; import java.util.OptionalDouble; import java.util.Set; +import java.util.function.Predicate; import static org.elasticsearch.cluster.ClusterModule.BALANCED_ALLOCATOR; import static org.elasticsearch.cluster.ClusterModule.DESIRED_BALANCE_ALLOCATOR; @@ -412,7 +413,7 @@ public void beforeAllocation(RoutingAllocation allocation) { } @Override - public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) { + public void afterPrimariesBeforeReplicas(RoutingAllocation allocation, Predicate<ShardRouting> isRelevantShardPredicate) { // no-op } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java index bb86dab60b0eb..1004ea5b50119 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java @@ -107,7 +107,7 @@ ShardStats[] adjustShardStats(ShardStats[] shardsStats) { var storeStats = new StoreStats( shardSizeFunctionCopy.apply(shardRouting), shardSizeFunctionCopy.apply(shardRouting), - shardStats.getStats().store.getReservedSize().getBytes() + shardStats.getStats().store.reservedSizeInBytes() ); var commonStats = new CommonStats(new CommonStatsFlags(CommonStatsFlags.Flag.Store)); commonStats.store = storeStats; diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index db4b1ec0a99c8..5c5123e03454f 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -261,7 +261,8 @@ public static DataStream randomInstance(String dataStreamName, LongSupplier time randomBoolean() ? IndexMode.STANDARD : null, // IndexMode.TIME_SERIES triggers validation that many unit tests don't pass randomBoolean() ?
DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build() : null, failureStore, - failureIndices + failureIndices, + randomBoolean() ); } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java index 1158e805ba3c1..1810b5cee76ec 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java @@ -84,6 +84,29 @@ public static ShardRouting newShardRouting( ); } + public static ShardRouting newShardRouting( + ShardId shardId, + String currentNodeId, + String relocatingNodeId, + boolean primary, + ShardRoutingState state, + RecoverySource recoverySource + ) { + return new ShardRouting( + shardId, + currentNodeId, + relocatingNodeId, + primary, + state, + recoverySource, + buildUnassignedInfo(state), + buildRelocationFailureInfo(state), + buildAllocationId(state), + -1, + ShardRouting.Role.DEFAULT + ); + } + public static ShardRouting newShardRouting( String index, int shardId, diff --git a/test/framework/src/main/java/org/elasticsearch/common/inject/ModuleTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/inject/ModuleTestCase.java index df8c0e9ad4b32..6f378f263490c 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/inject/ModuleTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/inject/ModuleTestCase.java @@ -17,6 +17,8 @@ import java.util.List; import java.util.function.Predicate; +import static org.elasticsearch.test.LambdaMatchers.trueWith; + /** * Base testcase for testing {@link Module} implementations. */ @@ -44,13 +46,13 @@ private static void assertInstanceBindingWithAnnotation( if (element instanceof InstanceBinding binding) { if (to.equals(binding.getKey().getTypeLiteral().getType())) { if (annotation == null || annotation.equals(binding.getKey().getAnnotationType())) { - assertTrue(tester.test(to.cast(binding.getInstance()))); + assertThat(tester, trueWith(to.cast(binding.getInstance()))); return; } } } else if (element instanceof ProviderInstanceBinding binding) { if (to.equals(binding.getKey().getTypeLiteral().getType())) { - assertTrue(tester.test(to.cast(binding.getProviderInstance().get()))); + assertThat(tester, trueWith(to.cast(binding.getProviderInstance().get()))); return; } } diff --git a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java index 0a0592b5a01f2..5f6e50a7c83e0 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java +++ b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java @@ -702,5 +702,11 @@ public void addWithoutBreaking(long bytes) { public long getUsed() { return used.get(); } + + @Override + public String toString() { + long u = used.get(); + return "LimitedBreaker[" + u + "/" + max.getBytes() + "][" + ByteSizeValue.ofBytes(u) + "/" + max + "]"; + } } } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/util/ShapeTestUtils.java b/test/framework/src/main/java/org/elasticsearch/geo/ShapeTestUtils.java similarity index 96% rename from x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/util/ShapeTestUtils.java rename to test/framework/src/main/java/org/elasticsearch/geo/ShapeTestUtils.java index 
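The new `TestShardRouting.newShardRouting` overload above lets tests pin the recovery source explicitly instead of receiving a state-derived default. A usage sketch, with the index name, node id, and recovery source chosen purely for illustration:

```java
class NewOverloadUsage {
    static ShardRouting initializingPrimary() {
        return TestShardRouting.newShardRouting(
            new ShardId("index", "_na_", 0),
            "node-1",                                        // currentNodeId
            null,                                            // relocatingNodeId (none while initializing)
            true,                                            // primary
            ShardRoutingState.INITIALIZING,
            RecoverySource.EmptyStoreRecoverySource.INSTANCE
        );
    }
}
```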
96667493de21c..1e21ad1acfd08 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/util/ShapeTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/geo/ShapeTestUtils.java @@ -1,12 +1,12 @@ /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. */ -package org.elasticsearch.xpack.spatial.util; +package org.elasticsearch.geo; -import org.apache.lucene.geo.XShapeTestUtil; import org.apache.lucene.geo.XYCircle; import org.apache.lucene.geo.XYPolygon; import org.elasticsearch.geometry.Circle; @@ -117,7 +117,7 @@ public static Polygon randomPolygon(boolean hasAlt) { return new Polygon(linearRing(floatsToDoubles(lucenePolygon.getPolyX()), floatsToDoubles(lucenePolygon.getPolyY()), hasAlt)); } - static double area(XYPolygon p) { + public static double area(XYPolygon p) { double windingSum = 0; final int numPts = p.numPoints() - 1; for (int i = 0; i < numPts; i++) { @@ -127,7 +127,7 @@ static double area(XYPolygon p) { return Math.abs(windingSum / 2); } - static double[] floatsToDoubles(float[] f) { + public static double[] floatsToDoubles(float[] f) { double[] d = new double[f.length]; for (int i = 0; i < f.length; i++) { d[i] = f[i]; diff --git a/x-pack/plugin/spatial/src/test/java/org/apache/lucene/geo/XShapeTestUtil.java b/test/framework/src/main/java/org/elasticsearch/geo/XShapeTestUtil.java similarity index 98% rename from x-pack/plugin/spatial/src/test/java/org/apache/lucene/geo/XShapeTestUtil.java rename to test/framework/src/main/java/org/elasticsearch/geo/XShapeTestUtil.java index e9ebf4534693e..62b3edfe24193 100644 --- a/x-pack/plugin/spatial/src/test/java/org/apache/lucene/geo/XShapeTestUtil.java +++ b/test/framework/src/main/java/org/elasticsearch/geo/XShapeTestUtil.java @@ -15,11 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
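`area(XYPolygon)` above is made public as part of the move into the shared test framework; it computes polygon area from the winding sum, which is the shoelace formula. A standalone version of the same idea over coordinate arrays, for reference:

```java
// Shoelace formula: half the absolute winding sum over the closed ring.
// Assumes the last point repeats the first, as in XYPolygon.
static double shoelaceArea(double[] xs, double[] ys) {
    double windingSum = 0;
    int numPts = xs.length - 1;
    for (int i = 0; i < numPts; i++) {
        windingSum += xs[i] * ys[i + 1] - xs[i + 1] * ys[i];
    }
    return Math.abs(windingSum / 2);
}
```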
*/ -package org.apache.lucene.geo; +package org.elasticsearch.geo; import com.carrotsearch.randomizedtesting.RandomizedContext; import com.carrotsearch.randomizedtesting.generators.BiasedNumbers; +import org.apache.lucene.geo.XYCircle; +import org.apache.lucene.geo.XYPolygon; +import org.apache.lucene.geo.XYRectangle; import org.apache.lucene.tests.util.TestUtil; import java.util.ArrayList; diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java index 1e6401d79d3fc..643beda11939c 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java @@ -436,6 +436,11 @@ public Set sourcePaths(String name) { public String parentField(String field) { throw new UnsupportedOperationException(); } + + @Override + public FieldNamesFieldMapper.FieldNamesFieldType fieldNames() { + return FieldNamesFieldMapper.FieldNamesFieldType.get(true); + } }; } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index 63a726d83f79e..710c31ed7aee8 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -155,7 +155,7 @@ protected final DocumentMapper createDocumentMapper(String mappings) throws IOEx return mapperService.documentMapper(); } - protected final MapperService createMapperService(XContentBuilder mappings) throws IOException { + public final MapperService createMapperService(XContentBuilder mappings) throws IOException { return createMapperService(getVersion(), mappings); } @@ -258,7 +258,7 @@ protected static void withLuceneIndex( /** * Build a {@link SourceToParse} with the id {@code "1"} and without any dynamic templates. 
*/ - protected static SourceToParse source(CheckedConsumer build) throws IOException { + public static SourceToParse source(CheckedConsumer build) throws IOException { return source("1", build, null); } @@ -335,7 +335,7 @@ protected static XContentBuilder mappingNoSubobjects(CheckedConsumer buildFields) throws IOException { + public static XContentBuilder mapping(CheckedConsumer buildFields) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder().startObject().startObject("_doc").startObject("properties"); buildFields.accept(builder); return builder.endObject().endObject().endObject(); diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java index 44e28132beec0..12064710faa68 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java @@ -1240,24 +1240,31 @@ public final void testSyntheticEmptyListNoDocValuesLoader() throws IOException { } public final void testBlockLoaderFromColumnReader() throws IOException { - testBlockLoader(true); + testBlockLoader(false, true); } public final void testBlockLoaderFromRowStrideReader() throws IOException { - testBlockLoader(false); + testBlockLoader(false, false); + } + + public final void testBlockLoaderFromColumnReaderWithSyntheticSource() throws IOException { + testBlockLoader(true, true); + } + + // Removed 'final' to silence this test in GeoPointFieldMapperTests, which does not support synthetic source completely + public void testBlockLoaderFromRowStrideReaderWithSyntheticSource() throws IOException { + testBlockLoader(true, false); } protected boolean supportsColumnAtATimeReader(MapperService mapper, MappedFieldType ft) { return ft.hasDocValues(); } - private void testBlockLoader(boolean columnReader) throws IOException { + private void testBlockLoader(boolean syntheticSource, boolean columnReader) throws IOException { + // TODO if we're not using synthetic source use a different sort of example. Or something. SyntheticSourceExample example = syntheticSourceSupport(false).example(5); - MapperService mapper = createMapperService(syntheticSourceMapping(b -> { // TODO randomly use syntheticSourceMapping or normal - b.startObject("field"); - example.mapping().accept(b); - b.endObject(); - })); + XContentBuilder mapping = syntheticSource ? syntheticSourceFieldMapping(example.mapping) : fieldMapping(example.mapping); + MapperService mapper = createMapperService(mapping); testBlockLoader(columnReader, example, mapper, "field"); } @@ -1284,8 +1291,13 @@ public Set sourcePaths(String name) { public String parentField(String field) { return mapper.mappingLookup().parentField(field); } + + @Override + public FieldNamesFieldMapper.FieldNamesFieldType fieldNames() { + return (FieldNamesFieldMapper.FieldNamesFieldType) mapper.fieldType(FieldNamesFieldMapper.NAME); + } }); - Function valuesConvert = loadBlockExpected(); + Function valuesConvert = loadBlockExpected(mapper, loaderFieldName); if (valuesConvert == null) { assertNull(loader); return; @@ -1359,7 +1371,7 @@ protected Matcher blockItemMatcher(Object expected) { * How {@link MappedFieldType#blockLoader} should load values or {@code null} * if that method isn't supported by field being tested. 
*/ - protected Function loadBlockExpected() { + protected Function loadBlockExpected(MapperService mapper, String loaderFieldName) { return null; } diff --git a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java index 5a607350d913f..2eb1811c12691 100644 --- a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java @@ -397,7 +397,7 @@ public void execute() { } ContextCompiler compiler = contexts.get(context); if (compiler != null) { - return context.factoryClazz.cast(compiler.compile(script::apply, params)); + return context.factoryClazz.cast(compiler.compile(script, params)); } throw new IllegalArgumentException("mock script engine does not know how to handle context [" + context.name + "]"); } diff --git a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java index d1835459ab932..34529979cf464 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java +++ b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.text.Text; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.script.Script; @@ -327,12 +328,18 @@ public static SearchSourceBuilder randomSearchSourceBuilder( } jsonBuilder.endArray(); jsonBuilder.endObject(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(XContentParserConfiguration.EMPTY, BytesReference.bytes(jsonBuilder).streamInput()); - parser.nextToken(); - parser.nextToken(); - parser.nextToken(); - builder.searchAfter(SearchAfterBuilder.fromXContent(parser).getSortValues()); + try ( + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY, + BytesReference.bytes(jsonBuilder), + XContentType.JSON + ) + ) { + parser.nextToken(); + parser.nextToken(); + parser.nextToken(); + builder.searchAfter(SearchAfterBuilder.fromXContent(parser).getSortValues()); + } } catch (IOException e) { throw new RuntimeException("Error building search_from", e); } diff --git a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java index 589bc76c55a3d..3950683ca0c9d 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java @@ -9,6 +9,12 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.client.Response; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; public enum SearchResponseUtils { ; @@ -25,4 +31,37 @@ public static TotalHits getTotalHits(SearchRequestBuilder request) { public static long getTotalHitsValue(SearchRequestBuilder request) { return getTotalHits(request).value; } + + public 
static SearchResponse responseAsSearchResponse(Response searchResponse) throws IOException { + try (var parser = ESRestTestCase.responseAsParser(searchResponse)) { + return SearchResponse.fromXContent(parser); + } + } + + public static SearchResponse emptyWithTotalHits( + String scrollId, + int totalShards, + int successfulShards, + int skippedShards, + long tookInMillis, + ShardSearchFailure[] shardFailures, + SearchResponse.Clusters clusters + ) { + return new SearchResponse( + SearchHits.EMPTY_WITH_TOTAL_HITS, + null, + null, + false, + null, + null, + 1, + scrollId, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + shardFailures, + clusters + ); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java index c03058f22da5d..3b347c50671cc 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java @@ -51,11 +51,12 @@ public void testFromXContent() throws IOException { } factoriesBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS); XContentBuilder shuffled = shuffleXContent(builder); - XContentParser parser = createParser(shuffled); - AggregationBuilder newAgg = parse(parser); - assertNotSame(newAgg, testAgg); - assertEquals(testAgg, newAgg); - assertEquals(testAgg.hashCode(), newAgg.hashCode()); + try (XContentParser parser = createParser(shuffled)) { + AggregationBuilder newAgg = parse(parser); + assertNotSame(newAgg, testAgg); + assertEquals(testAgg, newAgg); + assertEquals(testAgg.hashCode(), newAgg.hashCode()); + } } public void testSupportsConcurrentExecution() { @@ -85,10 +86,12 @@ public void testFromXContentMulti() throws IOException { } factoriesBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS); XContentBuilder shuffled = shuffleXContent(builder); - XContentParser parser = createParser(shuffled); - assertSame(XContentParser.Token.START_OBJECT, parser.nextToken()); - AggregatorFactories.Builder parsed = AggregatorFactories.parseAggregators(parser); + AggregatorFactories.Builder parsed; + try (XContentParser parser = createParser(shuffled)) { + assertSame(XContentParser.Token.START_OBJECT, parser.nextToken()); + parsed = AggregatorFactories.parseAggregators(parser); + } assertThat(parsed.getAggregatorFactories(), hasSize(testAggs.size())); assertThat(parsed.getPipelineAggregatorFactories(), hasSize(0)); @@ -127,8 +130,10 @@ public void testSerializationMulti() throws IOException { public void testToString() throws IOException { AB testAgg = createTestAggregatorBuilder(); String toString = randomBoolean() ? 
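`emptyWithTotalHits` above centralizes construction of an empty `SearchResponse` for tests; its long positional argument list reads best with the parameters labeled. A usage sketch (values illustrative):

```java
SearchResponse empty = SearchResponseUtils.emptyWithTotalHits(
    null,                             // scrollId
    1,                                // totalShards
    1,                                // successfulShards
    0,                                // skippedShards
    3L,                               // tookInMillis
    ShardSearchFailure.EMPTY_ARRAY,   // shardFailures
    SearchResponse.Clusters.EMPTY     // clusters
);
```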
Strings.toString(testAgg) : testAgg.toString(); - XContentParser parser = createParser(XContentType.JSON.xContent(), toString); - AggregationBuilder newAgg = parse(parser); + AggregationBuilder newAgg; + try (XContentParser parser = createParser(XContentType.JSON.xContent(), toString)) { + newAgg = parse(parser); + } assertNotSame(newAgg, testAgg); assertEquals(testAgg, newAgg); assertEquals(testAgg.hashCode(), newAgg.hashCode()); diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractSignificanceHeuristicTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractSignificanceHeuristicTestCase.java index 3967a86ea7065..519b67aae556e 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractSignificanceHeuristicTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractSignificanceHeuristicTestCase.java @@ -197,8 +197,10 @@ public void testParseFromAggBuilder() throws IOException { stBuilder.significanceHeuristic(significanceHeuristic).field("text").minDocCount(200); XContentBuilder stXContentBuilder = XContentFactory.jsonBuilder(); stBuilder.internalXContent(stXContentBuilder, null); - XContentParser stParser = createParser(JsonXContent.jsonXContent, Strings.toString(stXContentBuilder)); - SignificanceHeuristic parsedHeuristic = parseSignificanceHeuristic(stParser); + SignificanceHeuristic parsedHeuristic; + try (XContentParser stParser = createParser(JsonXContent.jsonXContent, Strings.toString(stXContentBuilder))) { + parsedHeuristic = parseSignificanceHeuristic(stParser); + } assertThat(significanceHeuristic, equalTo(parsedHeuristic)); } diff --git a/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java index 13131a5e3eef7..52d2f3f53a43e 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java @@ -373,7 +373,8 @@ public void testQueryWithinMultiLine() throws Exception { try { client().prepareSearch(defaultIndexName) .setQuery(queryBuilder().shapeQuery(defaultFieldName, multiline).relation(ShapeRelation.WITHIN)) - .get(); + .get() + .decRef(); } catch (SearchPhaseExecutionException e) { assertThat(e.getCause().getMessage(), containsString("Field [" + defaultFieldName + "] found an unsupported shape Line")); } diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 5ab1641307fc5..3744011b5b9f6 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -50,7 +50,6 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.snapshots.mockstore.MockRepository; -import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.threadpool.ThreadPool; @@ -80,7 +79,6 @@ import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Function; -import 
java.util.function.Predicate; import java.util.stream.StreamSupport; import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.READONLY_SETTING_KEY; @@ -405,24 +403,25 @@ protected String initWithSnapshotVersion(String repoName, Path repoPath, IndexVe oldVersionString = currentVersionString.replace(",\"index_version\":" + IndexVersion.current(), "") .replace(",\"version\":\"8.11.0\"", ",\"version\":\"" + Version.fromId(version.id()) + "\""); } - final RepositoryData downgradedRepoData = RepositoryData.snapshotsFromXContent( - JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, oldVersionString), - repositoryData.getGenId(), - randomBoolean() - ); + final RepositoryData downgradedRepoData; + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, oldVersionString)) { + downgradedRepoData = RepositoryData.snapshotsFromXContent(parser, repositoryData.getGenId(), randomBoolean()); + } Files.write( repoPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + repositoryData.getGenId()), BytesReference.toBytes(BytesReference.bytes(downgradedRepoData.snapshotsToXContent(XContentFactory.jsonBuilder(), version))), StandardOpenOption.TRUNCATE_EXISTING ); - final SnapshotInfo downgradedSnapshotInfo = SnapshotInfo.fromXContentInternal( - repoName, - JsonXContent.jsonXContent.createParser( + final SnapshotInfo downgradedSnapshotInfo; + try ( + var parser = JsonXContent.jsonXContent.createParser( XContentParserConfiguration.EMPTY, Strings.toString(snapshotInfo, ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS) .replace(IndexVersion.current().toString(), version.toString()) ) - ); + ) { + downgradedSnapshotInfo = SnapshotInfo.fromXContentInternal(repoName, parser); + } final BlobStoreRepository blobStoreRepository = getRepositoryOnMaster(repoName); PlainActionFuture.get( f -> blobStoreRepository.threadPool() @@ -503,7 +502,7 @@ protected void indexRandomDocs(String index, int numdocs) throws InterruptedExce protected long getCountForIndex(String indexName) { return SearchResponseUtils.getTotalHitsValue( - client().prepareSearch(indexName).setSource(new SearchSourceBuilder().size(0).trackTotalHits(true)) + prepareSearch(indexName).setSource(new SearchSourceBuilder().size(0).trackTotalHits(true)) ); } @@ -582,18 +581,6 @@ protected void awaitNoMoreRunningOperations(String viaNode) throws Exception { ); } - protected void awaitClusterState(Predicate statePredicate) throws Exception { - awaitClusterState(logger, internalCluster().getMasterName(), statePredicate); - } - - public static void awaitClusterState(Logger logger, Predicate statePredicate) throws Exception { - awaitClusterState(logger, internalCluster().getMasterName(), statePredicate); - } - - public static void awaitClusterState(Logger logger, String viaNode, Predicate statePredicate) throws Exception { - ClusterServiceUtils.awaitClusterState(logger, statePredicate, internalCluster().getInstance(ClusterService.class, viaNode)); - } - protected ActionFuture startFullSnapshotBlockedOnDataNode(String snapshotName, String repoName, String dataNode) throws Exception { blockDataNode(repoName, dataNode); diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index 0a3316b87bd04..d3833fdb3a778 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java 
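The next several files repeat one fix many times: `XContentParser` is `Closeable`, and parsers created inline in tests were never closed. The conversion always has the same shape, shown schematically here with helper names as in the surrounding tests:

```java
// Before: the parser is never closed, and leaks if parsing throws.
XContentParser parser = createParser(JsonXContent.jsonXContent, queryAsString);
QueryBuilder parsed = parseQuery(parser);

// After: try-with-resources closes the parser on all paths.
try (XContentParser parser = createParser(JsonXContent.jsonXContent, queryAsString)) {
    QueryBuilder parsed = parseQuery(parser);
}
```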
@@ -152,10 +152,14 @@ public void testFromXContent() throws IOException { randomBoolean(), shuffleProtectedFields() ); - assertParsedQuery(createParser(xContentType.xContent(), shuffledXContent), testQuery); + try (var parser = createParser(xContentType.xContent(), shuffledXContent)) { + assertParsedQuery(parser, testQuery); + } for (Map.Entry alternateVersion : getAlternateVersions().entrySet()) { String queryAsString = alternateVersion.getKey(); - assertParsedQuery(createParser(JsonXContent.jsonXContent, queryAsString), alternateVersion.getValue()); + try (var parser = createParser(JsonXContent.jsonXContent, queryAsString)) { + assertParsedQuery(parser, alternateVersion.getValue()); + } } } } @@ -424,12 +428,15 @@ private void assertParsedQuery(XContentParser parser, QueryBuilder expectedQuery protected QueryBuilder parseQuery(AbstractQueryBuilder builder) throws IOException { BytesReference bytes = XContentHelper.toXContent(builder, XContentType.JSON, false); - return parseQuery(createParser(JsonXContent.jsonXContent, bytes)); + try (var parser = createParser(JsonXContent.jsonXContent, bytes)) { + return parseQuery(parser); + } } protected QueryBuilder parseQuery(String queryAsString) throws IOException { - XContentParser parser = createParser(JsonXContent.jsonXContent, queryAsString); - return parseQuery(parser); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, queryAsString)) { + return parseQuery(parser); + } } protected QueryBuilder parseQuery(XContentParser parser) throws IOException { @@ -651,9 +658,13 @@ public void testValidOutput() throws IOException { QB testQuery = createTestQueryBuilder(); XContentType xContentType = XContentType.JSON; String toString = Strings.toString(testQuery); - assertParsedQuery(createParser(xContentType.xContent(), toString), testQuery); + try (var parser = createParser(xContentType.xContent(), toString)) { + assertParsedQuery(parser, testQuery); + } BytesReference bytes = XContentHelper.toXContent(testQuery, xContentType, false); - assertParsedQuery(createParser(xContentType.xContent(), bytes), testQuery); + try (var parser = createParser(xContentType.xContent(), bytes)) { + assertParsedQuery(parser, testQuery); + } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java index fa4d196ceaeda..770c56f9c5952 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java @@ -154,8 +154,10 @@ public void test() throws IOException { randomFieldsExcludeFilter, createParser ); - XContentParser parser = createParser.apply(XContentFactory.xContent(xContentType), shuffledContent); - T parsed = fromXContent.apply(parser); + final T parsed; + try (XContentParser parser = createParser.apply(XContentFactory.xContent(xContentType), shuffledContent)) { + parsed = fromXContent.apply(parser); + } try { assertEqualsConsumer.accept(testInstance, parsed); if (assertToXContentEquivalence) { @@ -221,6 +223,34 @@ public static void testFromXContent( BiConsumer assertEqualsConsumer, boolean assertToXContentEquivalence, ToXContent.Params toXContentParams + ) throws IOException { + testFromXContent( + numberOfTestRuns, + instanceSupplier, + supportsUnknownFields, + shuffleFieldsExceptions, + randomFieldsExcludeFilter, + createParserFunction, + fromXContent, + assertEqualsConsumer, + assertToXContentEquivalence, + 
toXContentParams, + t -> {} + ); + } + + public static void testFromXContent( + int numberOfTestRuns, + Supplier instanceSupplier, + boolean supportsUnknownFields, + String[] shuffleFieldsExceptions, + Predicate randomFieldsExcludeFilter, + CheckedBiFunction createParserFunction, + CheckedFunction fromXContent, + BiConsumer assertEqualsConsumer, + boolean assertToXContentEquivalence, + ToXContent.Params toXContentParams, + Consumer dispose ) throws IOException { xContentTester(createParserFunction, instanceSupplier, toXContentParams, fromXContent).numberOfTestRuns(numberOfTestRuns) .supportsUnknownFields(supportsUnknownFields) @@ -228,6 +258,7 @@ public static void testFromXContent( .randomFieldsExcludeFilter(randomFieldsExcludeFilter) .assertEqualsConsumer(assertEqualsConsumer) .assertToXContentEquivalence(assertToXContentEquivalence) + .dispose(dispose) .test(); } @@ -246,10 +277,17 @@ public final void testFromXContent() throws IOException { this::parseInstance, this::assertEqualInstances, assertToXContentEquivalence(), - getToXContentParams() + getToXContentParams(), + this::dispose ); } + /** + * Callback invoked after a test instance is no longer needed that can be overridden to release resources associated with the instance. + * @param instance test instance that is no longer used + */ + protected void dispose(T instance) {} + /** * Creates a random test instance to use in the tests. This method will be * called multiple times during test execution and should return a different @@ -320,8 +358,9 @@ static BytesReference insertRandomFieldsAndShuffle( } else { withRandomFields = xContent; } - XContentParser parserWithRandomFields = createParserFunction.apply(XContentFactory.xContent(xContentType), withRandomFields); - return BytesReference.bytes(ESTestCase.shuffleXContent(parserWithRandomFields, false, shuffleFieldsExceptions)); + try (XContentParser parserWithRandomFields = createParserFunction.apply(XContentFactory.xContent(xContentType), withRandomFields)) { + return BytesReference.bytes(ESTestCase.shuffleXContent(parserWithRandomFields, false, shuffleFieldsExceptions)); + } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/BreakerTestUtil.java b/test/framework/src/main/java/org/elasticsearch/test/BreakerTestUtil.java new file mode 100644 index 0000000000000..c34219e86ef66 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/BreakerTestUtil.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test; + +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; + +public class BreakerTestUtil { + private static final Logger logger = LogManager.getLogger(BreakerTestUtil.class); + + /** + * Performs a binary search between 0 and {@code tooBigToBreak} bytes for the largest memory size + * that'll cause the closure parameter to throw a {@link CircuitBreakingException}. 
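The second `testFromXContent` overload threads a per-instance `dispose` callback through the run loop, defaulting to a no-op (`t -> {}`). A hypothetical subclass whose test instances hold releasable resources could hook it like this (`PooledResponse` is an invented stand-in, not a real class):

```java
public class PooledResponseTests extends AbstractXContentTestCase<PooledResponse> {

    @Override
    protected void dispose(PooledResponse instance) {
        // Called once the framework is finished with the instance, so pooled
        // buffers are returned even when an assertion fails mid-run.
        instance.decRef();
    }

    // createTestInstance()/parseInstance()/supportsUnknownFields() omitted: they
    // are the usual AbstractXContentTestCase plumbing, unrelated to disposal.
}
```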
+ */ + public static ByteSizeValue findBreakerLimit(ByteSizeValue tooBigToBreak, CheckedConsumer c) + throws E { + + // Validate arguments: we don't throw for tooBigToBreak and we *do* throw for 0. + try { + c.accept(tooBigToBreak); + } catch (CircuitBreakingException e) { + throw new IllegalArgumentException("expected runnable *not* to break under tooBigToBreak", e); + } + try { + c.accept(ByteSizeValue.ofBytes(0)); + throw new IllegalArgumentException("expected runnable to break under a limit of 0 bytes"); + } catch (CircuitBreakingException e) { + // desired + } + + // Perform the actual binary search + long l = findBreakerLimit(0, tooBigToBreak.getBytes(), c); + + // Validate results: we *do* throw for limit, we don't throw for limit + 1 + ByteSizeValue limit = ByteSizeValue.ofBytes(l); + ByteSizeValue onePastLimit = ByteSizeValue.ofBytes(l + 1); + try { + c.accept(limit); + throw new IllegalArgumentException("expected runnable to break under a limit of " + limit + " bytes"); + } catch (CircuitBreakingException e) { + // desired + } + try { + c.accept(onePastLimit); + } catch (CircuitBreakingException e) { + throw new IllegalArgumentException("expected runnable to break under a limit of " + onePastLimit + " bytes"); + } + return limit; + } + + /** + * A binary search of memory limits, looking for the lowest limit that'll break. + */ + private static long findBreakerLimit(long min, long max, CheckedConsumer c) throws E { + // max is an amount of memory that doesn't break + // min is an amount of memory that *does* break + while (max - min > 1) { + assert max > min; + long diff = max - min; + logger.info( + "Between {} and {}. {} bytes remaining.", + ByteSizeValue.ofBytes(min), + ByteSizeValue.ofBytes(max), + ByteSizeValue.ofBytes(diff) + ); + long mid = min + diff / 2; + try { + c.accept(ByteSizeValue.ofBytes(mid)); + max = mid; + } catch (CircuitBreakingException e) { + min = mid; + } + } + return min; + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 23721de4aad9c..175594ac8210f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -17,6 +17,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.http.HttpHost; +import org.apache.logging.log4j.Logger; import org.apache.lucene.search.Sort; import org.apache.lucene.search.TotalHits; import org.apache.lucene.tests.util.LuceneTestCase; @@ -26,12 +27,12 @@ import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; +import org.elasticsearch.action.admin.cluster.tasks.TransportPendingClusterTasksAction; import 
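`findBreakerLimit` returns the largest limit at which the closure still trips the breaker, so one byte more is the smallest allowance that succeeds, and both endpoints are validated before and after the search. A hedged usage sketch; `runWithBreakerLimit` is an assumed test helper that wires the given limit into a `CircuitBreaker`:

```java
// Preconditions enforced by findBreakerLimit itself: the upper bound must NOT
// trip the breaker, and a 0-byte limit MUST trip it, else IllegalArgumentException.
ByteSizeValue largestTrippingLimit = BreakerTestUtil.findBreakerLimit(
    ByteSizeValue.ofMb(100),            // assumed-safe upper bound for this workload
    limit -> runWithBreakerLimit(limit) // hypothetical; throws CircuitBreakingException when tripped
);
// Every limit up to and including largestTrippingLimit breaks; +1 byte succeeds.
```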
org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; @@ -119,6 +120,7 @@ import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.IndicesRequestCache; import org.elasticsearch.indices.store.IndicesStore; +import org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.node.NodeMocksPlugin; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.plugins.NetworkPlugin; @@ -153,6 +155,7 @@ import org.junit.BeforeClass; import java.io.IOException; +import java.io.StringWriter; import java.lang.annotation.Annotation; import java.lang.annotation.ElementType; import java.lang.annotation.Inherited; @@ -181,6 +184,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; +import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -860,7 +864,10 @@ public void waitNoPendingTasksOnAll() throws Exception { for (Client client : clients()) { ClusterHealthResponse clusterHealth = client.admin().cluster().prepareHealth().setLocal(true).get(); assertThat("client " + client + " still has in flight fetch", clusterHealth.getNumberOfInFlightFetch(), equalTo(0)); - PendingClusterTasksResponse pendingTasks = client.admin().cluster().preparePendingClusterTasks().setLocal(true).get(); + PendingClusterTasksResponse pendingTasks = client.execute( + TransportPendingClusterTasksAction.TYPE, + new PendingClusterTasksRequest().local(true) + ).get(); assertThat( "client " + client + " still has pending tasks " + pendingTasks, pendingTasks.pendingTasks(), @@ -971,17 +978,24 @@ private ClusterHealthStatus ensureColor( final var allocationExplainRef = new AtomicReference(); final var clusterStateRef = new AtomicReference(); final var pendingTasksRef = new AtomicReference(); - final var hotThreadsRef = new AtomicReference(); + final var hotThreadsRef = new AtomicReference(); final var detailsFuture = new PlainActionFuture(); try (var listeners = new RefCountingListener(detailsFuture)) { clusterAdmin().prepareAllocationExplain().execute(listeners.acquire(allocationExplainRef::set)); clusterAdmin().prepareState().execute(listeners.acquire(clusterStateRef::set)); - clusterAdmin().preparePendingClusterTasks().execute(listeners.acquire(pendingTasksRef::set)); - clusterAdmin().prepareNodesHotThreads() - .setThreads(9999) - .setIgnoreIdleThreads(false) - .execute(listeners.acquire(hotThreadsRef::set)); + client().execute( + TransportPendingClusterTasksAction.TYPE, + new PendingClusterTasksRequest(), + listeners.acquire(pendingTasksRef::set) + ); + try (var writer = new StringWriter()) { + new HotThreads().busiestThreads(9999).ignoreIdleThreads(false).detect(writer); + hotThreadsRef.set(writer.toString()); + } catch (Exception e) { + logger.error("exception capturing hot threads", e); + hotThreadsRef.set("exception capturing hot threads: " + e); + } } try { @@ -996,10 +1010,7 @@ private ClusterHealthStatus ensureColor( safeFormat(allocationExplainRef.get(), r -> Strings.toString(r.getExplanation(), true, true)), safeFormat(clusterStateRef.get(), r -> r.getState().toString()), safeFormat(pendingTasksRef.get(), r -> Strings.toString(r, true, true)), - safeFormat( - hotThreadsRef.get(), - r -> 
r.getNodes().stream().map(NodeHotThreads::getHotThreads).collect(Collectors.joining("\n")) - ) + hotThreadsRef.get() ); fail("timed out waiting for " + color + " state"); } @@ -1039,7 +1050,7 @@ public ClusterHealthStatus waitForRelocation(ClusterHealthStatus status) { "waitForRelocation timed out (status={}), cluster state:\n{}\n{}", status, clusterAdmin().prepareState().get().getState(), - clusterAdmin().preparePendingClusterTasks().get() + getClusterPendingTasks() ); assertThat("timed out waiting for relocation", actionGet.isTimedOut(), equalTo(false)); } @@ -1049,6 +1060,30 @@ public ClusterHealthStatus waitForRelocation(ClusterHealthStatus status) { return actionGet.getStatus(); } + public static PendingClusterTasksResponse getClusterPendingTasks() { + return getClusterPendingTasks(client()); + } + + public static PendingClusterTasksResponse getClusterPendingTasks(Client client) { + try { + return client.execute(TransportPendingClusterTasksAction.TYPE, new PendingClusterTasksRequest()).get(10, TimeUnit.SECONDS); + } catch (Exception e) { + return fail(e); + } + } + + protected void awaitClusterState(Predicate statePredicate) throws Exception { + awaitClusterState(logger, internalCluster().getMasterName(), statePredicate); + } + + public static void awaitClusterState(Logger logger, Predicate statePredicate) throws Exception { + awaitClusterState(logger, internalCluster().getMasterName(), statePredicate); + } + + public static void awaitClusterState(Logger logger, String viaNode, Predicate statePredicate) throws Exception { + ClusterServiceUtils.awaitClusterState(logger, statePredicate, internalCluster().getInstance(ClusterService.class, viaNode)); + } + /** * Waits until at least a give number of document is visible for searchers * @@ -1145,11 +1180,7 @@ public static DiscoveryNode waitAndGetHealthNode(InternalTestCluster internalClu * Prints the current cluster state as debug logging. 
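Paired with `getClusterPendingTasks`, the new `awaitClusterState` overloads replace ad-hoc polling: the predicate is evaluated against states published on the chosen node via `ClusterServiceUtils`. From inside an `ESIntegTestCase` subclass, usage reduces to:

```java
// Block until the master has published a cluster state with three nodes in it...
awaitClusterState(state -> state.nodes().getSize() == 3);

// ...then log whatever is still queued, now that preparePendingClusterTasks()
// has no client-side builder.
logger.info("pending cluster tasks: {}", getClusterPendingTasks(client()).pendingTasks());
```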
*/ public void logClusterState() { - logger.debug( - "cluster state:\n{}\n{}", - clusterAdmin().prepareState().get().getState(), - clusterAdmin().preparePendingClusterTasks().get() - ); + logger.debug("cluster state:\n{}\n{}", clusterAdmin().prepareState().get().getState(), getClusterPendingTasks()); } protected void ensureClusterSizeConsistency() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index 1517571878fa2..0f3c3dd9b7263 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -13,8 +13,8 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.admin.indices.template.delete.DeleteComponentTemplateAction; -import org.elasticsearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComponentTemplateAction; +import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComposableIndexTemplateAction; import org.elasticsearch.action.datastreams.DeleteDataStreamAction; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.DestructiveOperations; @@ -142,10 +142,10 @@ public void tearDown() throws Exception { throw e; } } - var deleteComposableIndexTemplateRequest = new DeleteComposableIndexTemplateAction.Request("*"); - assertAcked(client().execute(DeleteComposableIndexTemplateAction.INSTANCE, deleteComposableIndexTemplateRequest).actionGet()); - var deleteComponentTemplateRequest = new DeleteComponentTemplateAction.Request("*"); - assertAcked(client().execute(DeleteComponentTemplateAction.INSTANCE, deleteComponentTemplateRequest).actionGet()); + var deleteComposableIndexTemplateRequest = new TransportDeleteComposableIndexTemplateAction.Request("*"); + assertAcked(client().execute(TransportDeleteComposableIndexTemplateAction.TYPE, deleteComposableIndexTemplateRequest).actionGet()); + var deleteComponentTemplateRequest = new TransportDeleteComponentTemplateAction.Request("*"); + assertAcked(client().execute(TransportDeleteComponentTemplateAction.TYPE, deleteComponentTemplateRequest).actionGet()); assertAcked(indicesAdmin().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN).get()); Metadata metadata = clusterAdmin().prepareState().get().getState().getMetadata(); assertThat( @@ -421,7 +421,7 @@ public ClusterHealthStatus ensureGreen(TimeValue timeout, String... 
indices) { logger.info( "ensureGreen timed out, cluster state:\n{}\n{}", clusterAdmin().prepareState().get().getState(), - clusterAdmin().preparePendingClusterTasks().get() + ESIntegTestCase.getClusterPendingTasks(client()) ); assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false)); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index f8482a65bd92b..c072f5643a5cd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -39,6 +39,8 @@ import org.apache.lucene.tests.util.TimeUnits; import org.apache.lucene.util.SetOnce; import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.bootstrap.BootstrapForTesting; @@ -49,8 +51,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.CompositeBytesReference; -import org.elasticsearch.common.geo.GeoPoint; -import org.elasticsearch.common.geo.SpatialPoint; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; @@ -79,6 +79,7 @@ import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.PathUtilsForTesting; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.core.Strings; import org.elasticsearch.core.SuppressForbidden; @@ -1191,32 +1192,6 @@ public static String randomDateFormatterPattern() { return randomFrom(FormatNames.values()).getName(); } - /** - * Generate a random valid point constrained to geographic ranges (lat, lon ranges). - */ - public static SpatialPoint randomGeoPoint() { - return new GeoPoint(randomDoubleBetween(-90, 90, true), randomDoubleBetween(-180, 180, true)); - } - - /** - * Generate a random valid point constrained to cartesian ranges. 
- */ - public static SpatialPoint randomCartesianPoint() { - double x = randomDoubleBetween(-Float.MAX_VALUE, Float.MAX_VALUE, true); - double y = randomDoubleBetween(-Float.MAX_VALUE, Float.MAX_VALUE, true); - return new SpatialPoint() { - @Override - public double getX() { - return x; - } - - @Override - public double getY() { - return y; - } - }; - } - /** * helper to randomly perform on consumer with value */ @@ -1731,10 +1706,7 @@ protected final XContentParser createParser(XContent xContent, BytesReference da */ protected final XContentParser createParser(XContentParserConfiguration config, XContent xContent, BytesReference data) throws IOException { - if (data.hasArray()) { - return xContent.createParser(config, data.array(), data.arrayOffset(), data.length()); - } - return xContent.createParser(config, data.streamInput()); + return XContentHelper.createParserNotCompressed(config, data, xContent.type()); } protected final XContentParser createParserWithCompatibilityFor(XContent xContent, String data, RestApiVersion restApiVersion) @@ -2150,4 +2122,20 @@ public static T asInstanceOf(Class clazz, Object o) { assertThat(o, Matchers.instanceOf(clazz)); return (T) o; } + + public static T expectThrows(Class expectedType, ActionFuture future) { + return expectThrows( + expectedType, + "Expected exception " + expectedType.getSimpleName() + " but no exception was thrown", + () -> future.actionGet().decRef() // dec ref if we unexpectedly fail to not leak transport response + ); + } + + public static T expectThrows(Class expectedType, ActionRequestBuilder builder) { + return expectThrows( + expectedType, + "Expected exception " + expectedType.getSimpleName() + " but no exception was thrown", + () -> builder.get().decRef() // dec ref if we unexpectedly fail to not leak transport response + ); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/LambdaMatchers.java b/test/framework/src/main/java/org/elasticsearch/test/LambdaMatchers.java index c41668c6541f2..674d3e87a008b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/LambdaMatchers.java +++ b/test/framework/src/main/java/org/elasticsearch/test/LambdaMatchers.java @@ -18,6 +18,7 @@ import java.util.Iterator; import java.util.List; import java.util.function.Function; +import java.util.function.Predicate; public class LambdaMatchers { @@ -188,4 +189,73 @@ public void describeTo(Description description) { public static Matcher transformedArrayItemsMatch(Function function, Matcher matcher) { return new ArrayTransformMatcher<>(matcher, function); } + + private static class PredicateMatcher extends BaseMatcher> { + final T item; + + private PredicateMatcher(T item) { + this.item = item; + } + + @Override + @SuppressWarnings({ "rawtypes" }) + public boolean matches(Object actual) { + Predicate p = (Predicate) actual; + try { + return predicateMatches(p); + } catch (ClassCastException e) { + return false; + } + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + protected boolean predicateMatches(Predicate predicate) { + return predicate.test(item); + } + + @Override + @SuppressWarnings({ "rawtypes", "unchecked" }) + public void describeMismatch(Object item, Description description) { + Predicate p = (Predicate) item; + try { + boolean result = p.test(this.item); + description.appendText("predicate with argument ").appendValue(this.item).appendText(" evaluated to ").appendValue(result); + } catch (ClassCastException e) { + description.appendText("predicate did not accept argument of type ") + 
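The two new `expectThrows` overloads exist because transport responses are ref-counted: when the action unexpectedly succeeds, the response is `decRef()`-ed before the assertion fails, so the wrong outcome does not also leak. Usage matches the classic form, minus the lambda; the index name below is hypothetical and assumed to be under a write block:

```java
// Builder form: no thunk, and no leaked response if the request wrongly succeeds.
ClusterBlockException e = expectThrows(
    ClusterBlockException.class,
    client().prepareIndex("blocked-index").setSource("field", "value")
);
```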
.appendValue(this.item.getClass()) + .appendText(" (") + .appendText(e.getMessage()) + .appendText(")"); + } + } + + @Override + public void describeTo(Description description) { + description.appendText("predicate evaluates to with argument ").appendValue(item); + } + } + + public static Matcher> trueWith(T item) { + return new PredicateMatcher<>(item); + } + + private static class PredicateFalseMatcher extends PredicateMatcher { + private PredicateFalseMatcher(T item) { + super(item); + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + protected boolean predicateMatches(Predicate predicate) { + return predicate.test(item) == false; + } + + @Override + public void describeTo(Description description) { + description.appendText("predicate evaluates to with argument ").appendValue(item); + } + } + + public static Matcher> falseWith(T item) { + return new PredicateFalseMatcher<>(item); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java index e13773443d4a6..374854626703d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java @@ -12,8 +12,8 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.admin.indices.template.delete.DeleteComponentTemplateAction; -import org.elasticsearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComponentTemplateAction; +import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.get.GetComponentTemplateAction; import org.elasticsearch.action.admin.indices.template.get.GetComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; @@ -268,8 +268,8 @@ public void wipeAllComposableIndexTemplates(Set excludeTemplates) { .toArray(String[]::new); if (templates.length != 0) { - var request = new DeleteComposableIndexTemplateAction.Request(templates); - assertAcked(client().execute(DeleteComposableIndexTemplateAction.INSTANCE, request).actionGet()); + var request = new TransportDeleteComposableIndexTemplateAction.Request(templates); + assertAcked(client().execute(TransportDeleteComposableIndexTemplateAction.TYPE, request).actionGet()); } } } @@ -285,8 +285,8 @@ public void wipeAllComponentTemplates(Set excludeTemplates) { .toArray(String[]::new); if (templates.length != 0) { - var request = new DeleteComponentTemplateAction.Request(templates); - assertAcked(client().execute(DeleteComponentTemplateAction.INSTANCE, request).actionGet()); + var request = new TransportDeleteComponentTemplateAction.Request(templates); + assertAcked(client().execute(TransportDeleteComponentTemplateAction.TYPE, request).actionGet()); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java b/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java index 5392986c25507..3adf92e30e15d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java @@ -14,9 +14,11 @@ import 
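`trueWith`/`falseWith` turn predicate checks into matchers with a real mismatch message (the argument and what the predicate returned) instead of a bare `assertTrue` failure:

```java
Predicate<String> isBlank = String::isBlank;
assertThat(isBlank, trueWith("   "));   // passes
assertThat(isBlank, falseWith("text")); // passes
// a failing trueWith reports: predicate with argument "text" evaluated to false
```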
org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.client.internal.FilterClient; import org.elasticsearch.cluster.routing.Preference; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.search.builder.PointInTimeBuilder; import java.util.Arrays; import java.util.Random; @@ -59,7 +61,7 @@ public RandomizingClient(Client client, Random random) { @Override public SearchRequestBuilder prepareSearch(String... indices) { - SearchRequestBuilder searchRequestBuilder = in.prepareSearch(indices) + SearchRequestBuilder searchRequestBuilder = new RandomizedSearchRequestBuilder(this).setIndices(indices) .setSearchType(defaultSearchType) .setPreference(defaultPreference) .setBatchedReduceSize(batchedReduceSize); @@ -84,4 +86,18 @@ public Client in() { return super.in(); } + private class RandomizedSearchRequestBuilder extends SearchRequestBuilder { + RandomizedSearchRequestBuilder(ElasticsearchClient client) { + super(client); + } + + @Override + public SearchRequestBuilder setPointInTime(PointInTimeBuilder pointInTimeBuilder) { + if (defaultPreference != null) { + setPreference(null); + } + return super.setPointInTime(pointInTimeBuilder); + } + } + } diff --git a/test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java b/test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java index b68f080eb958f..aad4c45693f67 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java +++ b/test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java @@ -29,6 +29,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Predicate; import java.util.stream.Collectors; import static org.elasticsearch.cluster.routing.RoutingNodesHelper.assignedShardsIn; @@ -121,7 +122,7 @@ public void applyFailedShards(List failedShards, RoutingAllocation public void beforeAllocation(RoutingAllocation allocation) {} @Override - public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) {} + public void afterPrimariesBeforeReplicas(RoutingAllocation allocation, Predicate isRelevantShardPredicate) {} @Override public void allocateUnassigned( diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index 0d7ab26faecf9..dc48868703bbb 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -35,12 +35,14 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.suggest.Suggest; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ 
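The `RandomizedSearchRequestBuilder` override fixes a real interaction: this client randomly injects a default `preference`, but a search that attaches a point in time must not carry one, as request validation rejects the combination. A sketch, assuming `pit` is an already-opened `PointInTimeBuilder`:

```java
// The randomly-assigned preference is dropped the moment a PIT is attached,
// keeping randomized tests clear of the preference-vs-PIT validation error.
var response = randomizingClient.prepareSearch()
    .setPointInTime(pit)
    .get();
```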
-178,7 +180,7 @@ public static void assertBlocked(BaseBroadcastResponse replicatedBroadcastRespon * @param expectedBlockId the expected block id */ public static void assertBlocked(final ActionRequestBuilder builder, @Nullable final Integer expectedBlockId) { - var e = expectThrows(ClusterBlockException.class, builder::get); + var e = ESTestCase.expectThrows(ClusterBlockException.class, builder); assertThat(e.blocks(), not(empty())); RestStatus status = checkRetryableBlock(e.blocks()) ? RestStatus.TOO_MANY_REQUESTS : RestStatus.FORBIDDEN; assertThat(e.status(), equalTo(status)); @@ -686,13 +688,6 @@ public static T assertBooleanSubQuery(Query query, Class su return subqueryType.cast(q.clauses().get(i).getQuery()); } - /** - * Run the request from a given builder and check that it throws an exception of the right type - */ - public static void assertRequestBuilderThrows(ActionRequestBuilder builder, Class exceptionClass) { - assertFutureThrows(builder.execute(), exceptionClass); - } - /** * Run the request from a given builder and check that it throws an exception of the right type, with a given {@link RestStatus} */ @@ -820,11 +815,16 @@ public static void assertToXContentEquivalent(BytesReference expected, BytesRefe // Note that byte[] holding binary values need special treatment as they need to be properly compared item per item. Map actualMap = null; Map expectedMap = null; - try (XContentParser actualParser = xContentType.xContent().createParser(XContentParserConfiguration.EMPTY, actual.streamInput())) { + try ( + XContentParser actualParser = XContentHelper.createParserNotCompressed(XContentParserConfiguration.EMPTY, actual, xContentType) + ) { actualMap = actualParser.map(); try ( - XContentParser expectedParser = xContentType.xContent() - .createParser(XContentParserConfiguration.EMPTY, expected.streamInput()) + XContentParser expectedParser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY, + expected, + xContentType + ) ) { expectedMap = expectedParser.map(); try { diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/OptionalMatchers.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/OptionalMatchers.java index 333b8dea76ce2..1cd92296a4ec7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/OptionalMatchers.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/OptionalMatchers.java @@ -8,66 +8,79 @@ package org.elasticsearch.test.hamcrest; +import org.hamcrest.BaseMatcher; import org.hamcrest.Description; +import org.hamcrest.Matcher; import org.hamcrest.TypeSafeMatcher; import java.util.Optional; +import static org.hamcrest.Matchers.anything; +import static org.hamcrest.Matchers.equalTo; + public class OptionalMatchers { private static class IsEmptyMatcher extends TypeSafeMatcher> { - @Override - protected boolean matchesSafely(final Optional item) { - // noinspection OptionalAssignedToNull - return item != null && item.isEmpty(); + protected boolean matchesSafely(Optional item) { + return item.isEmpty(); } @Override - public void describeTo(final Description description) { - description.appendText("expected empty optional"); + protected void describeMismatchSafely(Optional item, Description mismatchDescription) { + mismatchDescription.appendText("a non-empty optional ").appendValue(item.get()); } @Override - protected void describeMismatchSafely(final Optional item, final Description mismatchDescription) { - if (item == null) { - mismatchDescription.appendText("was null"); 
- } else { - mismatchDescription.appendText("was ").appendText(item.toString()); - } + public void describeTo(Description description) { + description.appendText("an empty optional"); } - } - public static IsEmptyMatcher isEmpty() { + public static Matcher> isEmpty() { return new IsEmptyMatcher(); } - private static class IsPresentMatcher extends TypeSafeMatcher> { + private static class IsPresentMatcher extends BaseMatcher> { + private final Matcher contents; + + private IsPresentMatcher(Matcher contents) { + this.contents = contents; + } @Override - protected boolean matchesSafely(final Optional item) { - return item != null && item.isPresent(); + public boolean matches(Object actual) { + Optional opt = (Optional) actual; + return opt.isPresent() && contents.matches(opt.get()); } @Override - public void describeTo(final Description description) { - description.appendText("expected non-empty optional"); + public void describeTo(Description description) { + description.appendText("a non-empty optional ").appendDescriptionOf(contents); } @Override - protected void describeMismatchSafely(final Optional item, final Description mismatchDescription) { - if (item == null) { - mismatchDescription.appendText("was null"); - } else { - mismatchDescription.appendText("was empty"); + public void describeMismatch(Object item, Description description) { + Optional opt = (Optional) item; + if (opt.isEmpty()) { + description.appendText("an empty optional"); + return; } + + description.appendText("an optional "); + contents.describeMismatch(opt.get(), description); } + } + public static Matcher> isPresent() { + return new IsPresentMatcher<>(anything()); } - public static IsPresentMatcher isPresent() { - return new IsPresentMatcher(); + public static Matcher> isPresentWith(T contents) { + return new IsPresentMatcher<>(equalTo(contents)); } + public static Matcher> isPresentWith(Matcher contents) { + return new IsPresentMatcher<>(contents); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 98a1bad5dda77..94b1d4ab321ee 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -8,6 +8,8 @@ package org.elasticsearch.test.rest; +import io.netty.handler.codec.http.HttpMethod; + import org.apache.http.Header; import org.apache.http.HttpHost; import org.apache.http.HttpStatus; @@ -15,6 +17,8 @@ import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.InputStreamEntity; import org.apache.http.message.BasicHeader; import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; import org.apache.http.ssl.SSLContextBuilder; @@ -68,7 +72,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.DeprecationHandler; import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; @@ -107,8 +111,8 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; -import java.util.TreeSet; import java.util.concurrent.TimeUnit; +import 
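`isPresent` now accepts an inner matcher and `isPresentWith` covers the common equality case, so asserting on `Optional` contents no longer needs an explicit `get()`:

```java
assertThat(Optional.of("green"), isPresentWith("green"));          // exact contents
assertThat(Optional.of("green"), isPresentWith(startsWith("gr"))); // matcher contents
assertThat(Optional.empty(), isEmpty());                           // unchanged
```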
java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import java.util.function.Predicate; import java.util.function.Supplier; @@ -122,6 +126,8 @@ import static org.elasticsearch.client.RestClient.IGNORE_RESPONSE_CODES_PARAM; import static org.elasticsearch.cluster.ClusterState.VERSION_INTRODUCING_TRANSPORT_VERSIONS; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.test.rest.TestFeatureService.ALL_FEATURES; +import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -216,8 +222,26 @@ public enum ProductFeature { } private static EnumSet availableFeatures; - private static Set nodeVersions; - private static TestFeatureService testFeatureService; + private static Set nodesVersions; + private static TestFeatureService testFeatureService = ALL_FEATURES; + + protected static Set getCachedNodesVersions() { + assert nodesVersions != null; + return nodesVersions; + } + + protected static Set readVersionsFromNodesInfo(RestClient adminClient) throws IOException { + return getNodesInfo(adminClient).values().stream().map(nodeInfo -> nodeInfo.get("version").toString()).collect(Collectors.toSet()); + } + + protected static Map> getNodesInfo(RestClient adminClient) throws IOException { + Map response = entityAsMap(adminClient.performRequest(new Request("GET", "_nodes/plugins"))); + Map nodes = (Map) response.get("nodes"); + + return nodes.entrySet() + .stream() + .collect(Collectors.toUnmodifiableMap(entry -> entry.getKey().toString(), entry -> (Map) entry.getValue())); + } protected static boolean clusterHasFeature(String featureId) { return testFeatureService.clusterHasFeature(featureId); @@ -233,24 +257,20 @@ public void initClient() throws IOException { assert adminClient == null; assert clusterHosts == null; assert availableFeatures == null; - assert nodeVersions == null; - assert testFeatureService == null; + assert nodesVersions == null; + assert testFeatureService == ALL_FEATURES; clusterHosts = parseClusterHosts(getTestRestCluster()); logger.info("initializing REST clients against {}", clusterHosts); client = buildClient(restClientSettings(), clusterHosts.toArray(new HttpHost[clusterHosts.size()])); adminClient = buildClient(restAdminSettings(), clusterHosts.toArray(new HttpHost[clusterHosts.size()])); availableFeatures = EnumSet.of(ProductFeature.LEGACY_TEMPLATES); - nodeVersions = new TreeSet<>(); - var semanticNodeVersions = new HashSet(); + Set versions = new HashSet<>(); boolean serverless = false; - Map response = entityAsMap(adminClient.performRequest(new Request("GET", "_nodes/plugins"))); - Map nodes = (Map) response.get("nodes"); - for (Map.Entry node : nodes.entrySet()) { - Map nodeInfo = (Map) node.getValue(); + + for (Map nodeInfo : getNodesInfo(adminClient).values()) { var nodeVersion = nodeInfo.get("version").toString(); - nodeVersions.add(nodeVersion); - parseLegacyVersion(nodeVersion).map(semanticNodeVersions::add); + versions.add(nodeVersion); for (Object module : (List) nodeInfo.get("modules")) { Map moduleInfo = (Map) module; final String moduleName = moduleInfo.get("name").toString(); @@ -289,29 +309,41 @@ public void initClient() throws IOException { ); } } + nodesVersions = Collections.unmodifiableSet(versions); + var semanticNodeVersions = nodesVersions.stream() + .map(ESRestTestCase::parseLegacyVersion) + .flatMap(Optional::stream) + .collect(Collectors.toSet()); 
assert semanticNodeVersions.isEmpty() == false || serverless; - // Historical features information is unavailable when using legacy test plugins - boolean hasHistoricalFeaturesInformation = System.getProperty("tests.features.metadata.path") != null; - var providers = hasHistoricalFeaturesInformation - ? List.of(new RestTestLegacyFeatures(), new ESRestTestCaseHistoricalFeatures()) - : List.of(new RestTestLegacyFeatures()); - - testFeatureService = new TestFeatureService( - hasHistoricalFeaturesInformation, - providers, - semanticNodeVersions, - ClusterFeatures.calculateAllNodeFeatures(getClusterStateFeatures().values()) - ); + testFeatureService = createTestFeatureService(getClusterStateFeatures(adminClient), semanticNodeVersions); } - assert testFeatureService != null; + assert testFeatureService != ALL_FEATURES; assert client != null; assert adminClient != null; assert clusterHosts != null; assert availableFeatures != null; - assert nodeVersions != null; + assert nodesVersions != null; + } + + protected static TestFeatureService createTestFeatureService( + Map> clusterStateFeatures, + Set semanticNodeVersions + ) { + // Historical features information is unavailable when using legacy test plugins + boolean hasHistoricalFeaturesInformation = System.getProperty("tests.features.metadata.path") != null; + var providers = hasHistoricalFeaturesInformation + ? List.of(new RestTestLegacyFeatures(), new ESRestTestCaseHistoricalFeatures()) + : List.of(new RestTestLegacyFeatures()); + + return new ESRestTestFeatureService( + hasHistoricalFeaturesInformation, + providers, + semanticNodeVersions, + ClusterFeatures.calculateAllNodeFeatures(clusterStateFeatures.values()) + ); } protected static boolean has(ProductFeature feature) { @@ -415,7 +447,7 @@ private boolean isExclusivelyTargetingCurrentVersionCluster() { public static RequestOptions expectVersionSpecificWarnings(Consumer expectationsSetter) { Builder builder = RequestOptions.DEFAULT.toBuilder(); - VersionSensitiveWarningsHandler warningsHandler = new VersionSensitiveWarningsHandler(new HashSet<>(nodeVersions)); + VersionSensitiveWarningsHandler warningsHandler = new VersionSensitiveWarningsHandler(getCachedNodesVersions()); expectationsSetter.accept(warningsHandler); builder.setWarningsHandler(warningsHandler); return builder.build(); @@ -484,8 +516,8 @@ public static void closeClients() throws IOException { client = null; adminClient = null; availableFeatures = null; - nodeVersions = null; - testFeatureService = null; + nodesVersions = null; + testFeatureService = ALL_FEATURES; } } @@ -690,8 +722,8 @@ protected Set preserveILMPolicyIds() { "logs@lifecycle", "metrics", "metrics@lifecycle", - "profiling", - "profiling@lifecycle", + "profiling-60-days", + "profiling-60-days@lifecycle", "synthetics", "synthetics@lifecycle", "7-days-default", @@ -1128,27 +1160,25 @@ protected void deleteRepository(String repoName) throws IOException { private static void wipeClusterSettings() throws IOException { Map getResponse = entityAsMap(adminClient().performRequest(new Request("GET", "/_cluster/settings"))); - boolean mustClear = false; - XContentBuilder clearCommand = JsonXContent.contentBuilder(); - clearCommand.startObject(); - for (Map.Entry entry : getResponse.entrySet()) { - String type = entry.getKey().toString(); - Map settings = (Map) entry.getValue(); - if (settings.isEmpty()) { - continue; - } - mustClear = true; - clearCommand.startObject(type); - for (Object key : settings.keySet()) { - clearCommand.field(key + ".*").nullValue(); + final var 
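Extracting `createTestFeatureService`, together with `getNodesInfo`/`readVersionsFromNodesInfo` above, lets tests that manage their own clients rebuild the feature service without going through `initClient`. A sketch of that reuse, assuming `adminClient` targets the cluster under test:

```java
Set<Version> semanticVersions = readVersionsFromNodesInfo(adminClient).stream()
    .map(ESRestTestCase::parseLegacyVersion)
    .flatMap(Optional::stream)
    .collect(Collectors.toSet());                 // empty on serverless, which is fine

TestFeatureService features = createTestFeatureService(
    getClusterStateFeatures(adminClient),         // features advertised via cluster state
    semanticVersions
);
boolean vendorTypes = features.clusterHasFeature("rest.supports_vendor_xcontent_types");
```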
mustClear = new AtomicBoolean(); + final var request = newXContentRequest(HttpMethod.PUT, "/_cluster/settings", (clearCommand, params) -> { + for (Map.Entry entry : getResponse.entrySet()) { + String type = entry.getKey().toString(); + Map settings = (Map) entry.getValue(); + if (settings.isEmpty()) { + continue; + } + mustClear.set(true); + clearCommand.startObject(type); + for (Object key : settings.keySet()) { + clearCommand.field(key + ".*").nullValue(); + } + clearCommand.endObject(); } - clearCommand.endObject(); - } - clearCommand.endObject(); - - if (mustClear) { - Request request = new Request("PUT", "/_cluster/settings"); + return clearCommand; + }); + if (mustClear.get()) { request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warnings -> { if (warnings.isEmpty()) { return false; @@ -1158,8 +1188,6 @@ private static void wipeClusterSettings() throws IOException { return warnings.get(0).contains("xpack.monitoring") == false; } })); - - request.setJsonEntity(Strings.toString(clearCommand)); adminClient().performRequest(request); } } @@ -1228,7 +1256,9 @@ protected static RefreshResponse refresh(String index) throws IOException { protected static RefreshResponse refresh(RestClient client, String index) throws IOException { Request refreshRequest = new Request("POST", "/" + index + "/_refresh"); Response response = client.performRequest(refreshRequest); - return RefreshResponse.fromXContent(responseAsParser(response)); + try (var parser = responseAsParser(response)) { + return RefreshResponse.fromXContent(parser); + } } private static void waitForPendingRollupTasks() throws Exception { @@ -1555,11 +1585,12 @@ public static void updateClusterSettings(Settings settings) throws IOException { * Updates the cluster with the provided settings (as persistent settings) **/ public static void updateClusterSettings(RestClient client, Settings settings) throws IOException { - Request request = new Request("PUT", "/_cluster/settings"); - String entity = "{ \"persistent\":" + Strings.toString(settings) + "}"; - request.setJsonEntity(entity); - Response response = client.performRequest(request); - assertOK(response); + final var request = newXContentRequest(HttpMethod.PUT, "/_cluster/settings", (builder, params) -> { + builder.startObject("persistent"); + settings.toXContent(builder, params); + return builder.endObject(); + }); + assertOK(client.performRequest(request)); } /** @@ -1658,34 +1689,44 @@ protected static CreateIndexResponse createIndex(String name, Settings settings, public static CreateIndexResponse createIndex(RestClient client, String name, Settings settings, String mapping, String aliases) throws IOException { - Request request = new Request("PUT", "/" + name); - String entity = "{"; - if (settings != null) { - entity += "\"settings\": " + Strings.toString(settings); - if (settings.getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) == false) { - expectSoftDeletesWarning(request, name); - } - } - if (mapping != null) { + + final Request request = newXContentRequest(HttpMethod.PUT, "/" + name, (builder, params) -> { if (settings != null) { - entity += ","; + builder.startObject("settings"); + settings.toXContent(builder, params); + builder.endObject(); } - if (mapping.trim().startsWith("{")) { - entity += "\"mappings\" : " + mapping + ""; - } else { - entity += "\"mappings\" : {" + mapping + "}"; + + if (mapping != null) { + try ( + var mappingParser = XContentType.JSON.xContent() + .createParser(XContentParserConfiguration.EMPTY, 
mapping.trim().startsWith("{") ? mapping : '{' + mapping + '}') + ) { + builder.field("mappings"); + builder.copyCurrentStructure(mappingParser); + } } - } - if (aliases != null) { - if (settings != null || mapping != null) { - entity += ","; + + if (aliases != null) { + try ( + var aliasesParser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, '{' + aliases + '}') + ) { + builder.field("aliases"); + builder.copyCurrentStructure(aliasesParser); + } } - entity += "\"aliases\": {" + aliases + "}"; + + return builder; + }); + + if (settings != null && settings.getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) == false) { + expectSoftDeletesWarning(request, name); + } + + final Response response = client.performRequest(request); + try (var parser = responseAsParser(response)) { + return CreateIndexResponse.fromXContent(parser); } - entity += "}"; - request.setJsonEntity(entity); - Response response = client.performRequest(request); - return CreateIndexResponse.fromXContent(responseAsParser(response)); } protected static AcknowledgedResponse deleteIndex(String name) throws IOException { @@ -1695,7 +1736,9 @@ protected static AcknowledgedResponse deleteIndex(String name) throws IOExceptio protected static AcknowledgedResponse deleteIndex(RestClient restClient, String name) throws IOException { Request request = new Request("DELETE", "/" + name); Response response = restClient.performRequest(request); - return AcknowledgedResponse.fromXContent(responseAsParser(response)); + try (var parser = responseAsParser(response)) { + return AcknowledgedResponse.fromXContent(parser); + } } protected static void updateIndexSettings(String index, Settings.Builder settings) throws IOException { @@ -1703,9 +1746,8 @@ protected static void updateIndexSettings(String index, Settings.Builder setting } private static void updateIndexSettings(String index, Settings settings) throws IOException { - Request request = new Request("PUT", "/" + index + "/_settings"); - request.setJsonEntity(Strings.toString(settings)); - client().performRequest(request); + final var request = newXContentRequest(HttpMethod.PUT, "/" + index + "/_settings", settings); + assertOK(client.performRequest(request)); } protected static void expectSoftDeletesWarning(Request request, String indexName) throws IOException { @@ -1729,7 +1771,11 @@ protected static Map getIndexSettings(String index) throws IOExc request.addParameter("flat_settings", "true"); Response response = client().performRequest(request); try (InputStream is = response.getEntity().getContent()) { - return XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + return XContentHelper.convertToMap( + XContentType.fromMediaType(response.getEntity().getContentType().getValue()).xContent(), + is, + true + ); } } @@ -1818,8 +1864,12 @@ protected static Map responseAsMap(Response response) throws IOE return responseEntity; } - protected static XContentParser responseAsParser(Response response) throws IOException { - return XContentHelper.createParser(XContentParserConfiguration.EMPTY, responseAsBytes(response), XContentType.JSON); + public static XContentParser responseAsParser(Response response) throws IOException { + return XContentHelper.createParser( + XContentParserConfiguration.EMPTY, + responseAsBytes(response), + XContentType.fromMediaType(response.getEntity().getContentType().getValue()) + ); } protected static BytesReference responseAsBytes(Response response) throws IOException { @@ -1832,9 +1882,13 @@ protected 
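The rewritten `createIndex` stops concatenating JSON strings: caller-supplied `mapping`/`aliases` fragments are streamed through a throwaway parser into the builder via `copyCurrentStructure`, which validates them and keeps escaping correct. The core move in isolation, where `builder` is an `XContentBuilder` positioned inside the request object:

```java
String mappingJson = "{\"properties\":{\"f\":{\"type\":\"keyword\"}}}";
try (XContentParser p = XContentType.JSON.xContent()
        .createParser(XContentParserConfiguration.EMPTY, mappingJson)) {
    builder.field("mappings");
    builder.copyCurrentStructure(p); // embeds the parsed object, not raw text
}
```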
static void registerRepository(String repository, String type, boolean protected static void registerRepository(RestClient restClient, String repository, String type, boolean verify, Settings settings) throws IOException { - final Request request = new Request(HttpPut.METHOD_NAME, "_snapshot/" + repository); + + final Request request = newXContentRequest( + HttpMethod.PUT, + "/_snapshot/" + repository, + new PutRepositoryRequest(repository).type(type).settings(settings) + ); request.addParameter("verify", Boolean.toString(verify)); - request.setJsonEntity(Strings.toString(new PutRepositoryRequest(repository).type(type).settings(settings))); final Response response = restClient.performRequest(request); assertAcked("Failed to create repository [" + repository + "] of type [" + type + "]: " + response, response); @@ -1941,10 +1995,12 @@ protected static boolean isXPackTemplate(String name) { || name.startsWith("logs-apm")) { return true; } + if (name.startsWith(".slm-history") || name.startsWith("ilm-history")) { + return true; + } switch (name) { case ".watches": case "security_audit_log": - case ".slm-history": case ".async-search": case ".profiling-ilm-lock": // TODO: Remove after switch to K/V indices case "saml-service-provider": @@ -1959,7 +2015,6 @@ protected static boolean isXPackTemplate(String name) { case "synthetics-settings": case "synthetics-mappings": case ".snapshot-blob-cache": - case "ilm-history": case "logstash-index-template": case "security-index-template": case "data-streams-mappings": @@ -2064,11 +2119,11 @@ public void ensurePeerRecoveryRetentionLeasesRenewedAndSynced(String index) thro }, 60, TimeUnit.SECONDS); } - private static Map> getClusterStateFeatures() throws IOException { + protected static Map> getClusterStateFeatures(RestClient adminClient) throws IOException { final Request request = new Request("GET", "_cluster/state"); request.addParameter("filter_path", "nodes_features"); - final Response response = adminClient().performRequest(request); + final Response response = adminClient.performRequest(request); var responseData = responseAsMap(response); if (responseData.get("nodes_features") instanceof List nodesFeatures) { @@ -2157,7 +2212,7 @@ protected static TransportVersion getTransportVersionWithFallback( return fallbackSupplier.get(); } - protected static Optional parseLegacyVersion(String version) { + public static Optional parseLegacyVersion(String version) { var semanticVersionMatcher = SEMANTIC_VERSION_PATTERN.matcher(version); if (semanticVersionMatcher.matches()) { return Optional.of(Version.fromString(semanticVersionMatcher.group(1))); @@ -2260,15 +2315,11 @@ protected FieldCapabilitiesResponse fieldCaps( request.addParameter("filters", fieldFilters); } if (indexFilter != null) { - XContentBuilder body = JsonXContent.contentBuilder(); - body.startObject(); - body.field("index_filter", indexFilter); - body.endObject(); - request.setJsonEntity(Strings.toString(body)); + addXContentBody(request, (body, params) -> body.field("index_filter", indexFilter)); } Response response = restClient.performRequest(request); assertOK(response); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, response.getEntity().getContent())) { + try (XContentParser parser = responseAsParser(response)) { return FieldCapabilitiesResponse.fromXContent(parser); } } @@ -2324,4 +2375,38 @@ public static void setIgnoredErrorResponseCodes(Request request, RestStatus... 
r Arrays.stream(restStatuses).map(restStatus -> Integer.toString(restStatus.getStatus())).collect(Collectors.joining(",")) ); } + + private static XContentType randomSupportedContentType() { + if (clusterHasFeature(RestTestLegacyFeatures.SUPPORTS_TRUE_BINARY_RESPONSES) == false) { + // Very old versions encode binary stored fields using base64 in all formats, not just JSON, but we expect to see raw binary + // fields in non-JSON formats, so we stick to JSON in these cases. + return XContentType.JSON; + } + + if (clusterHasFeature(RestTestLegacyFeatures.SUPPORTS_VENDOR_XCONTENT_TYPES) == false) { + // The VND_* formats were introduced part-way through the 7.x series for compatibility with 8.x, but are not supported by older + // 7.x versions. + return randomFrom(XContentType.JSON, XContentType.CBOR, XContentType.YAML, XContentType.SMILE); + } + + return randomFrom(XContentType.values()); + } + + public static void addXContentBody(Request request, ToXContent body) throws IOException { + final var xContentType = randomSupportedContentType(); + final var bodyBytes = XContentHelper.toXContent(body, xContentType, EMPTY_PARAMS, randomBoolean()); + request.setEntity( + new InputStreamEntity( + bodyBytes.streamInput(), + bodyBytes.length(), + ContentType.create(xContentType.mediaTypeWithoutParameters()) + ) + ); + } + + public static Request newXContentRequest(HttpMethod method, String endpoint, ToXContent body) throws IOException { + final var request = new Request(method.name(), endpoint); + addXContentBody(request, body); + return request; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java new file mode 100644 index 0000000000000..5bb22058e4688 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.rest; + +import org.elasticsearch.Version; +import org.elasticsearch.core.Strings; +import org.elasticsearch.features.FeatureData; +import org.elasticsearch.features.FeatureSpecification; + +import java.util.Collection; +import java.util.List; +import java.util.NavigableMap; +import java.util.Set; +import java.util.function.Predicate; + +class ESRestTestFeatureService implements TestFeatureService { + private final Predicate historicalFeaturesPredicate; + private final Set clusterStateFeatures; + + ESRestTestFeatureService( + boolean hasHistoricalFeaturesInformation, + List specs, + Collection nodeVersions, + Set clusterStateFeatures + ) { + var minNodeVersion = nodeVersions.stream().min(Version::compareTo); + var featureData = FeatureData.createFromSpecifications(specs); + var historicalFeatures = featureData.getHistoricalFeatures(); + var allHistoricalFeatures = historicalFeatures.lastEntry() == null ? Set.of() : historicalFeatures.lastEntry().getValue(); + + var errorMessage = Strings.format( + hasHistoricalFeaturesInformation + ? 
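`addXContentBody`/`newXContentRequest` are the building blocks used throughout this file: the body is expressed as a `ToXContent` and serialized in a randomly chosen, version-appropriate wire format (per `randomSupportedContentType`), so REST tests exercise CBOR/SMILE/YAML and the vendor types for free. The cluster-settings update above, for instance, reduces to:

```java
// HttpMethod is io.netty.handler.codec.http.HttpMethod, per the new import.
Request request = newXContentRequest(HttpMethod.PUT, "/_cluster/settings", (builder, params) -> {
    builder.startObject("persistent");
    builder.field("cluster.routing.allocation.enable", "none"); // any setting works here
    return builder.endObject();
});
assertOK(client().performRequest(request));
```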
"Check the feature has been added to the correct FeatureSpecification in the relevant module or, if this is a " + + "legacy feature used only in tests, to a test-only FeatureSpecification such as %s." + : "This test is running on the legacy test framework; historical features from production code will not be available. " + + "You need to port the test to the new test plugins in order to use historical features from production code. " + + "If this is a legacy feature used only in tests, you can add it to a test-only FeatureSpecification such as %s.", + RestTestLegacyFeatures.class.getCanonicalName() + ); + this.historicalFeaturesPredicate = minNodeVersion.>map(v -> featureId -> { + assert allHistoricalFeatures.contains(featureId) : Strings.format("Unknown historical feature %s: %s", featureId, errorMessage); + return hasHistoricalFeature(historicalFeatures, v, featureId); + }).orElse(featureId -> { + // We can safely assume that new non-semantic versions (serverless) support all historical features + assert allHistoricalFeatures.contains(featureId) : Strings.format("Unknown historical feature %s: %s", featureId, errorMessage); + return true; + }); + this.clusterStateFeatures = clusterStateFeatures; + } + + private static boolean hasHistoricalFeature(NavigableMap> historicalFeatures, Version version, String featureId) { + var features = historicalFeatures.floorEntry(version); + return features != null && features.getValue().contains(featureId); + } + + public boolean clusterHasFeature(String featureId) { + if (clusterStateFeatures.contains(featureId)) { + return true; + } + return historicalFeaturesPredicate.test(featureId); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java b/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java index bcd5867239f90..726d2ec0d963d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java @@ -9,7 +9,7 @@ package org.elasticsearch.test.rest; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ListenableActionFuture; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -150,7 +150,7 @@ public Exception getInboundException() { public static class FakeHttpChannel implements HttpChannel { private final InetSocketAddress remoteAddress; - private final ListenableActionFuture closeFuture = new ListenableActionFuture<>(); + private final SubscribableListener closeFuture = new SubscribableListener<>(); public FakeHttpChannel(InetSocketAddress remoteAddress) { this.remoteAddress = remoteAddress; diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ObjectPath.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ObjectPath.java index dcd6f0e7f2e26..222296895da0f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ObjectPath.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ObjectPath.java @@ -11,6 +11,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xcontent.XContentBuilder; import 
org.elasticsearch.xcontent.XContentParser; @@ -38,7 +39,7 @@ public static ObjectPath createFromResponse(Response response) throws IOExceptio } public static ObjectPath createFromXContent(XContent xContent, BytesReference input) throws IOException { - try (XContentParser parser = xContent.createParser(XContentParserConfiguration.EMPTY, input.streamInput())) { + try (XContentParser parser = XContentHelper.createParserNotCompressed(XContentParserConfiguration.EMPTY, input, xContent.type())) { if (parser.nextToken() == XContentParser.Token.START_ARRAY) { return new ObjectPath(parser.listOrderedMap()); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java index ca8f339026b6b..fcd2f781ec58d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java @@ -39,6 +39,10 @@ public class RestTestLegacyFeatures implements FeatureSpecification { "indices.delete_template_multiple_names_supported" ); public static final NodeFeature ML_NEW_MEMORY_FORMAT = new NodeFeature("ml.new_memory_format"); + @UpdateForV9 + public static final NodeFeature SUPPORTS_VENDOR_XCONTENT_TYPES = new NodeFeature("rest.supports_vendor_xcontent_types"); + @UpdateForV9 + public static final NodeFeature SUPPORTS_TRUE_BINARY_RESPONSES = new NodeFeature("rest.supports_true_binary_responses"); /** These are "pure test" features: normally we would not need them, and test for TransportVersion/fallback to Version (see for example * {@code ESRestTestCase#minimumTransportVersion()}. However, some tests explicitly check and validate the content of a response, so @@ -88,6 +92,8 @@ public Map getHistoricalFeatures() { entry(SECURITY_UPDATE_API_KEY, Version.V_8_4_0), entry(SECURITY_BULK_UPDATE_API_KEY, Version.V_8_5_0), entry(ML_NEW_MEMORY_FORMAT, Version.V_8_11_0), + entry(SUPPORTS_VENDOR_XCONTENT_TYPES, Version.V_7_11_0), + entry(SUPPORTS_TRUE_BINARY_RESPONSES, Version.V_7_7_0), entry(TRANSPORT_VERSION_SUPPORTED, VERSION_INTRODUCING_TRANSPORT_VERSIONS), entry(STATE_REPLACED_TRANSPORT_VERSION_WITH_NODES_VERSION, Version.V_8_11_0), entry(ML_MEMORY_OVERHEAD_FIXED, Version.V_8_2_1), diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java b/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java index 1f7a48add1f1c..9de1fcf631520 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java @@ -8,58 +8,8 @@ package org.elasticsearch.test.rest; -import org.elasticsearch.Version; -import org.elasticsearch.core.Strings; -import org.elasticsearch.features.FeatureData; -import org.elasticsearch.features.FeatureSpecification; +public interface TestFeatureService { + boolean clusterHasFeature(String featureId); -import java.util.Collection; -import java.util.List; -import java.util.NavigableMap; -import java.util.Set; -import java.util.function.Predicate; - -class TestFeatureService { - private final Predicate historicalFeaturesPredicate; - private final Set clusterStateFeatures; - - TestFeatureService( - boolean hasHistoricalFeaturesInformation, - List specs, - Collection nodeVersions, - Set clusterStateFeatures - ) { - var minNodeVersion = nodeVersions.stream().min(Version::compareTo); - var featureData = 
FeatureData.createFromSpecifications(specs); - var historicalFeatures = featureData.getHistoricalFeatures(); - var allHistoricalFeatures = historicalFeatures.lastEntry() == null ? Set.of() : historicalFeatures.lastEntry().getValue(); - - var errorMessage = hasHistoricalFeaturesInformation - ? "Check the feature has been added to the correct FeatureSpecification in the relevant module or, if this is a " - + "legacy feature used only in tests, to a test-only FeatureSpecification" - : "This test is running on the legacy test framework; historical features from production code will not be available." - + " You need to port the test to the new test plugins in order to use historical features from production code." - + " If this is a legacy feature used only in tests, you can add it to a test-only FeatureSpecification"; - this.historicalFeaturesPredicate = minNodeVersion.<Predicate<String>>map(v -> featureId -> { - assert allHistoricalFeatures.contains(featureId) : Strings.format("Unknown historical feature %s: %s", featureId, errorMessage); - return hasHistoricalFeature(historicalFeatures, v, featureId); - }).orElse(featureId -> { - // We can safely assume that new non-semantic versions (serverless) support all historical features - assert allHistoricalFeatures.contains(featureId) : Strings.format("Unknown historical feature %s: %s", featureId, errorMessage); - return true; - }); - this.clusterStateFeatures = clusterStateFeatures; - } - - private static boolean hasHistoricalFeature(NavigableMap<Version, Set<String>> historicalFeatures, Version version, String featureId) { - var features = historicalFeatures.floorEntry(version); - return features != null && features.getValue().contains(featureId); - } - - boolean clusterHasFeature(String featureId) { - if (clusterStateFeatures.contains(featureId)) { - return true; - } - return historicalFeaturesPredicate.test(featureId); - } + TestFeatureService ALL_FEATURES = ignored -> true; }
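The new TestFeatureService above is a single-method interface, so its always-true default can be expressed directly as the lambda constant ALL_FEATURES. A small sketch of the same pattern; the names here are illustrative, not framework types:

```java
interface FeatureCheck {
    boolean clusterHasFeature(String featureId);

    // A single-method interface can carry a permissive default as a lambda constant,
    // which is the shape of TestFeatureService.ALL_FEATURES above.
    FeatureCheck ALL_FEATURES = ignored -> true;
}

class FeatureCheckDemo {
    public static void main(String[] args) {
        FeatureCheck singleFeature = "some.feature"::equals; // stub that knows exactly one feature
        System.out.println(singleFeature.clusterHasFeature("other.feature")); // false
        System.out.println(FeatureCheck.ALL_FEATURES.clusterHasFeature("other.feature")); // true
    }
}
```

diff --git a/test/framework/src/main/java/org/elasticsearch/transport/DisruptableMockTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/DisruptableMockTransport.java index eb85323caf5a1..889b7cdab4629 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/DisruptableMockTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/DisruptableMockTransport.java @@ -261,11 +261,6 @@ public String getProfileName() { return "default"; } - @Override - public String getChannelType() { - return "disruptable-mock-transport-channel"; - } - @Override public void sendResponse(final TransportResponse response) { execute(new RebootSensitiveRunnable() { diff --git a/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannel.java b/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannel.java index 3ac03e563c3ac..5dc74a1726f7c 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannel.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannel.java @@ -32,9 +32,4 @@ public void sendResponse(TransportResponse response) { public void sendResponse(Exception exception) { listener.onFailure(exception); } - - @Override - public String getChannelType() { - return "test"; - } } diff --git a/test/framework/src/test/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueueTests.java b/test/framework/src/test/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueueTests.java index 2f920f3e58fa1..f4677dc603e64 100644 --- 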
a/test/framework/src/test/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueueTests.java +++ b/test/framework/src/test/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueueTests.java @@ -254,7 +254,7 @@ public void testRunTasksUpToTimeInOrder() { IntStream.range(0, randomIntBetween(0, 10)) .forEach( i -> taskQueue.scheduleAt( - randomLongBetween(cutoffTimeInMillis + 1, 2 * cutoffTimeInMillis), + randomLongBetween(cutoffTimeInMillis + 1, 2 * cutoffTimeInMillis + 1), () -> seenNumbers.add(i + nRunnableTasks + nDeferredTasksUpToCutoff) ) ); diff --git a/test/framework/src/test/java/org/elasticsearch/test/LambdaMatchersTests.java b/test/framework/src/test/java/org/elasticsearch/test/LambdaMatchersTests.java index 5259a6bd6bbd1..ec9f6c507a972 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/LambdaMatchersTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/LambdaMatchersTests.java @@ -13,9 +13,11 @@ import java.util.List; +import static org.elasticsearch.test.LambdaMatchers.falseWith; import static org.elasticsearch.test.LambdaMatchers.transformedArrayItemsMatch; import static org.elasticsearch.test.LambdaMatchers.transformedItemsMatch; import static org.elasticsearch.test.LambdaMatchers.transformedMatch; +import static org.elasticsearch.test.LambdaMatchers.trueWith; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -97,6 +99,21 @@ public void testArrayTransformDescription() { ); } + public void testPredicateMatcher() { + assertThat(t -> true, trueWith(new Object())); + assertThat(t -> true, trueWith(null)); + assertThat(t -> false, falseWith(new Object())); + assertThat(t -> false, falseWith(null)); + + assertMismatch(t -> false, trueWith("obj"), equalTo("predicate with argument \"obj\" evaluated to <false>")); + assertMismatch(t -> true, falseWith("obj"), equalTo("predicate with argument \"obj\" evaluated to <true>")); + } + + public void testPredicateMatcherDescription() { + assertDescribeTo(trueWith("obj"), equalTo("predicate evaluates to <true> with argument \"obj\"")); + assertDescribeTo(falseWith("obj"), equalTo("predicate evaluates to <false> with argument \"obj\"")); + } + static <T> void assertMismatch(T v, Matcher<? super T> matcher, Matcher<String> mismatchDescriptionMatcher) { assertThat(v, not(matcher)); StringDescription description = new StringDescription();
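For orientation, here is how the new predicate matchers read at a call site; a hedged sketch assuming Hamcrest and the test framework are on the classpath, with the matcher factories taking an argument of the predicate's input type as the test above suggests:

```java
import static org.elasticsearch.test.LambdaMatchers.falseWith;
import static org.elasticsearch.test.LambdaMatchers.trueWith;
import static org.hamcrest.MatcherAssert.assertThat;

import java.util.function.Predicate;

class PredicateMatcherDemo {
    static void demo() {
        Predicate<String> nonEmpty = s -> s != null && s.isEmpty() == false;
        // Reads as: applying the predicate to the given argument yields true/false.
        assertThat(nonEmpty, trueWith("x"));
        assertThat(nonEmpty, falseWith(""));
    }
}
```

diff --git a/test/framework/src/test/java/org/elasticsearch/test/hamcrest/OptionalMatchersTests.java b/test/framework/src/test/java/org/elasticsearch/test/hamcrest/OptionalMatchersTests.java new file mode 100644 index 0000000000000..0318410bb269f --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/test/hamcrest/OptionalMatchersTests.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 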
+ */ + +package org.elasticsearch.test.hamcrest; + +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.StringDescription; + +import java.util.Optional; + +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresent; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresentWith; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public class OptionalMatchersTests extends ESTestCase { + + public void testEmptyMatcher() { + assertThat(Optional.empty(), isEmpty()); + assertThat(Optional.of(""), not(isEmpty())); + + StringDescription desc = new StringDescription(); + isEmpty().describeMismatch(Optional.of(""), desc); + assertThat(desc.toString(), equalTo("a non-empty optional \"\"")); + } + + public void testIsPresentMatcher() { + assertThat(Optional.of(""), isPresent()); + assertThat(Optional.empty(), not(isPresent())); + + StringDescription desc = new StringDescription(); + isPresent().describeMismatch(Optional.empty(), desc); + assertThat(desc.toString(), equalTo("an empty optional")); + } + + public void testIsPresentWithMatcher() { + assertThat(Optional.of(""), isPresentWith("")); + assertThat(Optional.of("foo"), not(isPresentWith(""))); + assertThat(Optional.empty(), not(isPresentWith(""))); + + StringDescription desc = new StringDescription(); + isPresentWith("foo").describeMismatch(Optional.empty(), desc); + assertThat(desc.toString(), equalTo("an empty optional")); + + desc = new StringDescription(); + isPresentWith("foo").describeMismatch(Optional.of(""), desc); + assertThat(desc.toString(), equalTo("an optional was \"\"")); + } +} diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java index c95fc5c131df0..38d090e455ebe 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java @@ -10,7 +10,6 @@ import org.apache.http.HttpEntity; import org.apache.http.HttpHost; -import org.elasticsearch.Version; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -27,7 +26,6 @@ import java.util.Map; import java.util.Objects; import java.util.function.BiPredicate; -import java.util.function.Predicate; /** * Used to execute REST requests according to the docs snippets that need to be tests. 
Wraps a @@ -40,12 +38,9 @@ public ClientYamlDocsTestClient( final ClientYamlSuiteRestSpec restSpec, final RestClient restClient, final List hosts, - final Version esVersion, - final Predicate clusterFeaturesPredicate, - final String os, final CheckedSupplier clientBuilderWithSniffedNodes ) { - super(restSpec, restClient, hosts, esVersion, clusterFeaturesPredicate, os, clientBuilderWithSniffedNodes); + super(restSpec, restClient, hosts, clientBuilderWithSniffedNodes); } @Override diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java index d30f65718943e..c57a9f3107393 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java @@ -20,7 +20,6 @@ import org.apache.http.util.EntityUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -47,7 +46,6 @@ import java.util.Map.Entry; import java.util.Set; import java.util.function.BiPredicate; -import java.util.function.Predicate; import java.util.stream.Collectors; import static com.carrotsearch.randomizedtesting.RandomizedTest.frequently; @@ -64,44 +62,20 @@ public class ClientYamlTestClient implements Closeable { private final ClientYamlSuiteRestSpec restSpec; private final Map restClients = new HashMap<>(); - private final Version esVersion; - private final String os; private final CheckedSupplier clientBuilderWithSniffedNodes; - private final Predicate clusterFeaturesPredicate; ClientYamlTestClient( final ClientYamlSuiteRestSpec restSpec, final RestClient restClient, final List hosts, - final Version esVersion, - final Predicate clusterFeaturesPredicate, - final String os, final CheckedSupplier clientBuilderWithSniffedNodes ) { - this.clusterFeaturesPredicate = clusterFeaturesPredicate; assert hosts.size() > 0; this.restSpec = restSpec; this.restClients.put(NodeSelector.ANY, restClient); - this.esVersion = esVersion; - this.os = os; this.clientBuilderWithSniffedNodes = clientBuilderWithSniffedNodes; } - /** - * @return the version of the oldest node in the cluster - */ - public Version getEsVersion() { - return esVersion; - } - - public boolean clusterHasFeature(String featureId) { - return clusterFeaturesPredicate.test(featureId); - } - - public String getOs() { - return os; - } - /** * Calls an api with the provided parameters and body */ diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java index a584280119ef3..10bf2fb4b0a9f 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -15,10 +15,10 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.test.rest.Stash; +import 
org.elasticsearch.test.rest.TestFeatureService; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -30,6 +30,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.function.BiPredicate; /** @@ -50,26 +51,48 @@ public class ClientYamlTestExecutionContext { private ClientYamlTestResponse response; + private final Set nodesVersions; + + private final Set osSet; + private final TestFeatureService testFeatureService; + private final boolean randomizeContentType; private final BiPredicate pathPredicate; public ClientYamlTestExecutionContext( ClientYamlTestCandidate clientYamlTestCandidate, ClientYamlTestClient clientYamlTestClient, - boolean randomizeContentType + boolean randomizeContentType, + final Set nodesVersions, + final TestFeatureService testFeatureService, + final Set osSet ) { - this(clientYamlTestCandidate, clientYamlTestClient, randomizeContentType, (ignoreApi, ignorePath) -> true); + this( + clientYamlTestCandidate, + clientYamlTestClient, + randomizeContentType, + nodesVersions, + testFeatureService, + osSet, + (ignoreApi, ignorePath) -> true + ); } public ClientYamlTestExecutionContext( ClientYamlTestCandidate clientYamlTestCandidate, ClientYamlTestClient clientYamlTestClient, boolean randomizeContentType, + final Set nodesVersions, + final TestFeatureService testFeatureService, + final Set osSet, BiPredicate pathPredicate ) { this.clientYamlTestClient = clientYamlTestClient; this.clientYamlTestCandidate = clientYamlTestCandidate; this.randomizeContentType = randomizeContentType; + this.nodesVersions = nodesVersions; + this.testFeatureService = testFeatureService; + this.osSet = osSet; this.pathPredicate = pathPredicate; } @@ -224,14 +247,14 @@ public Stash stash() { } /** - * @return the version of the oldest node in the cluster + * @return the distinct node versions running in the cluster */ - public Version esVersion() { - return clientYamlTestClient.getEsVersion(); + public Set nodesVersions() { + return nodesVersions; } public String os() { - return clientYamlTestClient.getOs(); + return osSet.iterator().next(); } public ClientYamlTestCandidate getClientYamlTestCandidate() { @@ -239,6 +262,6 @@ public ClientYamlTestCandidate getClientYamlTestCandidate() { } public boolean clusterHasFeature(String featureId) { - return clientYamlTestClient.clusterHasFeature(featureId); + return testFeatureService.clusterHasFeature(featureId); } } diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index 2e1631cc8c337..049102f87a544 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -15,7 +15,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.tests.util.TimeUnits; -import org.elasticsearch.Version; import org.elasticsearch.client.Node; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -29,9 +28,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.IOUtils; -import 
org.elasticsearch.core.Tuple; import org.elasticsearch.test.ClasspathUtils; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.TestFeatureService; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; import org.elasticsearch.test.rest.yaml.section.ClientYamlTestSection; @@ -61,7 +60,7 @@ import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; -import java.util.function.Predicate; +import java.util.stream.Collectors; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @@ -139,21 +138,38 @@ public void initAndResetContext() throws Exception { validateSpec(restSpec); restSpecification = restSpec; final List<HttpHost> hosts = getClusterHosts(); - Tuple<Version, Version> versionVersionTuple = readVersionsFromCatNodes(adminClient()); - final Version esVersion = versionVersionTuple.v1(); - final Version masterVersion = versionVersionTuple.v2(); + final Set<String> nodesVersions = getCachedNodesVersions(); final String os = readOsFromNodesInfo(adminClient()); - logger.info( - "initializing client, minimum es version [{}], master version, [{}], hosts {}, os [{}]", - esVersion, - masterVersion, - hosts, - os + logger.info("initializing client, node versions [{}], hosts {}, os [{}]", nodesVersions, hosts, os); + + var semanticNodeVersions = nodesVersions.stream() + .map(ESRestTestCase::parseLegacyVersion) + .flatMap(Optional::stream) + .collect(Collectors.toSet()); + final TestFeatureService testFeatureService = createTestFeatureService( + getClusterStateFeatures(adminClient()), + semanticNodeVersions + ); + + clientYamlTestClient = initClientYamlTestClient(restSpec, client(), hosts); + restTestExecutionContext = createRestTestExecutionContext( + testCandidate, + clientYamlTestClient, + nodesVersions, + testFeatureService, + Set.of(os) + ); + adminExecutionContext = new ClientYamlTestExecutionContext( + testCandidate, + clientYamlTestClient, + false, + nodesVersions, + testFeatureService, + Set.of(os) ); - clientYamlTestClient = initClientYamlTestClient(restSpec, client(), hosts, esVersion, ESRestTestCase::clusterHasFeature, os); - restTestExecutionContext = createRestTestExecutionContext(testCandidate, clientYamlTestClient); - adminExecutionContext = new ClientYamlTestExecutionContext(testCandidate, clientYamlTestClient, false); final String[] blacklist = resolvePathsProperty(REST_TESTS_BLACKLIST, null); blacklistPathMatchers = new ArrayList<>(); for (final String entry : blacklist) { @@ -179,28 +195,27 @@ */ protected ClientYamlTestExecutionContext createRestTestExecutionContext( ClientYamlTestCandidate clientYamlTestCandidate, - ClientYamlTestClient clientYamlTestClient + ClientYamlTestClient clientYamlTestClient, + final Set<String> nodesVersions, + final TestFeatureService testFeatureService, + final Set<String> osSet ) { - return new ClientYamlTestExecutionContext(clientYamlTestCandidate, clientYamlTestClient, randomizeContentType()); + return new ClientYamlTestExecutionContext( + clientYamlTestCandidate, + clientYamlTestClient, + randomizeContentType(), + nodesVersions, + testFeatureService, + osSet + ); } protected ClientYamlTestClient initClientYamlTestClient( final ClientYamlSuiteRestSpec restSpec, final RestClient restClient, - final List<HttpHost> hosts, - final Version esVersion, - final Predicate<String> clusterFeaturesPredicate, - 
final String os + final List<HttpHost> hosts ) { - return new ClientYamlTestClient( - restSpec, - restClient, - hosts, - esVersion, - clusterFeaturesPredicate, - os, - this::getClientBuilderWithSniffedHosts - ); + return new ClientYamlTestClient(restSpec, restClient, hosts, this::getClientBuilderWithSniffedHosts); } @AfterClass @@ -307,13 +322,15 @@ static Map<String, Set<Path>> loadSuites(String... paths) throws Exception { for (String strPath : paths) { Path path = root.resolve(strPath); if (Files.isDirectory(path)) { - Files.walk(path).forEach(file -> { - if (file.toString().endsWith(".yml")) { - addSuite(root, file, files); - } else if (file.toString().endsWith(".yaml")) { - throw new IllegalArgumentException("yaml files are no longer supported: " + file); - } - }); + try (var filesStream = Files.walk(path)) { + filesStream.forEach(file -> { + if (file.toString().endsWith(".yml")) { + addSuite(root, file, files); + } else if (file.toString().endsWith(".yaml")) { + throw new IllegalArgumentException("yaml files are no longer supported: " + file); + } + }); + } } else { path = root.resolve(strPath + ".yml"); assert Files.exists(path) : "Path " + path + " does not exist in YAML test root"; @@ -390,36 +407,7 @@ private static void validateSpec(ClientYamlSuiteRestSpec restSpec) { } } - Tuple<Version, Version> readVersionsFromCatNodes(RestClient restClient) throws IOException { - // we simply go to the _cat/nodes API and parse all versions in the cluster - final Request request = new Request("GET", "/_cat/nodes"); - request.addParameter("h", "version,master"); - request.setOptions(getCatNodesVersionMasterRequestOptions()); - Response response = restClient.performRequest(request); - ClientYamlTestResponse restTestResponse = new ClientYamlTestResponse(response); - String nodesCatResponse = restTestResponse.getBodyAsString(); - String[] split = nodesCatResponse.split("\n"); - Version version = null; - Version masterVersion = null; - for (String perNode : split) { - final String[] versionAndMaster = perNode.split("\\s+"); - assert versionAndMaster.length == 2 : "invalid line: " + perNode + " length: " + versionAndMaster.length; - final Version currentVersion = Version.fromString(versionAndMaster[0]); - final boolean master = versionAndMaster[1].trim().equals("*"); - if (master) { - assert masterVersion == null; - masterVersion = currentVersion; - } - if (version == null) { - version = currentVersion; - } else if (version.onOrAfter(currentVersion)) { - version = currentVersion; - } - } - return new Tuple<>(version, masterVersion); - } - - String readOsFromNodesInfo(RestClient restClient) throws IOException { + static String readOsFromNodesInfo(RestClient restClient) throws IOException { final Request request = new Request("GET", "/_nodes/os"); Response response = restClient.performRequest(request); ClientYamlTestResponse restTestResponse = new ClientYamlTestResponse(response); @@ -447,10 +435,6 @@ String readOsFromNodesInfo(RestClient restClient) throws IOException { return osPrettyNames.last(); } - protected RequestOptions getCatNodesVersionMasterRequestOptions() { - return RequestOptions.DEFAULT; - } - public void test() throws IOException { // skip test if it matches one of the blacklist globs for (BlacklistedPathPatternMatcher blacklistedPathMatcher : blacklistPathMatchers) { @@ -464,35 +448,32 @@ public void test() throws IOException { // skip test if the whole suite (yaml file) is disabled assumeFalse( testCandidate.getSetupSection().getSkipSection().getSkipMessage(testCandidate.getSuitePath()), - 
testCandidate.getSetupSection().getSkipSection().skip(restTestExecutionContext.esVersion()) + testCandidate.getSetupSection().getSkipSection().skip(restTestExecutionContext) ); // skip test if the whole suite (yaml file) is disabled assumeFalse( testCandidate.getTeardownSection().getSkipSection().getSkipMessage(testCandidate.getSuitePath()), - testCandidate.getTeardownSection().getSkipSection().skip(restTestExecutionContext.esVersion()) + testCandidate.getTeardownSection().getSkipSection().skip(restTestExecutionContext) ); // skip test if test section is disabled assumeFalse( testCandidate.getTestSection().getSkipSection().getSkipMessage(testCandidate.getTestPath()), - testCandidate.getTestSection().getSkipSection().skip(restTestExecutionContext.esVersion()) - ); - // skip test if os is excluded - assumeFalse( - testCandidate.getTestSection().getSkipSection().getSkipMessage(testCandidate.getTestPath()), - testCandidate.getTestSection().getSkipSection().skip(restTestExecutionContext.os()) + testCandidate.getTestSection().getSkipSection().skip(restTestExecutionContext) ); // let's check that there is something to run, otherwise there might be a problem with the test section - if (testCandidate.getTestSection().getExecutableSections().size() == 0) { + if (testCandidate.getTestSection().getExecutableSections().isEmpty()) { throw new IllegalArgumentException("No executable sections loaded for [" + testCandidate.getTestPath() + "]"); } assumeFalse( "[" + testCandidate.getTestPath() + "] skipped, reason: in fips 140 mode", - inFipsJvm() && testCandidate.getTestSection().getSkipSection().getFeatures().contains("fips_140") + inFipsJvm() && testCandidate.getTestSection().getSkipSection().yamlRunnerHasFeature("fips_140") ); - final Settings globalTemplateSettings = getGlobalTemplateSettings(testCandidate.getTestSection().getSkipSection().getFeatures()); + final Settings globalTemplateSettings = getGlobalTemplateSettings( + testCandidate.getTestSection().getSkipSection().yamlRunnerHasFeature("default_shards") + ); if (globalTemplateSettings.isEmpty() == false && ESRestTestCase.has(ProductFeature.LEGACY_TEMPLATES)) { final XContentBuilder template = jsonBuilder(); @@ -541,6 +522,7 @@ public void test() throws IOException { } } + @Deprecated protected Settings getGlobalTemplateSettings(List features) { if (features.contains("default_shards")) { return Settings.EMPTY; @@ -549,6 +531,14 @@ protected Settings getGlobalTemplateSettings(List features) { } } + protected Settings getGlobalTemplateSettings(boolean defaultShardsFeature) { + if (defaultShardsFeature) { + return Settings.EMPTY; + } else { + return globalTemplateIndexSettings; + } + } + protected boolean skipSetupSections() { return false; } diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/Features.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/Features.java index 8b9aafff5bded..d32b5684e19a9 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/Features.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/Features.java @@ -8,8 +8,6 @@ package org.elasticsearch.test.rest.yaml; -import org.elasticsearch.test.rest.ESRestTestCase; - import java.util.List; /** @@ -52,15 +50,7 @@ private Features() { */ public static boolean areAllSupported(List features) { for (String feature : features) { - if (feature.equals("xpack")) { - if (false == ESRestTestCase.hasXPack()) { - return false; - } - } else if (feature.equals("no_xpack")) { - if 
(ESRestTestCase.hasXPack()) { - return false; - } - } else if (false == SUPPORTED.contains(feature)) { + if (false == SUPPORTED.contains(feature)) { return false; } } diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java index 4fee01e71d881..48f24d3a935af 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java @@ -298,7 +298,7 @@ private static boolean hasSkipFeature( } private static boolean hasSkipFeature(String feature, SkipSection skipSection) { - return skipSection != null && skipSection.getFeatures().contains(feature); + return skipSection != null && skipSection.yamlRunnerHasFeature(feature); } public List getTestSections() { diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index bd038cc4dcd58..26158451755fd 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -17,10 +17,12 @@ import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.VersionId; import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.core.Tuple; import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; +import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; @@ -39,6 +41,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.TreeMap; import java.util.function.Predicate; @@ -188,10 +191,16 @@ public static DoSection parse(XContentParser parser) throws IOException { } else if (token.isValue()) { if ("body".equals(paramName)) { String body = parser.text(); - XContentParser bodyParser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, body); - // multiple bodies are supported e.g. in case of bulk provided as a whole string - while (bodyParser.nextToken() != null) { - apiCallSection.addBody(bodyParser.mapOrdered()); + try ( + XContentParser bodyParser = JsonXContent.jsonXContent.createParser( + XContentParserConfiguration.EMPTY, + body + ) + ) { + // multiple bodies are supported e.g. in case of bulk provided as a whole string + while (bodyParser.nextToken() != null) { + apiCallSection.addBody(bodyParser.mapOrdered()); + } } } else { apiCallSection.addParam(paramName, parser.text()); @@ -370,8 +379,14 @@ public void execute(ClientYamlTestExecutionContext executionContext) throws IOEx // This is really difficult to express just with features, so I will break it down into 2 parts: version check for v7, // and feature check for v8. 
This way the version check can be removed once we move to v9 @UpdateForV9 - var fixedInV7 = executionContext.esVersion().major == Version.V_7_17_0.major - && executionContext.esVersion().onOrAfter(Version.V_7_17_2); + var fixedInV7 = executionContext.nodesVersions() + .stream() + .map(ESRestTestCase::parseLegacyVersion) + .flatMap(Optional::stream) + .min(VersionId::compareTo) + .map(v -> v.major == Version.V_7_17_0.major && v.onOrAfter(Version.V_7_17_2)) + .orElse(false); + var fixedProductionHeader = fixedInV7 || executionContext.clusterHasFeature(RestTestLegacyFeatures.REST_ELASTIC_PRODUCT_HEADER_PRESENT.id()); if (fixedProductionHeader) { @@ -662,7 +677,7 @@ private static NodeSelector parseVersionSelector(XContentParser parser) throws I nodeMatcher = nodeVersion -> Build.current().version().equals(nodeVersion); versionSelectorString = "version is " + Build.current().version() + " (current)"; } else { - var acceptedVersionRange = SkipSection.parseVersionRanges(parser.text()); + var acceptedVersionRange = VersionRange.parseVersionRanges(parser.text()); nodeMatcher = nodeVersion -> matchWithRange(nodeVersion, acceptedVersionRange, parser.getTokenLocation()); versionSelectorString = "version ranges " + acceptedVersionRange; } diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipCriteria.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipCriteria.java new file mode 100644 index 0000000000000..398b402748506 --- /dev/null +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipCriteria.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.rest.yaml.section; + +import org.elasticsearch.Version; +import org.elasticsearch.common.VersionId; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; + +import java.util.List; +import java.util.Optional; +import java.util.function.Predicate; + +public class SkipCriteria { + + public static final Predicate<ClientYamlTestExecutionContext> SKIP_ALWAYS = context -> true; + + private SkipCriteria() {} + + static Predicate<ClientYamlTestExecutionContext> fromVersionRange(String versionRange) { + final List<VersionRange> versionRanges = VersionRange.parseVersionRanges(versionRange); + assert versionRanges.isEmpty() == false; + return context -> { + // Try to extract the minimum node version. Assume CURRENT if nodes have non-semantic versions + // TODO: push this logic down to VersionRange. 
+ // This way we will parse versions only when we actually need to skip on a version, and we can remove the default and throw + // IllegalArgumentException instead (when attempting to skip on a version that is not semantic) + var oldestNodeVersion = context.nodesVersions() + .stream() + .map(ESRestTestCase::parseLegacyVersion) + .flatMap(Optional::stream) + .min(VersionId::compareTo) + .orElse(Version.CURRENT); + + return versionRanges.stream().anyMatch(range -> range.contains(oldestNodeVersion)); + }; + } + + static Predicate<ClientYamlTestExecutionContext> fromOsList(List<String> operatingSystems) { + return context -> operatingSystems.stream().anyMatch(osName -> osName.equals(context.os())); + } + + static Predicate<ClientYamlTestExecutionContext> fromClusterModules(boolean xpackRequired) { + // TODO: change ESRestTestCase.hasXPack() to be context-specific + return context -> { + if (xpackRequired) { + return ESRestTestCase.hasXPack() == false; + } + return ESRestTestCase.hasXPack(); + }; + } +}
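Each factory above returns an independent predicate over the execution context, and a section is skipped when any of its criteria fires (see SkipSection.skip below). A minimal sketch of that composition, where FakeContext is a hypothetical stand-in for ClientYamlTestExecutionContext:

```java
import java.util.List;
import java.util.function.Predicate;

// FakeContext is a hypothetical stand-in for ClientYamlTestExecutionContext.
record FakeContext(String os) {}

class SkipCompositionSketch {
    public static void main(String[] args) {
        // Each criterion is an independent predicate over the execution context,
        // like the fromVersionRange / fromOsList / fromClusterModules factories above.
        Predicate<FakeContext> onWindows = ctx -> ctx.os().startsWith("Windows");
        List<Predicate<FakeContext>> criteria = List.of(onWindows);

        // A section is skipped when any criterion matches, mirroring SkipSection.skip(context).
        System.out.println(criteria.stream().anyMatch(c -> c.test(new FakeContext("Linux"))));   // false
        System.out.println(criteria.stream().anyMatch(c -> c.test(new FakeContext("Windows")))); // true
    }
}
```

diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java index 74d6e8284b438..4bd80fa4d9f13 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java @@ -7,17 +7,17 @@ */ package org.elasticsearch.test.rest.yaml.section; -import org.elasticsearch.Version; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; -import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.elasticsearch.test.rest.yaml.Features; +import org.elasticsearch.xcontent.XContentLocation; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; +import java.util.function.Predicate; /** * Represents a skip section that tells whether a specific test section or suite needs to be skipped @@ -27,6 +27,98 @@ * - an operating system (full name, including specific Linux distributions) that might show a certain behavior */ public class SkipSection { + + static class SkipSectionBuilder { + String version = null; + String reason = null; + List<String> testFeatures = new ArrayList<>(); + List<String> operatingSystems = new ArrayList<>(); + + enum XPackRequested { + NOT_SPECIFIED, + YES, + NO, + MISMATCHED + } + + XPackRequested xpackRequested = XPackRequested.NOT_SPECIFIED; + + public SkipSectionBuilder withVersion(String version) { + this.version = version; + return this; + } + + public SkipSectionBuilder withReason(String reason) { + this.reason = reason; + return this; + } + + public SkipSectionBuilder withTestFeature(String featureName) { + this.testFeatures.add(featureName); + return this; + } + + public void withXPack(boolean xpackRequired) { + if (xpackRequired && xpackRequested == XPackRequested.NO || xpackRequired == false && xpackRequested == XPackRequested.YES) { + xpackRequested = XPackRequested.MISMATCHED; + } else { + xpackRequested = xpackRequired ? 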
XPackRequested.YES : XPackRequested.NO; + } + } + + public SkipSectionBuilder withOs(String osName) { + this.operatingSystems.add(osName); + return this; + } + + void validate(XContentLocation contentLocation) { + if ((Strings.hasLength(version) == false) + && testFeatures.isEmpty() + && operatingSystems.isEmpty() + && xpackRequested == XPackRequested.NOT_SPECIFIED) { + throw new ParsingException( + contentLocation, + "at least one criteria (version, test features, os) is mandatory within a skip section" + ); + } + if (Strings.hasLength(version) && Strings.hasLength(reason) == false) { + throw new ParsingException(contentLocation, "reason is mandatory within skip version section"); + } + if (operatingSystems.isEmpty() == false && Strings.hasLength(reason) == false) { + throw new ParsingException(contentLocation, "reason is mandatory within skip version section"); + } + // make feature "skip_os" mandatory if os is given, this is a temporary solution until language client tests know about os + if (operatingSystems.isEmpty() == false && testFeatures.contains("skip_os") == false) { + throw new ParsingException(contentLocation, "if os is specified, feature skip_os must be set"); + } + if (xpackRequested == XPackRequested.MISMATCHED) { + throw new ParsingException(contentLocation, "either `xpack` or `no_xpack` can be present, not both"); + } + } + + public SkipSection build() { + final List<Predicate<ClientYamlTestExecutionContext>> skipCriteriaList; + + // Check if the test runner supports all YAML framework features (see {@link Features}). If not, default to always skip this + // section. + if (Features.areAllSupported(testFeatures) == false) { + skipCriteriaList = List.of(SkipCriteria.SKIP_ALWAYS); + } else { + skipCriteriaList = new ArrayList<>(); + if (xpackRequested == XPackRequested.YES || xpackRequested == XPackRequested.NO) { + skipCriteriaList.add(SkipCriteria.fromClusterModules(xpackRequested == XPackRequested.YES)); + } + if (Strings.hasLength(version)) { + skipCriteriaList.add(SkipCriteria.fromVersionRange(version)); + } + if (operatingSystems.isEmpty() == false) { + skipCriteriaList.add(SkipCriteria.fromOsList(operatingSystems)); + } + } + return new SkipSection(skipCriteriaList, testFeatures, reason); + } + } + /** * Parse a {@link SkipSection} if the next field is {@code skip}, otherwise returns {@link SkipSection#EMPTY}. */ @@ -43,6 +135,24 @@ public static SkipSection parseIfNext(XContentParser parser) throws IOException } public static SkipSection parse(XContentParser parser) throws IOException { + return parseInternal(parser).build(); + } + + private static void parseFeature(String feature, SkipSectionBuilder builder) { + // #31403 introduced YAML test "features" to indicate if the cluster being tested has xpack installed (`xpack`) + // or if it does *not* have xpack installed (`no_xpack`). These are not test runner features, so now that we have + // "modular" skip criteria let's separate them. Eventually, these should move to their own skip section. 
+ if (feature.equals("xpack")) { + builder.withXPack(true); + } else if (feature.equals("no_xpack")) { + builder.withXPack(false); + } else { + builder.withTestFeature(feature); + } + } + + // package private for tests + static SkipSectionBuilder parseInternal(XContentParser parser) throws IOException { if (parser.nextToken() != XContentParser.Token.START_OBJECT) { throw new IllegalArgumentException( "Expected [" @@ -54,22 +164,21 @@ public static SkipSection parse(XContentParser parser) throws IOException { } String currentFieldName = null; XContentParser.Token token; - String version = null; - String reason = null; - List<String> features = new ArrayList<>(); - List<String> operatingSystems = new ArrayList<>(); + + var builder = new SkipSectionBuilder(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { if ("version".equals(currentFieldName)) { - version = parser.text(); + builder.withVersion(parser.text()); } else if ("reason".equals(currentFieldName)) { - reason = parser.text(); + builder.withReason(parser.text()); } else if ("features".equals(currentFieldName)) { - features.add(parser.text()); + parseFeature(parser.text(), builder); } else if ("os".equals(currentFieldName)) { - operatingSystems.add(parser.text()); + builder.withOs(parser.text()); } else { throw new ParsingException( parser.getTokenLocation(), @@ -79,131 +188,67 @@ } else if (token == XContentParser.Token.START_ARRAY) { if ("features".equals(currentFieldName)) { while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - features.add(parser.text()); + parseFeature(parser.text(), builder); } } else if ("os".equals(currentFieldName)) { while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - operatingSystems.add(parser.text()); + builder.withOs(parser.text()); } } } } parser.nextToken(); - - if ((Strings.hasLength(version) == false) && features.isEmpty() && operatingSystems.isEmpty()) { - throw new ParsingException(parser.getTokenLocation(), "version, features or os is mandatory within skip section"); - } - if (Strings.hasLength(version) && Strings.hasLength(reason) == false) { - throw new ParsingException(parser.getTokenLocation(), "reason is mandatory within skip version section"); - } - if (operatingSystems.isEmpty() == false && Strings.hasLength(reason) == false) { - throw new ParsingException(parser.getTokenLocation(), "reason is mandatory within skip version section"); - } - // make feature "skip_os" mandatory if os is given, this is a temporary solution until language client tests know about os - if (operatingSystems.isEmpty() == false && features.contains("skip_os") == false) { - throw new ParsingException(parser.getTokenLocation(), "if os is specified, feature skip_os must be set"); - } - return new SkipSection(version, features, operatingSystems, reason); + builder.validate(parser.getTokenLocation()); + return builder; } public static final SkipSection EMPTY = new SkipSection(); - private final List<VersionRange> versionRanges; - private final List<String> features; - private final List<String> operatingSystems; + private final List<Predicate<ClientYamlTestExecutionContext>> skipCriteriaList; + private final List<String> yamlRunnerFeatures; private final String reason; private SkipSection() { - this.versionRanges = new ArrayList<>(); - this.features = new ArrayList<>(); - this.operatingSystems = new ArrayList<>(); + this.skipCriteriaList = new ArrayList<>(); + 
this.yamlRunnerFeatures = new ArrayList<>(); this.reason = null; } - public SkipSection(String versionRange, List<String> features, List<String> operatingSystems, String reason) { - assert features != null; - this.versionRanges = parseVersionRanges(versionRange); - assert versionRanges.isEmpty() == false; - this.features = features; - this.operatingSystems = operatingSystems; + SkipSection(List<Predicate<ClientYamlTestExecutionContext>> skipCriteriaList, List<String> yamlRunnerFeatures, String reason) { + this.skipCriteriaList = skipCriteriaList; + this.yamlRunnerFeatures = yamlRunnerFeatures; this.reason = reason; } - public Version getLowerVersion() { - return versionRanges.get(0).lower(); - } - - public Version getUpperVersion() { - return versionRanges.get(versionRanges.size() - 1).upper(); - } - - public List<String> getFeatures() { - return features; - } - - public List<String> getOperatingSystems() { - return operatingSystems; + public boolean yamlRunnerHasFeature(String feature) { + return yamlRunnerFeatures.contains(feature); } public String getReason() { return reason; } - public boolean skip(Version currentVersion) { + public boolean skip(ClientYamlTestExecutionContext context) { if (isEmpty()) { return false; } - boolean skip = versionRanges.stream().anyMatch(range -> range.contains(currentVersion)); - return skip || Features.areAllSupported(features) == false; - } - - public boolean skip(String os) { - return this.operatingSystems.contains(os); - } - public boolean isVersionCheck() { - return features.isEmpty() && operatingSystems.isEmpty(); + return skipCriteriaList.stream().anyMatch(c -> c.test(context)); } public boolean isEmpty() { return EMPTY.equals(this); } - static List<VersionRange> parseVersionRanges(String rawRanges) { - if (rawRanges == null) { - return Collections.singletonList(new VersionRange(null, null)); - } - String[] ranges = rawRanges.split(","); - List<VersionRange> versionRanges = new ArrayList<>(); - for (String rawRange : ranges) { - if (rawRange.trim().equals("all")) { - return Collections.singletonList(new VersionRange(VersionUtils.getFirstVersion(), Version.CURRENT)); - } - String[] skipVersions = rawRange.split("-", -1); - if (skipVersions.length > 2) { - throw new IllegalArgumentException("version range malformed: " + rawRanges); - } - - String lower = skipVersions[0].trim(); - String upper = skipVersions[1].trim(); - VersionRange versionRange = new VersionRange( - lower.isEmpty() ? VersionUtils.getFirstVersion() : Version.fromString(lower), - upper.isEmpty() ? 
Version.CURRENT : Version.fromString(upper) - ); - versionRanges.add(versionRange); - } - return versionRanges; - } - public String getSkipMessage(String description) { StringBuilder messageBuilder = new StringBuilder(); messageBuilder.append("[").append(description).append("] skipped,"); if (reason != null) { messageBuilder.append(" reason: [").append(getReason()).append("]"); } - if (features.isEmpty() == false) { - messageBuilder.append(" unsupported features ").append(getFeatures()); + if (yamlRunnerFeatures.isEmpty() == false) { + messageBuilder.append(" unsupported features ").append(yamlRunnerFeatures); } return messageBuilder.toString(); } } diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/VersionRange.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/VersionRange.java index f4b9a3d4aef1a..4feb1ed609fae 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/VersionRange.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/VersionRange.java @@ -8,6 +8,11 @@ package org.elasticsearch.test.rest.yaml.section; import org.elasticsearch.Version; +import org.elasticsearch.test.VersionUtils; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; public record VersionRange(Version lower, Version upper) { @@ -19,4 +24,30 @@ public boolean contains(Version currentVersion) { } public String toString() { return "[" + lower + " - " + upper + "]"; } + + static List<VersionRange> parseVersionRanges(String rawRanges) { + if (rawRanges == null) { + return Collections.singletonList(new VersionRange(null, null)); + } + String[] ranges = rawRanges.split(","); + List<VersionRange> versionRanges = new ArrayList<>(); + for (String rawRange : ranges) { + if (rawRange.trim().equals("all")) { + return Collections.singletonList(new VersionRange(VersionUtils.getFirstVersion(), Version.CURRENT)); + } + String[] skipVersions = rawRange.split("-", -1); + if (skipVersions.length > 2) { + throw new IllegalArgumentException("version range malformed: " + rawRanges); + } + + String lower = skipVersions[0].trim(); + String upper = skipVersions[1].trim(); + VersionRange versionRange = new VersionRange( + lower.isEmpty() ? VersionUtils.getFirstVersion() : Version.fromString(lower), + upper.isEmpty() ? 
Version.CURRENT : Version.fromString(upper) + ); + versionRanges.add(versionRange); + } + return versionRanges; + } }
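The range grammar handled by parseVersionRanges above, comma-separated alternatives, the special token "all", and open-ended bounds, can be illustrated standalone. This sketch swaps Elasticsearch's Version type for plain ints purely so the snippet runs on its own:

```java
import java.util.ArrayList;
import java.util.List;

// Standalone illustration of the range grammar parsed above: comma-separated
// alternatives, the special token "all", and open-ended bounds. Plain ints stand
// in for Elasticsearch's Version type.
class RangeGrammarSketch {
    record Range(int lower, int upper) {
        boolean contains(int v) {
            return v >= lower && v <= upper;
        }
    }

    static List<Range> parse(String raw) {
        List<Range> out = new ArrayList<>();
        for (String part : raw.split(",")) {
            if (part.trim().equals("all")) {
                return List.of(new Range(Integer.MIN_VALUE, Integer.MAX_VALUE));
            }
            // Like the original, each range is assumed to contain a single dash;
            // an empty bound defaults to the minimum or maximum.
            String[] bounds = part.split("-", -1);
            String lower = bounds[0].trim();
            String upper = bounds[1].trim();
            out.add(
                new Range(
                    lower.isEmpty() ? Integer.MIN_VALUE : Integer.parseInt(lower),
                    upper.isEmpty() ? Integer.MAX_VALUE : Integer.parseInt(upper)
                )
            );
        }
        return out;
    }

    public static void main(String[] args) {
        System.out.println(parse(" - 7").get(0).contains(5)); // true: open lower bound
        System.out.println(parse("8 - ").get(0).contains(7)); // false: below the range
    }
}
```

diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java index 49cb509608ec1..6e8397c816b3b 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java @@ -9,25 +9,39 @@ package org.elasticsearch.test.rest.yaml; import org.apache.http.HttpEntity; -import org.elasticsearch.Version; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.rest.TestFeatureService; import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.Set; import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.is; public class ClientYamlTestExecutionContextTests extends ESTestCase { + private static class MockTestFeatureService implements TestFeatureService { + @Override + public boolean clusterHasFeature(String featureId) { + return true; + } + } + public void testHeadersSupportStashedValueReplacement() throws IOException { final AtomicReference<Map<String, String>> headersRef = new AtomicReference<>(); - final Version version = VersionUtils.randomVersion(random()); - final ClientYamlTestExecutionContext context = new ClientYamlTestExecutionContext(null, null, randomBoolean()) { + final String version = randomAlphaOfLength(10); + final ClientYamlTestExecutionContext context = new ClientYamlTestExecutionContext( + null, + null, + randomBoolean(), + Set.of(version), + new MockTestFeatureService(), + Set.of("os") + ) { @Override ClientYamlTestResponse callApiInternal( String apiName, @@ -39,11 +53,6 @@ ClientYamlTestResponse callApiInternal( headersRef.set(headers); return null; } - - @Override - public Version esVersion() { - return version; - } }; final Map<String, String> headers = new HashMap<>(); headers.put("foo", "$bar"); @@ -62,8 +71,15 @@ public Version esVersion() { } public void testStashHeadersOnException() throws IOException { - final Version version = VersionUtils.randomVersion(random()); - final ClientYamlTestExecutionContext context = new ClientYamlTestExecutionContext(null, null, randomBoolean()) { + final String version = randomAlphaOfLength(10); + final ClientYamlTestExecutionContext context = new ClientYamlTestExecutionContext( + null, + null, + randomBoolean(), + Set.of(version), + new MockTestFeatureService(), + Set.of("os") + ) { @Override ClientYamlTestResponse callApiInternal( String apiName, @@ -74,11 +90,6 @@ ClientYamlTestResponse callApiInternal( ) { throw new RuntimeException("boom!"); } - - @Override - public Version esVersion() { - return version; - } }; final Map<String, String> headers = new HashMap<>(); headers.put("Accept", "application/json"); diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java index 6a692e29926de..0ee275fc89c15 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java +++ 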
b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.test.rest.yaml.section; -import org.elasticsearch.Version; import org.elasticsearch.common.ParsingException; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.yaml.YamlXContent; @@ -98,8 +97,6 @@ public void testParseTestSectionWithDoSetAndSkipSectionsNoSkip() throws Exceptio assertThat(testSection, notNullValue()); assertThat(testSection.getName(), equalTo("First test section")); assertThat(testSection.getSkipSection(), notNullValue()); - assertThat(testSection.getSkipSection().getLowerVersion(), equalTo(Version.fromString("6.0.0"))); - assertThat(testSection.getSkipSection().getUpperVersion(), equalTo(Version.fromString("6.2.0"))); assertThat(testSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); assertThat(testSection.getExecutableSections().size(), equalTo(2)); DoSection doSection = (DoSection) testSection.getExecutableSections().get(0); diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java index 3f8cc298c5c36..c64a30378e9d6 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.test.rest.yaml.section; -import org.elasticsearch.Version; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; import org.elasticsearch.core.Strings; @@ -28,7 +27,6 @@ import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; -import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -152,8 +150,7 @@ public void testParseTestSetupTeardownAndSections() throws Exception { restTestSuite.getTestSections().get(1).getSkipSection().getReason(), equalTo("for newer versions the index name is always returned") ); - assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getLowerVersion(), equalTo(Version.fromString("6.0.0"))); - assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getUpperVersion(), equalTo(Version.CURRENT)); + assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(3)); assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(0), instanceOf(DoSection.class)); doSection = (DoSection) restTestSuite.getTestSections().get(1).getExecutableSections().get(0); @@ -423,11 +420,7 @@ public void testParseSkipOs() throws Exception { assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Broken on some os")); assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(false)); assertThat(restTestSuite.getTestSections().get(0).getSkipSection().getReason(), equalTo("not supported")); - assertThat( - restTestSuite.getTestSections().get(0).getSkipSection().getOperatingSystems(), - containsInAnyOrder("windows95", "debian-5") - ); - 
assertThat(restTestSuite.getTestSections().get(0).getSkipSection().getFeatures(), containsInAnyOrder("skip_os")); + assertThat(restTestSuite.getTestSections().get(0).getSkipSection().yamlRunnerHasFeature("skip_os"), equalTo(true)); } public void testParseFileWithSingleTestSection() throws Exception { @@ -660,7 +653,7 @@ public void testAddingDoWithWarningWithSkip() { DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); doSection.setExpectedWarningHeaders(singletonList("foo")); doSection.setApiCallSection(new ApiCallSection("test")); - SkipSection skipSection = new SkipSection(null, singletonList("warnings"), emptyList(), null); + SkipSection skipSection = new SkipSection(emptyList(), singletonList("warnings"), null); createTestSuite(skipSection, doSection).validate(); } @@ -669,13 +662,13 @@ public void testAddingDoWithWarningRegexWithSkip() { DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); doSection.setExpectedWarningHeadersRegex(singletonList(Pattern.compile("foo"))); doSection.setApiCallSection(new ApiCallSection("test")); - SkipSection skipSection = new SkipSection(null, singletonList("warnings_regex"), emptyList(), null); + SkipSection skipSection = new SkipSection(emptyList(), singletonList("warnings_regex"), null); createTestSuite(skipSection, doSection).validate(); } public void testAddingDoWithNodeSelectorWithSkip() { int lineNumber = between(1, 10000); - SkipSection skipSection = new SkipSection(null, singletonList("node_selector"), emptyList(), null); + SkipSection skipSection = new SkipSection(emptyList(), singletonList("node_selector"), null); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); ApiCallSection apiCall = new ApiCallSection("test"); apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); @@ -685,7 +678,7 @@ public void testAddingDoWithNodeSelectorWithSkip() { public void testAddingDoWithHeadersWithSkip() { int lineNumber = between(1, 10000); - SkipSection skipSection = new SkipSection(null, singletonList("headers"), emptyList(), null); + SkipSection skipSection = new SkipSection(emptyList(), singletonList("headers"), null); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); ApiCallSection apiCallSection = new ApiCallSection("test"); apiCallSection.addHeaders(singletonMap("foo", "bar")); @@ -695,7 +688,7 @@ public void testAddingDoWithHeadersWithSkip() { public void testAddingContainsWithSkip() { int lineNumber = between(1, 10000); - SkipSection skipSection = new SkipSection(null, singletonList("contains"), emptyList(), null); + SkipSection skipSection = new SkipSection(emptyList(), singletonList("contains"), null); ContainsAssertion containsAssertion = new ContainsAssertion( new XContentLocation(lineNumber, 0), randomAlphaOfLength(randomIntBetween(3, 30)), @@ -706,7 +699,7 @@ public void testAddingContainsWithSkip() { public void testAddingCloseToWithSkip() { int lineNumber = between(1, 10000); - SkipSection skipSection = new SkipSection(null, singletonList("close_to"), emptyList(), null); + SkipSection skipSection = new SkipSection(emptyList(), singletonList("close_to"), null); CloseToAssertion closeToAssertion = new CloseToAssertion( new XContentLocation(lineNumber, 0), randomAlphaOfLength(randomIntBetween(3, 30)), @@ -718,7 +711,7 @@ public void testAddingCloseToWithSkip() { public void testAddingIsAfterWithSkip() { int lineNumber = between(1, 10000); - SkipSection skipSection = new SkipSection(null, singletonList("is_after"), emptyList(), null); + SkipSection 
skipSection = new SkipSection(emptyList(), singletonList("is_after"), null); IsAfterAssertion isAfterAssertion = new IsAfterAssertion( new XContentLocation(lineNumber, 0), randomAlphaOfLength(randomIntBetween(3, 30)), diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java index 501f83bb02e1f..0cb9a3e29e63f 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.core.Strings; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; import org.elasticsearch.xcontent.XContentLocation; @@ -31,6 +30,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.regex.Pattern; import static java.util.Collections.emptyList; @@ -610,7 +610,7 @@ public void testNodeSelectorByVersionRange() throws IOException { doSection.getApiCallSection().getNodeSelector() ) ).thenReturn(mockResponse); - when(context.esVersion()).thenReturn(VersionUtils.randomVersion(random())); + when(context.nodesVersions()).thenReturn(Set.of(randomAlphaOfLength(10))); when(mockResponse.getHeaders("X-elastic-product")).thenReturn(List.of("Elasticsearch")); doSection.execute(context); verify(context).callApi( diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java index fa91726ca73e6..53aaf99d7e272 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.test.rest.yaml.section; -import org.elasticsearch.Version; import org.elasticsearch.xcontent.yaml.YamlXContent; import java.io.IOException; @@ -108,8 +107,6 @@ public void testParseSetupAndSkipSectionNoSkip() throws Exception { assertThat(setupSection, notNullValue()); assertThat(setupSection.getSkipSection().isEmpty(), equalTo(false)); assertThat(setupSection.getSkipSection(), notNullValue()); - assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.fromString("6.0.0"))); - assertThat(setupSection.getSkipSection().getUpperVersion(), equalTo(Version.fromString("6.3.0"))); assertThat(setupSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); assertThat(setupSection.getExecutableSections().size(), equalTo(2)); assertThat(setupSection.getExecutableSections().get(0), instanceOf(DoSection.class)); diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java index 5e4db061dbaaa..bd1f8fa758499 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java +++ 
b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java @@ -12,61 +12,171 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.core.Strings; import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.elasticsearch.xcontent.yaml.YamlXContent; +import java.io.IOException; import java.util.Collections; +import java.util.List; +import java.util.Set; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class SkipSectionTests extends AbstractClientYamlTestFragmentParserTestCase { - public void testSkipMultiRange() { - SkipSection section = new SkipSection("6.0.0 - 6.1.0, 7.1.0 - 7.5.0", Collections.emptyList(), Collections.emptyList(), "foobar"); - - assertFalse(section.skip(Version.CURRENT)); - assertFalse(section.skip(Version.fromString("6.2.0"))); - assertFalse(section.skip(Version.fromString("7.0.0"))); - assertFalse(section.skip(Version.fromString("7.6.0"))); - - assertTrue(section.skip(Version.fromString("6.0.0"))); - assertTrue(section.skip(Version.fromString("6.1.0"))); - assertTrue(section.skip(Version.fromString("7.1.0"))); - assertTrue(section.skip(Version.fromString("7.5.0"))); + public void testSkipVersionMultiRange() { + SkipSection section = new SkipSection( + List.of(SkipCriteria.fromVersionRange("6.0.0 - 6.1.0, 7.1.0 - 7.5.0")), + Collections.emptyList(), + "foobar" + ); - section = new SkipSection("- 7.1.0, 7.2.0 - 7.5.0, 8.0.0 -", Collections.emptyList(), Collections.emptyList(), "foobar"); - assertTrue(section.skip(Version.fromString("7.0.0"))); - assertTrue(section.skip(Version.fromString("7.3.0"))); - assertTrue(section.skip(Version.fromString("8.0.0"))); + var outOfRangeMockContext = mock(ClientYamlTestExecutionContext.class); + when(outOfRangeMockContext.nodesVersions()).thenReturn(Set.of(Version.CURRENT.toString())) + .thenReturn(Set.of("6.2.0")) + .thenReturn(Set.of("7.0.0")) + .thenReturn(Set.of("7.6.0")); + + assertFalse(section.skip(outOfRangeMockContext)); + assertFalse(section.skip(outOfRangeMockContext)); + assertFalse(section.skip(outOfRangeMockContext)); + assertFalse(section.skip(outOfRangeMockContext)); + + var inRangeMockContext = mock(ClientYamlTestExecutionContext.class); + when(inRangeMockContext.nodesVersions()).thenReturn(Set.of("6.0.0")) + .thenReturn(Set.of("6.1.0")) + .thenReturn(Set.of("7.1.0")) + .thenReturn(Set.of("7.5.0")); + + assertTrue(section.skip(inRangeMockContext)); + assertTrue(section.skip(inRangeMockContext)); + assertTrue(section.skip(inRangeMockContext)); + assertTrue(section.skip(inRangeMockContext)); } - public void testSkip() { - SkipSection section = new SkipSection( - "6.0.0 - 6.1.0", - randomBoolean() ? 
Collections.emptyList() : Collections.singletonList("warnings"), + public void testSkipVersionMultiOpenRange() { + var section = new SkipSection( + List.of(SkipCriteria.fromVersionRange("- 7.1.0, 7.2.0 - 7.5.0, 8.0.0 -")), Collections.emptyList(), "foobar" ); - assertFalse(section.skip(Version.CURRENT)); - assertTrue(section.skip(Version.fromString("6.0.0"))); - section = new SkipSection( - randomBoolean() ? null : "6.0.0 - 6.1.0", - Collections.singletonList("boom"), - Collections.emptyList(), + + var outOfRangeMockContext = mock(ClientYamlTestExecutionContext.class); + when(outOfRangeMockContext.nodesVersions()).thenReturn(Set.of("7.1.1")).thenReturn(Set.of("7.6.0")); + + assertFalse(section.skip(outOfRangeMockContext)); + assertFalse(section.skip(outOfRangeMockContext)); + + var inRangeMockContext = mock(ClientYamlTestExecutionContext.class); + when(inRangeMockContext.nodesVersions()).thenReturn(Set.of("7.0.0")) + .thenReturn(Set.of("7.3.0")) + .thenReturn(Set.of("8.0.0")) + .thenReturn(Set.of(Version.CURRENT.toString())); + + assertTrue(section.skip(inRangeMockContext)); + assertTrue(section.skip(inRangeMockContext)); + assertTrue(section.skip(inRangeMockContext)); + assertTrue(section.skip(inRangeMockContext)); + } + + public void testSkipVersion() { + SkipSection section = new SkipSection(List.of(SkipCriteria.fromVersionRange("6.0.0 - 6.1.0")), Collections.emptyList(), "foobar"); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + when(mockContext.nodesVersions()).thenReturn(Set.of(Version.CURRENT.toString())) + .thenReturn(Set.of("6.0.0")) + .thenReturn(Set.of("6.0.0", "6.1.0")) + .thenReturn(Set.of("6.0.0", "5.2.0")); + + assertFalse(section.skip(mockContext)); + assertTrue(section.skip(mockContext)); + assertTrue(section.skip(mockContext)); + assertFalse(section.skip(mockContext)); + } + + public void testSkipVersionWithTestFeatures() { + SkipSection section = new SkipSection( + List.of(SkipCriteria.fromVersionRange("6.0.0 - 6.1.0")), + Collections.singletonList("warnings"), "foobar" ); - assertTrue(section.skip(Version.CURRENT)); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + when(mockContext.nodesVersions()).thenReturn(Set.of(Version.CURRENT.toString())).thenReturn(Set.of("6.0.0")); + + assertFalse(section.skip(mockContext)); + assertTrue(section.skip(mockContext)); + } + + public void testSkipTestFeatures() { + var section = new SkipSection.SkipSectionBuilder().withTestFeature("boom").build(); + assertTrue(section.skip(mock(ClientYamlTestExecutionContext.class))); + } + + public void testSkipTestFeaturesOverridesAnySkipCriteria() { + var section = new SkipSection.SkipSectionBuilder().withOs("test-os").withTestFeature("boom").build(); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + when(mockContext.os()).thenReturn("test-os"); + + // Skip even if OS is matching + assertTrue(section.skip(mockContext)); + } + + public void testSkipOs() { + SkipSection section = new SkipSection.SkipSectionBuilder().withOs("windows95").withOs("debian-5").build(); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + + when(mockContext.os()).thenReturn("debian-5"); + assertTrue(section.skip(mockContext)); + + when(mockContext.os()).thenReturn("windows95"); + assertTrue(section.skip(mockContext)); + + when(mockContext.os()).thenReturn("ms-dos"); + assertFalse(section.skip(mockContext)); + } + + public void testSkipOsWithTestFeatures() { + SkipSection section = new SkipSection.SkipSectionBuilder().withTestFeature("warnings") + 
.withOs("windows95") + .withOs("debian-5") + .build(); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + when(mockContext.os()).thenReturn("debian-5"); + assertTrue(section.skip(mockContext)); + + when(mockContext.os()).thenReturn("windows95"); + assertTrue(section.skip(mockContext)); + + when(mockContext.os()).thenReturn("ms-dos"); + assertFalse(section.skip(mockContext)); } public void testMessage() { - SkipSection section = new SkipSection("6.0.0 - 6.1.0", Collections.singletonList("warnings"), Collections.emptyList(), "foobar"); + SkipSection section = new SkipSection( + List.of(SkipCriteria.fromVersionRange("6.0.0 - 6.1.0")), + Collections.singletonList("warnings"), + "foobar" + ); assertEquals("[FOOBAR] skipped, reason: [foobar] unsupported features [warnings]", section.getSkipMessage("FOOBAR")); - section = new SkipSection(null, Collections.singletonList("warnings"), Collections.emptyList(), "foobar"); + section = new SkipSection(List.of(), Collections.singletonList("warnings"), "foobar"); assertEquals("[FOOBAR] skipped, reason: [foobar] unsupported features [warnings]", section.getSkipMessage("FOOBAR")); - section = new SkipSection(null, Collections.singletonList("warnings"), Collections.emptyList(), null); + section = new SkipSection(List.of(), Collections.singletonList("warnings"), null); assertEquals("[FOOBAR] skipped, unsupported features [warnings]", section.getSkipMessage("FOOBAR")); } @@ -76,49 +186,61 @@ public void testParseSkipSectionVersionNoFeature() throws Exception { version: " - %s" reason: Delete ignores the parent param""", version)); - SkipSection skipSection = SkipSection.parse(parser); - assertThat(skipSection, notNullValue()); - assertThat(skipSection.getLowerVersion(), equalTo(VersionUtils.getFirstVersion())); - assertThat(skipSection.getUpperVersion(), equalTo(version)); - assertThat(skipSection.getFeatures().size(), equalTo(0)); - assertThat(skipSection.getReason(), equalTo("Delete ignores the parent param")); + var skipSectionBuilder = SkipSection.parseInternal(parser); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.version, not(emptyOrNullString())); + assertThat(skipSectionBuilder.testFeatures.size(), equalTo(0)); + assertThat(skipSectionBuilder.reason, equalTo("Delete ignores the parent param")); } - public void testParseSkipSectionAllVersions() throws Exception { - parser = createParser(YamlXContent.yamlXContent, """ - version: " all " - reason: Delete ignores the parent param"""); + public void testParseSkipSectionFeatureNoVersion() throws Exception { + parser = createParser(YamlXContent.yamlXContent, "features: regex"); - SkipSection skipSection = SkipSection.parse(parser); - assertThat(skipSection, notNullValue()); - assertThat(skipSection.getLowerVersion(), equalTo(VersionUtils.getFirstVersion())); - assertThat(skipSection.getUpperVersion(), equalTo(Version.CURRENT)); - assertThat(skipSection.getFeatures().size(), equalTo(0)); - assertThat(skipSection.getReason(), equalTo("Delete ignores the parent param")); + var skipSectionBuilder = SkipSection.parseInternal(parser); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.version, emptyOrNullString()); + assertThat(skipSectionBuilder.testFeatures, contains("regex")); + assertThat(skipSectionBuilder.reason, nullValue()); + assertThat(skipSectionBuilder.xpackRequested, is(SkipSection.SkipSectionBuilder.XPackRequested.NOT_SPECIFIED)); } - public void testParseSkipSectionFeatureNoVersion() throws Exception { - parser = 
createParser(YamlXContent.yamlXContent, "features: regex"); + public void testParseXPackFeature() throws IOException { + parser = createParser(YamlXContent.yamlXContent, "features: xpack"); + + var skipSectionBuilder = SkipSection.parseInternal(parser); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.version, emptyOrNullString()); + assertThat(skipSectionBuilder.testFeatures, empty()); + assertThat(skipSectionBuilder.reason, nullValue()); + assertThat(skipSectionBuilder.xpackRequested, is(SkipSection.SkipSectionBuilder.XPackRequested.YES)); + } + + public void testParseNoXPackFeature() throws IOException { + parser = createParser(YamlXContent.yamlXContent, "features: no_xpack"); + + var skipSectionBuilder = SkipSection.parseInternal(parser); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.version, emptyOrNullString()); + assertThat(skipSectionBuilder.testFeatures, empty()); + assertThat(skipSectionBuilder.reason, nullValue()); + assertThat(skipSectionBuilder.xpackRequested, is(SkipSection.SkipSectionBuilder.XPackRequested.NO)); + } - SkipSection skipSection = SkipSection.parse(parser); - assertThat(skipSection, notNullValue()); - assertThat(skipSection.isVersionCheck(), equalTo(false)); - assertThat(skipSection.getFeatures().size(), equalTo(1)); - assertThat(skipSection.getFeatures().get(0), equalTo("regex")); - assertThat(skipSection.getReason(), nullValue()); + public void testParseBothXPackFeatures() throws IOException { + parser = createParser(YamlXContent.yamlXContent, "features: [xpack, no_xpack]"); + + var e = expectThrows(ParsingException.class, () -> SkipSection.parseInternal(parser)); + assertThat(e.getMessage(), containsString("either `xpack` or `no_xpack` can be present, not both")); } public void testParseSkipSectionFeaturesNoVersion() throws Exception { parser = createParser(YamlXContent.yamlXContent, "features: [regex1,regex2,regex3]"); - SkipSection skipSection = SkipSection.parse(parser); - assertThat(skipSection, notNullValue()); - assertThat(skipSection.isVersionCheck(), equalTo(false)); - assertThat(skipSection.getFeatures().size(), equalTo(3)); - assertThat(skipSection.getFeatures().get(0), equalTo("regex1")); - assertThat(skipSection.getFeatures().get(1), equalTo("regex2")); - assertThat(skipSection.getFeatures().get(2), equalTo("regex3")); - assertThat(skipSection.getReason(), nullValue()); + var skipSectionBuilder = SkipSection.parseInternal(parser); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.version, emptyOrNullString()); + assertThat(skipSectionBuilder.testFeatures, contains("regex1", "regex2", "regex3")); + assertThat(skipSectionBuilder.reason, nullValue()); } public void testParseSkipSectionBothFeatureAndVersion() throws Exception { @@ -127,25 +249,24 @@ public void testParseSkipSectionBothFeatureAndVersion() throws Exception { features: regex reason: Delete ignores the parent param"""); - SkipSection skipSection = SkipSection.parse(parser); - assertEquals(VersionUtils.getFirstVersion(), skipSection.getLowerVersion()); - assertEquals(Version.fromString("0.90.2"), skipSection.getUpperVersion()); - assertEquals(Collections.singletonList("regex"), skipSection.getFeatures()); - assertEquals("Delete ignores the parent param", skipSection.getReason()); + var skipSectionBuilder = SkipSection.parseInternal(parser); + assertThat(skipSectionBuilder.version, not(emptyOrNullString())); + assertThat(skipSectionBuilder.testFeatures, contains("regex")); + 
assertThat(skipSectionBuilder.reason, equalTo("Delete ignores the parent param")); } public void testParseSkipSectionNoReason() throws Exception { parser = createParser(YamlXContent.yamlXContent, "version: \" - 0.90.2\"\n"); - Exception e = expectThrows(ParsingException.class, () -> SkipSection.parse(parser)); + Exception e = expectThrows(ParsingException.class, () -> SkipSection.parseInternal(parser)); assertThat(e.getMessage(), is("reason is mandatory within skip version section")); } public void testParseSkipSectionNoVersionNorFeature() throws Exception { parser = createParser(YamlXContent.yamlXContent, "reason: Delete ignores the parent param\n"); - Exception e = expectThrows(ParsingException.class, () -> SkipSection.parse(parser)); - assertThat(e.getMessage(), is("version, features or os is mandatory within skip section")); + Exception e = expectThrows(ParsingException.class, () -> SkipSection.parseInternal(parser)); + assertThat(e.getMessage(), is("at least one criteria (version, test features, os) is mandatory within a skip section")); } public void testParseSkipSectionOsNoVersion() throws Exception { @@ -155,13 +276,12 @@ public void testParseSkipSectionOsNoVersion() throws Exception { reason: memory accounting broken, see gh#xyz """); - SkipSection skipSection = SkipSection.parse(parser); - assertThat(skipSection, notNullValue()); - assertThat(skipSection.isVersionCheck(), equalTo(false)); - assertThat(skipSection.getFeatures().size(), equalTo(2)); - assertThat(skipSection.getOperatingSystems().size(), equalTo(1)); - assertThat(skipSection.getOperatingSystems().get(0), equalTo("debian-9")); - assertThat(skipSection.getReason(), is("memory accounting broken, see gh#xyz")); + var skipSectionBuilder = SkipSection.parseInternal(parser); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.version, emptyOrNullString()); + assertThat(skipSectionBuilder.testFeatures, hasSize(2)); + assertThat(skipSectionBuilder.operatingSystems, contains("debian-9")); + assertThat(skipSectionBuilder.reason, is("memory accounting broken, see gh#xyz")); } public void testParseSkipSectionOsListNoVersion() throws Exception { @@ -171,14 +291,12 @@ public void testParseSkipSectionOsListNoVersion() throws Exception { reason: see gh#xyz """); - SkipSection skipSection = SkipSection.parse(parser); - assertThat(skipSection, notNullValue()); - assertThat(skipSection.isVersionCheck(), equalTo(false)); - assertThat(skipSection.getOperatingSystems().size(), equalTo(3)); - assertThat(skipSection.getOperatingSystems().get(0), equalTo("debian-9")); - assertThat(skipSection.getOperatingSystems().get(1), equalTo("windows-95")); - assertThat(skipSection.getOperatingSystems().get(2), equalTo("ms-dos")); - assertThat(skipSection.getReason(), is("see gh#xyz")); + var skipSectionBuilder = SkipSection.parseInternal(parser); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.version, emptyOrNullString()); + assertThat(skipSectionBuilder.testFeatures, hasSize(1)); + assertThat(skipSectionBuilder.operatingSystems, containsInAnyOrder("debian-9", "windows-95", "ms-dos")); + assertThat(skipSectionBuilder.reason, is("see gh#xyz")); } public void testParseSkipSectionOsNoFeatureNoVersion() throws Exception { @@ -187,20 +305,7 @@ public void testParseSkipSectionOsNoFeatureNoVersion() throws Exception { reason: memory accounting broken, see gh#xyz """); - Exception e = expectThrows(ParsingException.class, () -> SkipSection.parse(parser)); + Exception e = 
expectThrows(ParsingException.class, () -> SkipSection.parseInternal(parser)); assertThat(e.getMessage(), is("if os is specified, feature skip_os must be set")); } - - public void testParseSkipSectionWithThreeDigitVersion() throws Exception { - parser = createParser(YamlXContent.yamlXContent, """ - version: " - 8.2.999" - features: regex - reason: Now you have two problems"""); - - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> SkipSection.parse(parser)); - assertThat( - e.getMessage(), - containsString("illegal revision version format - only one or two digit numbers are supported but found 999") - ); - } } diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java index 52fbb3838ac9e..2c6b4f5be12de 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.test.rest.yaml.section; -import org.elasticsearch.Version; import org.elasticsearch.xcontent.yaml.YamlXContent; import static org.hamcrest.Matchers.equalTo; @@ -64,8 +63,6 @@ public void testParseWithSkip() throws Exception { TeardownSection section = TeardownSection.parse(parser); assertThat(section, notNullValue()); assertThat(section.getSkipSection().isEmpty(), equalTo(false)); - assertThat(section.getSkipSection().getLowerVersion(), equalTo(Version.fromString("6.0.0"))); - assertThat(section.getSkipSection().getUpperVersion(), equalTo(Version.fromString("6.3.0"))); assertThat(section.getSkipSection().getReason(), equalTo("there is a reason")); assertThat(section.getDoSections().size(), equalTo(2)); assertThat(((DoSection) section.getDoSections().get(0)).getApiCallSection().getApi(), equalTo("delete")); diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/VersionRangeTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/VersionRangeTests.java new file mode 100644 index 0000000000000..ba20cf51b8299 --- /dev/null +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/VersionRangeTests.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.test.rest.yaml.section; + +import org.elasticsearch.Version; +import org.elasticsearch.core.Strings; +import org.elasticsearch.test.VersionUtils; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.notNullValue; + +public class VersionRangeTests extends AbstractClientYamlTestFragmentParserTestCase { + + public void testParseVersionNoLowerBound() { + Version version = VersionUtils.randomVersion(random()); + String versionRangeString = Strings.format(" - %s", version); + + var versionRange = VersionRange.parseVersionRanges(versionRangeString); + assertThat(versionRange, notNullValue()); + assertThat(versionRange, hasSize(1)); + assertThat(versionRange.get(0).lower(), equalTo(VersionUtils.getFirstVersion())); + assertThat(versionRange.get(0).upper(), equalTo(version)); + } + + public void testParseVersionNoUpperBound() { + Version version = VersionUtils.randomVersion(random()); + String versionRangeString = Strings.format("%s - ", version); + + var versionRange = VersionRange.parseVersionRanges(versionRangeString); + assertThat(versionRange, notNullValue()); + assertThat(versionRange, hasSize(1)); + assertThat(versionRange.get(0).lower(), equalTo(version)); + assertThat(versionRange.get(0).upper(), equalTo(Version.CURRENT)); + } + + public void testParseAllVersions() { + String versionRangeString = " all "; + + var versionRange = VersionRange.parseVersionRanges(versionRangeString); + assertThat(versionRange, notNullValue()); + assertThat(versionRange, hasSize(1)); + assertThat(versionRange.get(0).lower(), equalTo(VersionUtils.getFirstVersion())); + assertThat(versionRange.get(0).upper(), equalTo(Version.CURRENT)); + } + + public void testParseMultipleRanges() { + String versionRangeString = "6.0.0 - 6.1.0, 7.1.0 - 7.5.0"; + + var versionRange = VersionRange.parseVersionRanges(versionRangeString); + assertThat(versionRange, notNullValue()); + assertThat(versionRange, hasSize(2)); + assertThat(versionRange.get(0).lower(), equalTo(Version.fromString("6.0.0"))); + assertThat(versionRange.get(0).upper(), equalTo(Version.fromString("6.1.0"))); + assertThat(versionRange.get(1).lower(), equalTo(Version.fromString("7.1.0"))); + assertThat(versionRange.get(1).upper(), equalTo(Version.fromString("7.5.0"))); + } + + public void testParseWithThreeDigitVersion() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> VersionRange.parseVersionRanges(" - 8.2.999")); + assertThat( + e.getMessage(), + containsString("illegal revision version format - only one or two digit numbers are supported but found 999") + ); + } +} diff --git a/x-pack/plugin/analytics/src/internalClusterTest/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsWithRequestBreakerIT.java b/x-pack/plugin/analytics/src/internalClusterTest/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsWithRequestBreakerIT.java index 6f13b3b4bc528..4c8666365f603 100644 --- a/x-pack/plugin/analytics/src/internalClusterTest/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsWithRequestBreakerIT.java +++ b/x-pack/plugin/analytics/src/internalClusterTest/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsWithRequestBreakerIT.java @@ -61,7 +61,7 @@ public void testRequestBreaker() throws Exception { new MultiValuesSourceFieldConfig.Builder().setFieldName("field1.keyword").build() ) ) - ).get(); + ).get().decRef(); } catch 
(ElasticsearchException e) { if (ExceptionsHelper.unwrap(e, CircuitBreakingException.class) == null) { throw e; diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/AnalyticsAggregationBuilders.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/AnalyticsAggregationBuilders.java deleted file mode 100644 index 123ed1a2f8a10..0000000000000 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/AnalyticsAggregationBuilders.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.analytics; - -import org.elasticsearch.xpack.analytics.cumulativecardinality.CumulativeCardinalityPipelineAggregationBuilder; -import org.elasticsearch.xpack.analytics.stringstats.StringStatsAggregationBuilder; - -public class AnalyticsAggregationBuilders { - - public static CumulativeCardinalityPipelineAggregationBuilder cumulativeCardinality(String name, String bucketsPath) { - return new CumulativeCardinalityPipelineAggregationBuilder(name, bucketsPath); - } - - public static StringStatsAggregationBuilder stringStats(String name) { - return new StringStatsAggregationBuilder(name); - } -} diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/aggregations/metrics/AbstractHistoBackedHDRPercentilesAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/aggregations/metrics/AbstractHistoBackedHDRPercentilesAggregator.java index c79ef9a344b67..295f44cd3c523 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/aggregations/metrics/AbstractHistoBackedHDRPercentilesAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/aggregations/metrics/AbstractHistoBackedHDRPercentilesAggregator.java @@ -109,8 +109,7 @@ protected DoubleHistogram getState(long bucketOrd) { if (bucketOrd >= states.size()) { return null; } - final DoubleHistogram state = states.get(bucketOrd); - return state; + return states.get(bucketOrd); } @Override diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistoBackedHDRPercentileRanksAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistoBackedHDRPercentileRanksAggregator.java index 209a06f1c155c..d50ebc6a46a09 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistoBackedHDRPercentileRanksAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistoBackedHDRPercentileRanksAggregator.java @@ -58,7 +58,7 @@ public double metric(String name, long bucketOrd) { if (state == null) { return Double.NaN; } else { - return InternalHDRPercentileRanks.percentileRank(state, Double.valueOf(name)); + return InternalHDRPercentileRanks.percentileRank(state, Double.parseDouble(name)); } } } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistoBackedTDigestPercentileRanksAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistoBackedTDigestPercentileRanksAggregator.java index c20c166bd552b..4ffb3c0c95e91 
100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistoBackedTDigestPercentileRanksAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistoBackedTDigestPercentileRanksAggregator.java @@ -57,7 +57,7 @@ public double metric(String name, long bucketOrd) { if (state == null) { return Double.NaN; } else { - return InternalTDigestPercentileRanks.percentileRank(state, Double.valueOf(name)); + return InternalTDigestPercentileRanks.percentileRank(state, Double.parseDouble(name)); } } } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/aggregations/support/AnalyticsValuesSourceType.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/aggregations/support/AnalyticsValuesSourceType.java index f66008bcc932c..4279911771dee 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/aggregations/support/AnalyticsValuesSourceType.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/aggregations/support/AnalyticsValuesSourceType.java @@ -55,10 +55,6 @@ public ValuesSource replaceMissing( } }; - public static ValuesSourceType fromString(String name) { - return valueOf(name.trim().toUpperCase(Locale.ROOT)); - } - public String value() { return name().toLowerCase(Locale.ROOT); } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregationBuilder.java index 5845da9f6e6c5..a4b978e64da9f 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregationBuilder.java @@ -29,9 +29,11 @@ public class CumulativeCardinalityPipelineAggregationBuilder extends AbstractPip public static final ConstructingObjectParser<CumulativeCardinalityPipelineAggregationBuilder, String> PARSER = - new ConstructingObjectParser<>(NAME, false, (args, name) -> { - return new CumulativeCardinalityPipelineAggregationBuilder(name, (String) args[0]); - }); + new ConstructingObjectParser<>( + NAME, + false, + (args, name) -> new CumulativeCardinalityPipelineAggregationBuilder(name, (String) args[0]) + ); static { PARSER.declareString(constructorArg(), BUCKETS_PATH_FIELD); PARSER.declareString(CumulativeCardinalityPipelineAggregationBuilder::format, FORMAT); diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/InternalSimpleLongValue.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/InternalSimpleLongValue.java index 0b15184e54ba7..a1c775c29da0e 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/InternalSimpleLongValue.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/InternalSimpleLongValue.java @@ -53,14 +53,6 @@ public double value() { return value; } - public long getValue() { - return value; - } - - DocValueFormat formatter() { - return format; - } - @Override public InternalSimpleLongValue reduce(List<InternalAggregation> aggregations,
AggregationReduceContext reduceContext) { throw new UnsupportedOperationException("Not supported"); diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregationBuilder.java index 72688dafe3721..76ee8272fe345 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregationBuilder.java @@ -29,9 +29,11 @@ public class MovingPercentilesPipelineAggregationBuilder extends AbstractPipelin private static final ParseField SHIFT = new ParseField("shift"); public static final ConstructingObjectParser<MovingPercentilesPipelineAggregationBuilder, String> PARSER = - new ConstructingObjectParser<>(NAME, false, (args, name) -> { - return new MovingPercentilesPipelineAggregationBuilder(name, (String) args[0], (int) args[1]); - }); + new ConstructingObjectParser<>( + NAME, + false, + (args, name) -> new MovingPercentilesPipelineAggregationBuilder(name, (String) args[0], (int) args[1]) + ); static { PARSER.declareString(constructorArg(), BUCKETS_PATH_FIELD); PARSER.declareInt(constructorArg(), WINDOW); diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregator.java index 90f22340edb0d..3dc364b1ec131 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregator.java @@ -26,6 +26,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.stream.Collectors; public class MovingPercentilesPipelineAggregator extends PipelineAggregator { @@ -41,7 +42,6 @@ public class MovingPercentilesPipelineAggregator extends PipelineAggregator { @Override public InternalAggregation reduce(InternalAggregation aggregation, AggregationReduceContext reduceContext) { - @SuppressWarnings("unchecked") InternalMultiBucketAggregation<?, ?> histo = (InternalMultiBucketAggregation<?, ?>) aggregation; List<? extends InternalMultiBucketAggregation.InternalBucket> buckets = histo.getBuckets(); HistogramFactory factory = (HistogramFactory) histo; @@ -74,7 +74,7 @@ private void reduceTDigest( List<TDigestState> values = buckets.stream() .map(b -> resolveTDigestBucketValue(histo, b, bucketsPaths()[0])) - .filter(v -> v != null) + .filter(Objects::nonNull) .toList(); int index = 0; @@ -124,7 +124,7 @@ private void reduceHDR( List<DoubleHistogram> values = buckets.stream() .map(b -> resolveHDRBucketValue(histo, b, bucketsPaths()[0])) - .filter(v -> v != null) + .filter(Objects::nonNull) .toList(); int index = 0; @@ -257,10 +257,7 @@ private static int clamp(int index, int length) { if (index < 0) { return 0; } - if (index > length) { - return length; - } - return index; + return Math.min(index, length); } // TODO: replace this with the PercentilesConfig that's used by the percentiles builder.
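The hunks above are behavior-preserving cleanups: `.filter(v -> v != null)` becomes the `Objects::nonNull` method reference, and the two upper-bound branches of `clamp` collapse into a single `Math.min`. A minimal standalone sketch of the `clamp` equivalence, with hypothetical method names that are not part of the patch:

```java
// Both variants clamp an index into [0, length]; clampBefore mirrors the removed
// code, clampAfter the replacement. Names are illustrative only.
public class ClampEquivalence {
    static int clampBefore(int index, int length) {
        if (index < 0) {
            return 0;
        }
        if (index > length) {
            return length;
        }
        return index;
    }

    static int clampAfter(int index, int length) {
        if (index < 0) {
            return 0;
        }
        return Math.min(index, length); // one expression covers the two remaining cases
    }

    public static void main(String[] args) {
        for (int index : new int[] { -3, 0, 2, 7, 11 }) {
            if (clampBefore(index, 7) != clampAfter(index, 7)) {
                throw new AssertionError("variants disagree at " + index);
            }
        }
        System.out.println("clamp variants agree");
    }
}
```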
diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilder.java index 0d3931e9eb8c2..6307cfa5b3674 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilder.java @@ -189,13 +189,6 @@ public MultiTermsAggregationBuilder terms(List<MultiValuesSourceFieldConfig> ter return this; } - /** - * Gets the field to use for this aggregation. - */ - public List<MultiValuesSourceFieldConfig> terms() { - return terms; - } - @Override protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map<String, Object> metadata) { return new MultiTermsAggregationBuilder(this, factoriesBuilder, metadata); @@ -215,13 +208,6 @@ protected final void doWriteTo(StreamOutput out) throws IOException { out.writeBoolean(showTermDocCountError); } - /** - * Get whether doc count error will be return for individual terms - */ - public boolean showTermDocCountError() { - return showTermDocCountError; - } - /** * Set whether doc count error will be return for individual terms */ @@ -242,13 +228,6 @@ public MultiTermsAggregationBuilder size(int size) { return this; } - /** - * Returns the number of term buckets currently configured - */ - public int size() { - return bucketCountThresholds.getRequiredSize(); - } - /** * Sets the shard_size - indicating the number of term buckets each shard * will return to the coordinating node (the node that coordinates the @@ -263,13 +242,6 @@ public MultiTermsAggregationBuilder shardSize(int shardSize) { return this; } - /** - * Returns the number of term buckets per shard that are currently configured - */ - public int shardSize() { - return bucketCountThresholds.getShardSize(); - } - /** * Set the minimum document count terms should have in order to appear in * the response. @@ -284,13 +256,6 @@ public MultiTermsAggregationBuilder minDocCount(long minDocCount) { return this; } - /** - * Returns the minimum document count required per term - */ - public long minDocCount() { - return bucketCountThresholds.getMinDocCount(); - } - /** * Set the minimum document count terms should have on the shard in order to * appear in the response. @@ -305,13 +270,6 @@ public MultiTermsAggregationBuilder shardMinDocCount(long shardMinDocCount) { return this; } - /** - * Returns the minimum document count required per term, per shard - */ - public long shardMinDocCount() { - return bucketCountThresholds.getShardMinDocCount(); - } - /** * Set a new order on this builder and return the builder so that calls * can be chained. A tie-breaker may be added to avoid non-deterministic ordering. @@ -341,13 +299,6 @@ public MultiTermsAggregationBuilder order(List<BucketOrder> orders) { return this; } - /** - * Gets the order in which the buckets will be returned. - */ - public BucketOrder order() { - return order; - } - /** * Expert: set the collection mode. */ @@ -359,13 +310,6 @@ public MultiTermsAggregationBuilder collectMode(Aggregator.SubAggCollectionMode return this; } - /** - * Expert: get the collection mode.
- */ - public Aggregator.SubAggCollectionMode collectMode() { - return collectMode; - } - @Override protected final MultiTermsAggregationFactory doBuild( AggregationContext context, diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/InternalStringStats.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/InternalStringStats.java index 1663a93a52235..20bd4b3380180 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/InternalStringStats.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/InternalStringStats.java @@ -214,7 +214,7 @@ public InternalStringStats reduce(List<InternalAggregation> aggregations, Aggreg minLength = Math.min(minLength, stats.getMinLength()); maxLength = Math.max(maxLength, stats.getMaxLength()); totalLength += stats.totalLength; - stats.charOccurrences.forEach((k, v) -> occurs.merge(k, v, (oldValue, newValue) -> oldValue + newValue)); + stats.charOccurrences.forEach((k, v) -> occurs.merge(k, v, Long::sum)); } return new InternalStringStats(name, count, totalLength, minLength, maxLength, occurs, showDistribution, format, getMetadata()); diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/InternalTopMetrics.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/InternalTopMetrics.java index 0194e39f47d44..4135d713f91ad 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/InternalTopMetrics.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/InternalTopMetrics.java @@ -116,7 +116,7 @@ public InternalTopMetrics reduce(List<InternalAggregation> aggregations, Aggrega return this; } List<TopMetric> merged = new ArrayList<>(size); - PriorityQueue<ReduceState> queue = new PriorityQueue<ReduceState>(aggregations.size()) { + PriorityQueue<ReduceState> queue = new PriorityQueue<>(aggregations.size()) { @Override protected boolean lessThan(ReduceState lhs, ReduceState rhs) { return sortOrder.reverseMul() * lhs.sortValue().compareTo(rhs.sortValue()) < 0; diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java index 15838a77a96ad..ba9dc7ab7eed9 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java @@ -233,7 +233,7 @@ List<MultiValuesSourceFieldConfig> getMetricFields() { @Override public Optional<Set<String>> getOutputFieldNames() { - return Optional.of(metricFields.stream().map(mf -> mf.getFieldName()).collect(Collectors.toSet())); + return Optional.of(metricFields.stream().map(MultiValuesSourceFieldConfig::getFieldName).collect(Collectors.toSet())); } @Override diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregator.java index adfb41f83cc6c..3337b6d239413 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregator.java @@ -154,8 +154,8 @@ static class Metrics
implements BucketedSort.ExtraData, Releasable { } boolean needsScores() { - for (int i = 0; i < values.length; i++) { - if (values[i].needsScores()) { + for (MetricValues value : values) { + if (value.needsScores()) { return true; } } @@ -174,8 +174,8 @@ boolean needsScores() { BucketedSort.ResultBuilder<InternalTopMetrics.TopMetric> resultBuilder(DocValueFormat sortFormat) { return (index, sortValue) -> { List<InternalTopMetrics.MetricValue> result = new ArrayList<>(values.length); - for (int i = 0; i < values.length; i++) { - result.add(values[i].metricValue(index)); + for (MetricValues value : values) { + result.add(value.metricValue(index)); } return new InternalTopMetrics.TopMetric(sortFormat, sortValue, result); }; @@ -187,8 +187,8 @@ List<String> names() { @Override public void swap(long lhs, long rhs) { - for (int i = 0; i < values.length; i++) { - values[i].swap(lhs, rhs); + for (MetricValues value : values) { + value.swap(lhs, rhs); } } @@ -199,8 +199,8 @@ public Loader loader(LeafReaderContext ctx) throws IOException { loaders[i] = values[i].loader(ctx); } return (index, doc) -> { - for (int i = 0; i < loaders.length; i++) { - loaders[i].loadFromDoc(index, doc); + for (Loader loader : loaders) { + loader.loadFromDoc(index, doc); } }; } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/TTestStatsBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/TTestStatsBuilder.java index 8e85d16e01f9f..9c4fcb3e675c3 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/TTestStatsBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/TTestStatsBuilder.java @@ -42,10 +42,6 @@ public TTestStats get(long bucket) { return new TTestStats(counts.get(bucket), sums.get(bucket), sumOfSqrs.get(bucket)); } - public long build(long bucket) { - return counts.get(bucket); - } - public long getSize() { return counts.size(); } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/TTestType.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/TTestType.java index 7b901ea792601..0f718d982b545 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/TTestType.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/TTestType.java @@ -21,8 +21,4 @@ public static TTestType resolve(String name) { return TTestType.valueOf(name.toUpperCase(Locale.ROOT)); } - public String value() { - return name().toLowerCase(Locale.ROOT); - } - } diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistogramPercentileAggregationTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistogramPercentileAggregationTests.java index eb72480927931..7c6f85104b5f8 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistogramPercentileAggregationTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistogramPercentileAggregationTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.aggregations.AggregationBuilders; import
org.elasticsearch.search.aggregations.metrics.InternalHDRPercentiles; @@ -211,11 +210,8 @@ private void setupTDigestHistogram(int compression) throws Exception { } client().admin().indices().refresh(new RefreshRequest("raw", "pre_agg")).get(); - SearchResponse response = client().prepareSearch("raw").setTrackTotalHits(true).get(); - assertEquals(numDocs, response.getHits().getTotalHits().value); - - response = client().prepareSearch("pre_agg").get(); - assertEquals(numDocs / frq, response.getHits().getTotalHits().value); + assertHitCount(client().prepareSearch("raw").setTrackTotalHits(true), numDocs); + assertHitCount(client().prepareSearch("pre_agg"), numDocs / frq); } public void testTDigestHistogram() throws Exception { @@ -228,17 +224,21 @@ public void testTDigestHistogram() throws Exception { .compression(compression) .percentiles(10, 25, 50, 75); - SearchResponse responseRaw = client().prepareSearch("raw").addAggregation(builder).get(); - SearchResponse responsePreAgg = client().prepareSearch("pre_agg").addAggregation(builder).get(); - SearchResponse responseBoth = client().prepareSearch("raw", "pre_agg").addAggregation(builder).get(); - - InternalTDigestPercentiles percentilesRaw = responseRaw.getAggregations().get("agg"); - InternalTDigestPercentiles percentilesPreAgg = responsePreAgg.getAggregations().get("agg"); - InternalTDigestPercentiles percentilesBoth = responseBoth.getAggregations().get("agg"); - for (int i = 1; i < 100; i++) { - assertEquals(percentilesRaw.percentile(i), percentilesPreAgg.percentile(i), 1.0); - assertEquals(percentilesRaw.percentile(i), percentilesBoth.percentile(i), 1.0); - } + assertResponse( + client().prepareSearch("raw").addAggregation(builder), + responseRaw -> assertResponse( + client().prepareSearch("pre_agg").addAggregation(builder), + responsePreAgg -> assertResponse(client().prepareSearch("raw", "pre_agg").addAggregation(builder), responseBoth -> { + InternalTDigestPercentiles percentilesRaw = responseRaw.getAggregations().get("agg"); + InternalTDigestPercentiles percentilesPreAgg = responsePreAgg.getAggregations().get("agg"); + InternalTDigestPercentiles percentilesBoth = responseBoth.getAggregations().get("agg"); + for (int i = 1; i < 100; i++) { + assertEquals(percentilesRaw.percentile(i), percentilesPreAgg.percentile(i), 1.0); + assertEquals(percentilesRaw.percentile(i), percentilesBoth.percentile(i), 1.0); + } + }) + ) + ); } public void testBoxplotHistogram() throws Exception { @@ -246,24 +246,28 @@ public void testBoxplotHistogram() throws Exception { setupTDigestHistogram(compression); BoxplotAggregationBuilder bpBuilder = new BoxplotAggregationBuilder("agg").field("inner.data").compression(compression); - SearchResponse bpResponseRaw = client().prepareSearch("raw").addAggregation(bpBuilder).get(); - SearchResponse bpResponsePreAgg = client().prepareSearch("pre_agg").addAggregation(bpBuilder).get(); - SearchResponse bpResponseBoth = client().prepareSearch("raw", "pre_agg").addAggregation(bpBuilder).get(); - - Boxplot bpRaw = bpResponseRaw.getAggregations().get("agg"); - Boxplot bpPreAgg = bpResponsePreAgg.getAggregations().get("agg"); - Boxplot bpBoth = bpResponseBoth.getAggregations().get("agg"); - assertEquals(bpRaw.getMax(), bpPreAgg.getMax(), 0.0); - assertEquals(bpRaw.getMax(), bpBoth.getMax(), 0.0); - assertEquals(bpRaw.getMin(), bpPreAgg.getMin(), 0.0); - assertEquals(bpRaw.getMin(), bpBoth.getMin(), 0.0); + assertResponse( + client().prepareSearch("raw").addAggregation(bpBuilder), + bpResponseRaw -> assertResponse( + 
client().prepareSearch("pre_agg").addAggregation(bpBuilder), + bpResponsePreAgg -> assertResponse(client().prepareSearch("raw", "pre_agg").addAggregation(bpBuilder), bpResponseBoth -> { + Boxplot bpRaw = bpResponseRaw.getAggregations().get("agg"); + Boxplot bpPreAgg = bpResponsePreAgg.getAggregations().get("agg"); + Boxplot bpBoth = bpResponseBoth.getAggregations().get("agg"); + assertEquals(bpRaw.getMax(), bpPreAgg.getMax(), 0.0); + assertEquals(bpRaw.getMax(), bpBoth.getMax(), 0.0); + assertEquals(bpRaw.getMin(), bpPreAgg.getMin(), 0.0); + assertEquals(bpRaw.getMin(), bpBoth.getMin(), 0.0); - assertEquals(bpRaw.getQ1(), bpPreAgg.getQ1(), 1.0); - assertEquals(bpRaw.getQ1(), bpBoth.getQ1(), 1.0); - assertEquals(bpRaw.getQ2(), bpPreAgg.getQ2(), 1.0); - assertEquals(bpRaw.getQ2(), bpBoth.getQ2(), 1.0); - assertEquals(bpRaw.getQ3(), bpPreAgg.getQ3(), 1.0); - assertEquals(bpRaw.getQ3(), bpBoth.getQ3(), 1.0); + assertEquals(bpRaw.getQ1(), bpPreAgg.getQ1(), 1.0); + assertEquals(bpRaw.getQ1(), bpBoth.getQ1(), 1.0); + assertEquals(bpRaw.getQ2(), bpPreAgg.getQ2(), 1.0); + assertEquals(bpRaw.getQ2(), bpBoth.getQ2(), 1.0); + assertEquals(bpRaw.getQ3(), bpPreAgg.getQ3(), 1.0); + assertEquals(bpRaw.getQ3(), bpBoth.getQ3(), 1.0); + }) + ) + ); } @Override diff --git a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java index 44621ee211838..f528d99133756 100644 --- a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java +++ b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java @@ -134,7 +134,9 @@ protected List getIngestPipelines() { private static ComponentTemplate loadComponentTemplate(String name, int version) { try { final byte[] content = loadVersionedResourceUTF8("/component-templates/" + name + ".yaml", version); - return ComponentTemplate.parse(YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, content)); + try (var parser = YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, content)) { + return ComponentTemplate.parse(parser); + } } catch (Exception e) { throw new RuntimeException("failed to load APM Ingest plugin's component template: " + name, e); } @@ -143,7 +145,9 @@ private static ComponentTemplate loadComponentTemplate(String name, int version) private static ComposableIndexTemplate loadIndexTemplate(String name, int version) { try { final byte[] content = loadVersionedResourceUTF8("/index-templates/" + name + ".yaml", version); - return ComposableIndexTemplate.parse(YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, content)); + try (var parser = YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, content)) { + return ComposableIndexTemplate.parse(parser); + } } catch (Exception e) { throw new RuntimeException("failed to load APM Ingest plugin's index template: " + name, e); } diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml index c946403c795dd..e6353853bc4d5 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml @@ -6,21 +6,29 @@ _meta: template: mappings: properties: + # 
error.* error.custom: type: flattened error.exception.attributes: type: flattened error.exception.stacktrace: type: flattened + error.log.stacktrace: + type: flattened error.grouping_name: type: keyword script: | def logMessage = params['_source'].error?.log?.message; - if (logMessage != null) { + if (logMessage != null && logMessage != "") { emit(logMessage); return; } def exception = params['_source'].error?.exception; - if (exception != null && exception.length > 0) { + def exceptionMessage = exception != null && exception.length > 0 ? exception[0]?.message : null; + if (exceptionMessage != null && exceptionMessage != "") { emit(exception[0].message); } + + # http.* + http.request.body: + type: flattened diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm@mappings.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm@mappings.yaml index 558a5da81e4f7..eb2da017d97b7 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm@mappings.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm@mappings.yaml @@ -6,15 +6,22 @@ _meta: template: mappings: properties: + # NOTE(axw) processor.event may be either "span" or "transaction". + # + # This field should eventually be removed, and we should end up + # with only spans. Some of those spans may be identified as local + # roots, equivalent in concept to transactions. processor.event: type: keyword + + # event.* event.success_count: type: byte index: false - span.duration.us: - type: long - transaction.duration.us: - type: long + + # http.* + http.request.body: + type: flattened http.response.transfer_size: type: long index: false @@ -24,10 +31,22 @@ template: http.response.decoded_body_size: type: long index: false + + # span.* + span.duration.us: + type: long span.representative_count: type: scaled_float scaling_factor: 1000 index: false + span.stacktrace: + type: flattened + + # transaction.* + transaction.custom: + type: flattened + transaction.duration.us: + type: long transaction.representative_count: type: scaled_float scaling_factor: 1000 diff --git a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java index 9189cdff74547..7dcd6fdd807e4 100644 --- a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java +++ b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java @@ -12,9 +12,9 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; -import org.elasticsearch.action.ingest.PutPipelineAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; @@ -51,13 +51,14 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import 
java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;

 import static org.elasticsearch.xpack.core.XPackSettings.APM_DATA_ENABLED;
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.not;
@@ -137,31 +138,30 @@ public void testThatIndependentTemplatesAreAddedImmediatelyIfMissing() throws Ex
         assertThat(actualInstalledIndexTemplates.get(), equalTo(0));
     }

-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102797")
-    public void testIngestPipelines() {
+    public void testIngestPipelines() throws Exception {
         DiscoveryNode node = DiscoveryNodeUtils.create("node");
         DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build();

         final List<IngestPipelineConfig> pipelineConfigs = apmIndexTemplateRegistry.getIngestPipelines();
         assertThat(pipelineConfigs, is(not(empty())));

-        pipelineConfigs.forEach(ingestPipelineConfig -> {
-            AtomicInteger putPipelineRequestsLocal = new AtomicInteger(0);
-            client.setVerifier((a, r, l) -> {
-                if (r instanceof PutPipelineRequest && ingestPipelineConfig.getId().equals(((PutPipelineRequest) r).getId())) {
-                    putPipelineRequestsLocal.incrementAndGet();
+        final Set<String> expectedPipelines = apmIndexTemplateRegistry.getIngestPipelines()
+            .stream()
+            .map(IngestPipelineConfig::getId)
+            .collect(Collectors.toSet());
+        final Set<String> installedPipelines = ConcurrentHashMap.newKeySet(pipelineConfigs.size());
+        client.setVerifier((a, r, l) -> {
+            if (r instanceof PutPipelineRequest putPipelineRequest) {
+                if (expectedPipelines.contains(putPipelineRequest.getId())) {
+                    installedPipelines.add(putPipelineRequest.getId());
                 }
-                return AcknowledgedResponse.TRUE;
-            });
-
-            apmIndexTemplateRegistry.clusterChanged(
-                createClusterChangedEvent(Map.of(), Map.of(), ingestPipelineConfig.getPipelineDependencies(), nodes)
-            );
-            try {
-                assertBusy(() -> assertThat(putPipelineRequestsLocal.get(), greaterThanOrEqualTo(1)));
-            } catch (Exception e) {
-                throw new RuntimeException(e);
             }
+            return AcknowledgedResponse.TRUE;
+        });
+
+        assertBusy(() -> {
+            apmIndexTemplateRegistry.clusterChanged(createClusterChangedEvent(Map.of(), Map.of(), List.copyOf(installedPipelines), nodes));
+            assertThat(installedPipelines, equalTo(expectedPipelines));
         });
     }

@@ -274,14 +274,15 @@ private ActionResponse verifyActions(
         if (action instanceof PutComponentTemplateAction) {
             componentTemplatesCounter.incrementAndGet();
             return AcknowledgedResponse.TRUE;
-        } else if (action instanceof PutComposableIndexTemplateAction) {
+        } else if (action == TransportPutComposableIndexTemplateAction.TYPE) {
            indexTemplatesCounter.incrementAndGet();
-            assertThat(request, instanceOf(PutComposableIndexTemplateAction.Request.class));
-            final PutComposableIndexTemplateAction.Request putRequest = ((PutComposableIndexTemplateAction.Request) request);
+            assertThat(request, instanceOf(TransportPutComposableIndexTemplateAction.Request.class));
+            final TransportPutComposableIndexTemplateAction.Request putRequest =
+                ((TransportPutComposableIndexTemplateAction.Request) request);
             assertThat(putRequest.indexTemplate().version(), equalTo((long) apmIndexTemplateRegistry.getVersion()));
             assertNotNull(listener);
             return AcknowledgedResponse.TRUE;
-        } else if (action instanceof PutPipelineAction) {
+        } else if (action == PutPipelineTransportAction.TYPE) {
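
The testIngestPipelines rewrite above swaps a per-pipeline verifier loop for a single concurrent collection plus one assertBusy poll. A condensed sketch of that shape, assuming the test-class helpers shown in the diff (client.setVerifier, createClusterChangedEvent) and illustrative pipeline ids:

```java
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

Set<String> expectedPipelines = Set.of("pipeline-a", "pipeline-b");  // illustrative ids
Set<String> installedPipelines = ConcurrentHashMap.newKeySet(expectedPipelines.size());

client.setVerifier((action, request, listener) -> {
    if (request instanceof PutPipelineRequest putPipelineRequest) {
        installedPipelines.add(putPipelineRequest.getId());  // thread-safe add
    }
    return AcknowledgedResponse.TRUE;
});

// assertBusy retries this block: each pass re-fires clusterChanged with the
// pipelines installed so far, letting the registry install any pipeline whose
// dependencies are now satisfied, until the two sets match.
assertBusy(() -> {
    apmIndexTemplateRegistry.clusterChanged(
        createClusterChangedEvent(Map.of(), Map.of(), List.copyOf(installedPipelines), nodes)
    );
    assertThat(installedPipelines, equalTo(expectedPipelines));
});
```
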
ingestPipelinesCounter.incrementAndGet(); return AcknowledgedResponse.TRUE; } else { @@ -310,7 +311,7 @@ private ClusterChangedEvent createClusterChangedEvent( private ClusterChangedEvent createClusterChangedEvent( Map existingComponentTemplates, Map existingComposableTemplates, - List ingestPipelines, + List existingIngestPipelines, Map existingPolicies, DiscoveryNodes nodes ) { @@ -318,7 +319,7 @@ private ClusterChangedEvent createClusterChangedEvent( Settings.EMPTY, existingComponentTemplates, existingComposableTemplates, - ingestPipelines, + existingIngestPipelines, existingPolicies, nodes ); diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml index b8fdebf9a938b..f4397ca18c101 100644 --- a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml @@ -82,110 +82,3 @@ setup: - length: {hits.hits: 1} - match: {hits.hits.0.fields.event\.success_count: [1]} - match: {hits.hits.0.fields.span\.duration\.us: [123]} - ---- -"Test metrics-apm.internal-* data stream rerouting": - - do: - bulk: - index: metrics-apm.internal-testing - refresh: true - body: - - create: {} - - "@timestamp": "2017-06-22" - data_stream.type: metrics - data_stream.dataset: apm.internal - data_stream.namespace: testing - metricset: - name: transaction - - create: {} - - "@timestamp": "2017-06-22" - data_stream.type: metrics - data_stream.dataset: apm.internal - data_stream.namespace: testing - metricset: - name: service_destination - - create: {} - - "@timestamp": "2017-06-22" - data_stream.type: metrics - data_stream.dataset: apm.internal - data_stream.namespace: testing - metricset: - name: app_config # should not be rerouted - - do: - indices.get_data_stream: - name: metrics-apm.transaction.1m-testing - - do: - indices.get_data_stream: - name: metrics-apm.service_destination.1m-testing - - do: - indices.get_data_stream: - name: metrics-apm.internal-testing - - do: - search: - index: metrics-apm* - - length: {hits.hits: 3} - - match: {hits.hits.0._source.data_stream.dataset: "apm.internal"} - - match: {hits.hits.1._source.data_stream.dataset: "apm.service_destination.1m"} - - match: {hits.hits.1._source.metricset.interval: "1m"} - - match: {hits.hits.2._source.data_stream.dataset: "apm.transaction.1m"} - - match: {hits.hits.2._source.metricset.interval: "1m"} - ---- -"Test metrics-apm.app-* dynamic mapping": - - do: - bulk: - index: metrics-apm.app.svc1-testing - refresh: true - body: - - create: {} - - "@timestamp": "2017-06-22" - data_stream.type: metrics - data_stream.dataset: apm.app.svc1 - data_stream.namespace: testing - metricset: - name: app - samples: - - name: double_metric - type: gauge - value: 123 - - name: summary_metric - type: summary - value_count: 123 - sum: 456.789 - - name: histogram_metric - type: histogram - counts: [1, 2, 3] - values: [1.5, 2.5, 3.5] - - set: - items.0.create._index: index - - do: - # Wait for cluster state changes to be applied before - # querying field mappings. 
- cluster.health: - wait_for_events: languid - - do: - indices.get_field_mapping: - index: metrics-apm.app.svc1-testing - fields: [double_metric, summary_metric, histogram_metric] - - match: - $body: - $index: - mappings: - double_metric: - full_name: double_metric - mapping: - double_metric: - type: double - index: false - summary_metric: - full_name: summary_metric - mapping: - summary_metric: - type: aggregate_metric_double - metrics : [sum, value_count] - default_metric: value_count - histogram_metric: - full_name: histogram_metric - mapping: - histogram_metric: - type: histogram diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_error_grouping.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_error_grouping.yml new file mode 100644 index 0000000000000..f7cd386227fe8 --- /dev/null +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_error_grouping.yml @@ -0,0 +1,56 @@ +--- +setup: + - do: + cluster.health: + wait_for_events: languid + +--- +"Test logs-apm.error-* error grouping": + - do: + bulk: + index: logs-apm.error-testing + refresh: true + body: + # No error object field + - create: {} + - '{"@timestamp": "2017-06-22"}' + + # Empty error object + - create: {} + - '{"@timestamp": "2017-06-22", "error": {}}' + + # Empty error.log object + - create: {} + - '{"@timestamp": "2017-06-22", "error": {"log": {}}}' + + # Empty error.exception array + - create: {} + - '{"@timestamp": "2017-06-22", "error": {"exception": []}}' + + # Empty error.exception object + - create: {} + - '{"@timestamp": "2017-06-22", "error": {"exception": [{}]}}' + + # Non-empty error.log.message used + - create: {} + - '{"@timestamp": "2017-06-22", "error": {"log": {"message": "log_used"}, "exception": [{"message": "ignored"}]}}' + + # Non-empty error.exception.message used + - create: {} + - '{"@timestamp": "2017-06-22", "error": {"log": {"message": ""}, "exception": [{"message": "exception_used"}]}}' + + - is_false: errors + + - do: + search: + index: logs-apm.error-testing + body: + fields: ["error.grouping_name"] + - length: { hits.hits: 7 } + - match: { hits.hits.0.fields: null } + - match: { hits.hits.1.fields: null } + - match: { hits.hits.2.fields: null } + - match: { hits.hits.3.fields: null } + - match: { hits.hits.4.fields: null } + - match: { hits.hits.5.fields: {"error.grouping_name": ["log_used"]} } + - match: { hits.hits.6.fields: {"error.grouping_name": ["exception_used"]} } diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_flattened_fields.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_flattened_fields.yml new file mode 100644 index 0000000000000..adb248b23fe5b --- /dev/null +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_flattened_fields.yml @@ -0,0 +1,107 @@ +--- +setup: + - do: + cluster.health: + wait_for_events: languid + +--- +"Test traces-apm-* flattened fields": + - do: + bulk: + index: traces-apm-testing + refresh: true + body: + # http.request.body should be mapped as flattened, allowing + # differing types to be used in http.request.body.original. + - create: {} + - '{"@timestamp": "2017-06-22", "http.request.body": {"original": "text"}}' + - create: {} + - '{"@timestamp": "2017-06-22", "http.request.body": {"original": {"field": "value"}}}' + + # span.stacktrace is a complex object whose structure may + # change over time, and which is always treated as an object. 
+ # Moreover, stacktraces may contain dynamic "vars" whose + # types may change from one document to the next. + - create: {} + - '{"@timestamp": "2017-06-22", "span.stacktrace": [{"vars": {"a": 123}}]}' + - create: {} + - '{"@timestamp": "2017-06-22", "span.stacktrace": [{"vars": {"a": "b"}}]}' + + # transaction.custom is a complex object of fields with + # arbitrary field types that may change from one document + # to the next. + - create: {} + - '{"@timestamp": "2017-06-22", "transaction.custom": {"a": {"b": 123}}}' + - create: {} + - '{"@timestamp": "2017-06-22", "transaction.custom": {"a": "b"}}' + + - is_false: errors + + - do: + search: + index: traces-apm-testing + body: + fields: ["http.request.body", "span.stacktrace", "transaction.custom"] + - length: { hits.hits: 6 } + - match: { hits.hits.0.fields: {"http.request.body": [{"original": "text"}]} } + - match: { hits.hits.1.fields: {"http.request.body": [{"original": {"field": "value"}}]} } + - match: { hits.hits.2.fields: {"span.stacktrace": [{"vars": {"a": 123}}]} } + - match: { hits.hits.3.fields: {"span.stacktrace": [{"vars": {"a": "b"}}]} } + - match: { hits.hits.4.fields: {"transaction.custom": [{"a": {"b": 123}}]} } + - match: { hits.hits.5.fields: {"transaction.custom": [{"a": "b"}]} } + +--- +"Test logs-apm.error-* flattened fields": + - do: + bulk: + index: logs-apm.error-testing + refresh: true + body: + # http.request.body has the same requirements as http.request.body + # in traces-apm-* data streams. + - create: {} + - '{"@timestamp": "2017-06-22", "http.request.body": {"original": "text"}}' + - create: {} + - '{"@timestamp": "2017-06-22", "http.request.body": {"original": {"field": "value"}}}' + + # error.{exception,log}.stacktrace have the same requirements as span.stacktrace. + - create: {} + - '{"@timestamp": "2017-06-22", "error.exception.stacktrace": [{"vars": {"a": 123}}]}' + - create: {} + - '{"@timestamp": "2017-06-22", "error.exception.stacktrace": [{"vars": {"a": "b"}}]}' + - create: {} + - '{"@timestamp": "2017-06-22", "error.log.stacktrace": [{"vars": {"a": 123}}]}' + - create: {} + - '{"@timestamp": "2017-06-22", "error.log.stacktrace": [{"vars": {"a": "b"}}]}' + + # error.exception.attributes is a complex object with arbitrary field types + # that may change from one document to the next. + - create: {} + - '{"@timestamp": "2017-06-22", "error.exception": [{"attributes": {"a": 123}}]}' + - create: {} + - '{"@timestamp": "2017-06-22", "error.exception": [{"attributes": {"a": "b"}}]}' + + # error.custom has the same requirements as transaction.custom. 
+ - create: {} + - '{"@timestamp": "2017-06-22", "error.custom": {"a": {"b": 123}}}' + - create: {} + - '{"@timestamp": "2017-06-22", "error.custom": {"a": "b"}}' + + - is_false: errors + + - do: + search: + index: logs-apm.error-testing + body: + fields: ["http.request.body", "error.log.*", "error.exception.*", "error.custom"] + - length: { hits.hits: 10 } + - match: { hits.hits.0.fields: {"http.request.body": [{"original": "text"}]} } + - match: { hits.hits.1.fields: {"http.request.body": [{"original": {"field": "value"}}]} } + - match: { hits.hits.2.fields: {"error.exception.stacktrace": [{"vars": {"a": 123}}]} } + - match: { hits.hits.3.fields: {"error.exception.stacktrace": [{"vars": {"a": "b"}}]} } + - match: { hits.hits.4.fields: {"error.log.stacktrace": [{"vars": {"a": 123}}]} } + - match: { hits.hits.5.fields: {"error.log.stacktrace": [{"vars": {"a": "b"}}]} } + - match: { hits.hits.6.fields: {"error.exception.attributes": [{"a": 123}]} } + - match: { hits.hits.7.fields: {"error.exception.attributes": [{"a": "b"}]} } + - match: { hits.hits.8.fields: {"error.custom": [{"a": {"b": 123}}]} } + - match: { hits.hits.9.fields: {"error.custom": [{"a": "b"}]} } diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_mapping.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_mapping.yml new file mode 100644 index 0000000000000..85858a9c5ed2e --- /dev/null +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_mapping.yml @@ -0,0 +1,65 @@ +--- +setup: + - do: + cluster.health: + wait_for_events: languid + +--- +"Test metrics-apm.app-* dynamic mapping": + - do: + bulk: + index: metrics-apm.app.svc1-testing + refresh: true + body: + - create: {} + - "@timestamp": "2017-06-22" + data_stream.type: metrics + data_stream.dataset: apm.app.svc1 + data_stream.namespace: testing + metricset: + name: app + samples: + - name: double_metric + type: gauge + value: 123 + - name: summary_metric + type: summary + value_count: 123 + sum: 456.789 + - name: histogram_metric + type: histogram + counts: [1, 2, 3] + values: [1.5, 2.5, 3.5] + - set: + items.0.create._index: index + - do: + # Wait for cluster state changes to be applied before + # querying field mappings. 
+ cluster.health: + wait_for_events: languid + - do: + indices.get_field_mapping: + index: metrics-apm.app.svc1-testing + fields: [double_metric, summary_metric, histogram_metric] + - match: + $body: + $index: + mappings: + double_metric: + full_name: double_metric + mapping: + double_metric: + type: double + index: false + summary_metric: + full_name: summary_metric + mapping: + summary_metric: + type: aggregate_metric_double + metrics : [sum, value_count] + default_metric: value_count + histogram_metric: + full_name: histogram_metric + mapping: + histogram_metric: + type: histogram diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_rerouting.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_rerouting.yml new file mode 100644 index 0000000000000..f5f2307570563 --- /dev/null +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_rerouting.yml @@ -0,0 +1,52 @@ +--- +setup: + - do: + cluster.health: + wait_for_events: languid + +--- +"Test metrics-apm.internal-* data stream rerouting": + - do: + bulk: + index: metrics-apm.internal-testing + refresh: true + body: + - create: {} + - "@timestamp": "2017-06-22" + data_stream.type: metrics + data_stream.dataset: apm.internal + data_stream.namespace: testing + metricset: + name: transaction + - create: {} + - "@timestamp": "2017-06-22" + data_stream.type: metrics + data_stream.dataset: apm.internal + data_stream.namespace: testing + metricset: + name: service_destination + - create: {} + - "@timestamp": "2017-06-22" + data_stream.type: metrics + data_stream.dataset: apm.internal + data_stream.namespace: testing + metricset: + name: app_config # should not be rerouted + - do: + indices.get_data_stream: + name: metrics-apm.transaction.1m-testing + - do: + indices.get_data_stream: + name: metrics-apm.service_destination.1m-testing + - do: + indices.get_data_stream: + name: metrics-apm.internal-testing + - do: + search: + index: metrics-apm* + - length: {hits.hits: 3} + - match: {hits.hits.0._source.data_stream.dataset: "apm.internal"} + - match: {hits.hits.1._source.data_stream.dataset: "apm.service_destination.1m"} + - match: {hits.hits.1._source.metricset.interval: "1m"} + - match: {hits.hits.2._source.data_stream.dataset: "apm.transaction.1m"} + - match: {hits.hits.2._source.metricset.interval: "1m"} diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_traces_ingest.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_traces_ingest.yml new file mode 100644 index 0000000000000..ea7f948abf0b8 --- /dev/null +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_traces_ingest.yml @@ -0,0 +1,99 @@ +--- +setup: + - do: + cluster.health: + wait_for_events: languid + +--- +"Test traces-apm-* processor.event inference": + - do: + bulk: + index: traces-apm-testing + refresh: true + body: + # `processor.event: transaction` is inferred from presence of `transaction.type` + - create: {} + - '{"@timestamp": "2017-06-22", "transaction": {"type": "foo"}}' + + # `processor.event: span` is inferred otherwise + - create: {} + - '{"@timestamp": "2017-06-22"}' + + - is_false: errors + + - do: + search: + index: traces-apm-testing + body: + fields: ["processor.event"] + - length: { hits.hits: 2 } + - match: { hits.hits.0.fields: {"processor.event": ["transaction"]} } + - match: { hits.hits.1.fields: {"processor.event": ["span"]} } + +--- +"Test traces-apm-* setting *.duration.us from 
event.duration": + - do: + bulk: + index: traces-apm-testing + refresh: true + body: + - create: {} + - '{"@timestamp": "2017-06-22", "transaction": {"type": "foo"}, "event": {"duration": 1234}}' + + - create: {} + - '{"@timestamp": "2017-06-22", "event": {"duration": 1234}}' + + # If event.duration is omitted, it is assumed to be zero. + - create: {} + - '{"@timestamp": "2017-06-22"}' + + # An existing field will not be overwritten. + - create: {} + - '{"@timestamp": "2017-06-22", "span": {"duration": {"us": 789}}}' + + - is_false: errors + + - do: + search: + index: traces-apm-testing + body: + fields: ["event.duration", "*.duration.us"] + - length: { hits.hits: 4 } + - match: { hits.hits.0.fields: {"transaction.duration.us": [1]} } + - match: { hits.hits.1.fields: {"span.duration.us": [1]} } + - match: { hits.hits.2.fields: {"span.duration.us": [0]} } + - match: { hits.hits.3.fields: {"span.duration.us": [789]} } + +--- +"Test traces-apm-* setting event.success_count from event.outcome": + - do: + bulk: + index: traces-apm-testing + refresh: true + body: + # No event.outcome, no event.success_count + - create: {} + - '{"@timestamp": "2017-06-22"}' + + # event.outcome: unknown, no event.success_count + - create: {} + - '{"@timestamp": "2017-06-22", "event": {"outcome": "unknown"}}' + + - create: {} + - '{"@timestamp": "2017-06-22", "event": {"outcome": "success"}}' + + - create: {} + - '{"@timestamp": "2017-06-22", "event": {"outcome": "failure"}}' + + - is_false: errors + + - do: + search: + index: traces-apm-testing + body: + fields: ["event.success_count"] + - length: { hits.hits: 4 } + - match: { hits.hits.0.fields: null } + - match: { hits.hits.1.fields: null } + - match: { hits.hits.2.fields: {"event.success_count": [1]} } + - match: { hits.hits.3.fields: {"event.success_count": [0]} } diff --git a/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java b/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java index fb3696a79a579..1819ad7960006 100644 --- a/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java +++ b/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.search; import org.apache.http.util.EntityUtils; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -182,15 +183,19 @@ private void testCase(String user, String other) throws Exception { private SearchHit[] getSearchHits(String asyncId, String user) throws IOException { final Response resp = getAsyncSearch(asyncId, user); assertOK(resp); - AsyncSearchResponse searchResponse = AsyncSearchResponse.fromXContent( + SearchResponse searchResponse = AsyncSearchResponse.fromXContent( XContentHelper.createParser( NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, new BytesArray(EntityUtils.toByteArray(resp.getEntity())), XContentType.JSON ) - ); - return searchResponse.getSearchResponse().getHits().getHits(); + ).getSearchResponse(); + try { + return searchResponse.getHits().getHits(); + } finally { + searchResponse.decRef(); + } } public void testAuthorizationOfPointInTime() throws Exception { @@ -229,7 +234,7 @@ public void testRejectPointInTimeWithIndices() throws Exception 
{ try { final Request request = new Request("POST", "/_async_search"); setRunAsHeader(request, authorizedUser); - request.addParameter("wait_for_completion_timeout", "true"); + request.addParameter("wait_for_completion_timeout", "1s"); request.addParameter("keep_on_completion", "true"); if (randomBoolean()) { request.addParameter("index", "index-" + authorizedUser); diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchActionIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchActionIT.java index dcf47deeebc2b..58644208a1da0 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchActionIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchActionIT.java @@ -109,38 +109,43 @@ public void testMaxMinAggregation() throws Exception { .aggregation(AggregationBuilders.max("max").field("metric")); try (SearchResponseIterator it = assertBlockingIterator(indexName, numShards, source, numFailures, step)) { AsyncSearchResponse response = it.next(); - while (it.hasNext()) { - response = it.next(); - assertNotNull(response.getSearchResponse()); - if (response.getSearchResponse().getSuccessfulShards() > 0) { + try { + while (it.hasNext()) { + response.decRef(); + response = it.next(); + assertNotNull(response.getSearchResponse()); + if (response.getSearchResponse().getSuccessfulShards() > 0) { + assertNotNull(response.getSearchResponse().getAggregations()); + assertNotNull(response.getSearchResponse().getAggregations().get("max")); + assertNotNull(response.getSearchResponse().getAggregations().get("min")); + Max max = response.getSearchResponse().getAggregations().get("max"); + Min min = response.getSearchResponse().getAggregations().get("min"); + assertThat((float) min.value(), greaterThanOrEqualTo(minMetric)); + assertThat((float) max.value(), lessThanOrEqualTo(maxMetric)); + } + } + if (numFailures == numShards) { + assertNotNull(response.getFailure()); + } else { + assertNotNull(response.getSearchResponse()); assertNotNull(response.getSearchResponse().getAggregations()); assertNotNull(response.getSearchResponse().getAggregations().get("max")); assertNotNull(response.getSearchResponse().getAggregations().get("min")); Max max = response.getSearchResponse().getAggregations().get("max"); Min min = response.getSearchResponse().getAggregations().get("min"); - assertThat((float) min.value(), greaterThanOrEqualTo(minMetric)); - assertThat((float) max.value(), lessThanOrEqualTo(maxMetric)); - } - } - if (numFailures == numShards) { - assertNotNull(response.getFailure()); - } else { - assertNotNull(response.getSearchResponse()); - assertNotNull(response.getSearchResponse().getAggregations()); - assertNotNull(response.getSearchResponse().getAggregations().get("max")); - assertNotNull(response.getSearchResponse().getAggregations().get("min")); - Max max = response.getSearchResponse().getAggregations().get("max"); - Min min = response.getSearchResponse().getAggregations().get("min"); - if (numFailures == 0) { - assertThat((float) min.value(), equalTo(minMetric)); - assertThat((float) max.value(), equalTo(maxMetric)); - } else { - assertThat((float) min.value(), greaterThanOrEqualTo(minMetric)); - assertThat((float) max.value(), lessThanOrEqualTo(maxMetric)); + if (numFailures == 0) { + assertThat((float) min.value(), equalTo(minMetric)); + assertThat((float) max.value(), equalTo(maxMetric)); + 
} else { + assertThat((float) min.value(), greaterThanOrEqualTo(minMetric)); + assertThat((float) max.value(), lessThanOrEqualTo(maxMetric)); + } } + deleteAsyncSearch(response.getId()); + ensureTaskRemoval(response.getId()); + } finally { + response.decRef(); } - deleteAsyncSearch(response.getId()); - ensureTaskRemoval(response.getId()); } } @@ -152,10 +157,27 @@ public void testTermsAggregation() throws Exception { ); try (SearchResponseIterator it = assertBlockingIterator(indexName, numShards, source, numFailures, step)) { AsyncSearchResponse response = it.next(); - while (it.hasNext()) { - response = it.next(); - assertNotNull(response.getSearchResponse()); - if (response.getSearchResponse().getSuccessfulShards() > 0) { + try { + while (it.hasNext()) { + response.decRef(); + response = it.next(); + assertNotNull(response.getSearchResponse()); + if (response.getSearchResponse().getSuccessfulShards() > 0) { + assertNotNull(response.getSearchResponse().getAggregations()); + assertNotNull(response.getSearchResponse().getAggregations().get("terms")); + StringTerms terms = response.getSearchResponse().getAggregations().get("terms"); + assertThat(terms.getBuckets().size(), greaterThanOrEqualTo(0)); + assertThat(terms.getBuckets().size(), lessThanOrEqualTo(numKeywords)); + for (InternalTerms.Bucket bucket : terms.getBuckets()) { + long count = keywordFreqs.getOrDefault(bucket.getKeyAsString(), new AtomicInteger(0)).get(); + assertThat(bucket.getDocCount(), lessThanOrEqualTo(count)); + } + } + } + if (numFailures == numShards) { + assertNotNull(response.getFailure()); + } else { + assertNotNull(response.getSearchResponse()); assertNotNull(response.getSearchResponse().getAggregations()); assertNotNull(response.getSearchResponse().getAggregations().get("terms")); StringTerms terms = response.getSearchResponse().getAggregations().get("terms"); @@ -163,58 +185,55 @@ public void testTermsAggregation() throws Exception { assertThat(terms.getBuckets().size(), lessThanOrEqualTo(numKeywords)); for (InternalTerms.Bucket bucket : terms.getBuckets()) { long count = keywordFreqs.getOrDefault(bucket.getKeyAsString(), new AtomicInteger(0)).get(); - assertThat(bucket.getDocCount(), lessThanOrEqualTo(count)); + if (numFailures > 0) { + assertThat(bucket.getDocCount(), lessThanOrEqualTo(count)); + } else { + assertThat(bucket.getDocCount(), equalTo(count)); + } } } + deleteAsyncSearch(response.getId()); + ensureTaskRemoval(response.getId()); + } finally { + response.decRef(); } - if (numFailures == numShards) { - assertNotNull(response.getFailure()); - } else { - assertNotNull(response.getSearchResponse()); - assertNotNull(response.getSearchResponse().getAggregations()); - assertNotNull(response.getSearchResponse().getAggregations().get("terms")); - StringTerms terms = response.getSearchResponse().getAggregations().get("terms"); - assertThat(terms.getBuckets().size(), greaterThanOrEqualTo(0)); - assertThat(terms.getBuckets().size(), lessThanOrEqualTo(numKeywords)); - for (InternalTerms.Bucket bucket : terms.getBuckets()) { - long count = keywordFreqs.getOrDefault(bucket.getKeyAsString(), new AtomicInteger(0)).get(); - if (numFailures > 0) { - assertThat(bucket.getDocCount(), lessThanOrEqualTo(count)); - } else { - assertThat(bucket.getDocCount(), equalTo(count)); - } - } - } - deleteAsyncSearch(response.getId()); - ensureTaskRemoval(response.getId()); } } public void testRestartAfterCompletion() throws Exception { - final AsyncSearchResponse initial; + final String initialId; try (SearchResponseIterator it = 
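
The loops in testMaxMinAggregation and testTermsAggregation above all follow one ownership rule for ref-counted responses: hold exactly one live AsyncSearchResponse at a time. Distilled from the diff:

```java
// The loop owns one response at a time: release the previous response
// before taking the next, and let finally cover the last one even if an
// assertion throws mid-loop.
AsyncSearchResponse response = it.next();
try {
    while (it.hasNext()) {
        response.decRef();       // drop the previous reference
        response = it.next();    // take ownership of the next response
        assertNotNull(response.getSearchResponse());
    }
} finally {
    response.decRef();           // always release the final reference
}
```
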
assertBlockingIterator(indexName, numShards, new SearchSourceBuilder(), 0, 2)) { - initial = it.next(); + var initial = it.next(); + try { + initialId = initial.getId(); + } finally { + initial.decRef(); + } while (it.hasNext()) { - it.next(); + it.next().decRef(); } } - ensureTaskCompletion(initial.getId()); - restartTaskNode(initial.getId(), indexName); + ensureTaskCompletion(initialId); + restartTaskNode(initialId, indexName); - AsyncSearchResponse response = getAsyncSearch(initial.getId()); - assertNotNull(response.getSearchResponse()); - assertFalse(response.isRunning()); - assertFalse(response.isPartial()); + AsyncSearchResponse response = getAsyncSearch(initialId); + try { + assertNotNull(response.getSearchResponse()); + assertFalse(response.isRunning()); + assertFalse(response.isPartial()); - AsyncStatusResponse statusResponse = getAsyncStatus(initial.getId()); - assertFalse(statusResponse.isRunning()); - assertFalse(statusResponse.isPartial()); - assertEquals(numShards, statusResponse.getTotalShards()); - assertEquals(numShards, statusResponse.getSuccessfulShards()); - assertEquals(RestStatus.OK, statusResponse.getCompletionStatus()); + AsyncStatusResponse statusResponse = getAsyncStatus(initialId); + assertFalse(statusResponse.isRunning()); + assertFalse(statusResponse.isPartial()); + assertEquals(numShards, statusResponse.getTotalShards()); + assertEquals(numShards, statusResponse.getSuccessfulShards()); + assertEquals(RestStatus.OK, statusResponse.getCompletionStatus()); - deleteAsyncSearch(response.getId()); - ensureTaskRemoval(response.getId()); + deleteAsyncSearch(response.getId()); + ensureTaskRemoval(response.getId()); + } finally { + response.decRef(); + } } public void testDeleteCancelRunningTask() throws Exception { @@ -223,6 +242,7 @@ public void testDeleteCancelRunningTask() throws Exception { SearchResponseIterator it = assertBlockingIterator(indexName, numShards, new SearchSourceBuilder(), randomBoolean() ? 1 : 0, 2) ) { initial = it.next(); + initial.decRef(); deleteAsyncSearch(initial.getId()); it.close(); ensureTaskCompletion(initial.getId()); @@ -235,6 +255,7 @@ public void testDeleteCleanupIndex() throws Exception { SearchResponseIterator it = assertBlockingIterator(indexName, numShards, new SearchSourceBuilder(), randomBoolean() ? 
1 : 0, 2) ) { AsyncSearchResponse response = it.next(); + response.decRef(); deleteAsyncSearch(response.getId()); it.close(); ensureTaskCompletion(response.getId()); @@ -243,19 +264,28 @@ public void testDeleteCleanupIndex() throws Exception { } public void testCleanupOnFailure() throws Exception { - final AsyncSearchResponse initial; + final String initialId; try (SearchResponseIterator it = assertBlockingIterator(indexName, numShards, new SearchSourceBuilder(), numShards, 2)) { - initial = it.next(); + var resp = it.next(); + try { + initialId = resp.getId(); + } finally { + resp.decRef(); + } + } + ensureTaskCompletion(initialId); + AsyncSearchResponse response = getAsyncSearch(initialId); + try { + assertFalse(response.isRunning()); + assertNotNull(response.getFailure()); + assertTrue(response.isPartial()); + assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); + assertThat(response.getSearchResponse().getShardFailures().length, equalTo(numShards)); + } finally { + response.decRef(); } - ensureTaskCompletion(initial.getId()); - AsyncSearchResponse response = getAsyncSearch(initial.getId()); - assertFalse(response.isRunning()); - assertNotNull(response.getFailure()); - assertTrue(response.isPartial()); - assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); - assertThat(response.getSearchResponse().getShardFailures().length, equalTo(numShards)); - - AsyncStatusResponse statusResponse = getAsyncStatus(initial.getId()); + + AsyncStatusResponse statusResponse = getAsyncStatus(initialId); assertFalse(statusResponse.isRunning()); assertTrue(statusResponse.isPartial()); assertEquals(numShards, statusResponse.getTotalShards()); @@ -263,8 +293,8 @@ public void testCleanupOnFailure() throws Exception { assertEquals(numShards, statusResponse.getFailedShards()); assertThat(statusResponse.getCompletionStatus().getStatus(), greaterThanOrEqualTo(400)); - deleteAsyncSearch(initial.getId()); - ensureTaskRemoval(initial.getId()); + deleteAsyncSearch(initialId); + ensureTaskRemoval(initialId); } public void testInvalidId() throws Exception { @@ -272,13 +302,18 @@ public void testInvalidId() throws Exception { SearchResponseIterator it = assertBlockingIterator(indexName, numShards, new SearchSourceBuilder(), randomBoolean() ? 
1 : 0, 2) ) { AsyncSearchResponse response = it.next(); - ExecutionException exc = expectThrows(ExecutionException.class, () -> getAsyncSearch("invalid")); - assertThat(exc.getCause(), instanceOf(IllegalArgumentException.class)); - assertThat(exc.getMessage(), containsString("invalid id")); - while (it.hasNext()) { - response = it.next(); + try { + ExecutionException exc = expectThrows(ExecutionException.class, () -> getAsyncSearch("invalid")); + assertThat(exc.getCause(), instanceOf(IllegalArgumentException.class)); + assertThat(exc.getMessage(), containsString("invalid id")); + while (it.hasNext()) { + response.decRef(); + response = it.next(); + } + assertFalse(response.isRunning()); + } finally { + response.decRef(); } - assertFalse(response.isRunning()); } ExecutionException exc = expectThrows(ExecutionException.class, () -> getAsyncStatus("invalid")); @@ -289,49 +324,75 @@ public void testInvalidId() throws Exception { public void testNoIndex() throws Exception { SubmitAsyncSearchRequest request = new SubmitAsyncSearchRequest("invalid-*"); request.setWaitForCompletionTimeout(TimeValue.timeValueMillis(1)); - AsyncSearchResponse response = submitAsyncSearch(request); - assertNotNull(response.getSearchResponse()); - assertFalse(response.isRunning()); - assertThat(response.getSearchResponse().getTotalShards(), equalTo(0)); + { + final AsyncSearchResponse response = submitAsyncSearch(request); + try { + assertNotNull(response.getSearchResponse()); + assertFalse(response.isRunning()); + assertThat(response.getSearchResponse().getTotalShards(), equalTo(0)); + } finally { + response.decRef(); + } + } request = new SubmitAsyncSearchRequest("invalid"); request.setWaitForCompletionTimeout(TimeValue.timeValueMillis(1)); - response = submitAsyncSearch(request); - assertNull(response.getSearchResponse()); - assertNotNull(response.getFailure()); - assertFalse(response.isRunning()); - Exception exc = response.getFailure(); - assertThat(exc.getMessage(), containsString("error while executing search")); - assertThat(exc.getCause().getMessage(), containsString("no such index")); + { + final var response = submitAsyncSearch(request); + try { + assertNull(response.getSearchResponse()); + assertNotNull(response.getFailure()); + assertFalse(response.isRunning()); + Exception exc = response.getFailure(); + assertThat(exc.getMessage(), containsString("error while executing search")); + assertThat(exc.getCause().getMessage(), containsString("no such index")); + } finally { + response.decRef(); + } + } } public void testCancellation() throws Exception { SubmitAsyncSearchRequest request = new SubmitAsyncSearchRequest(indexName); request.getSearchRequest().source(new SearchSourceBuilder().aggregation(new CancellingAggregationBuilder("test", randomLong()))); request.setWaitForCompletionTimeout(TimeValue.timeValueMillis(1)); - AsyncSearchResponse response = submitAsyncSearch(request); - assertNotNull(response.getSearchResponse()); - assertTrue(response.isRunning()); - assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); - assertThat(response.getSearchResponse().getSuccessfulShards(), equalTo(0)); - assertThat(response.getSearchResponse().getFailedShards(), equalTo(0)); - - response = getAsyncSearch(response.getId()); - assertNotNull(response.getSearchResponse()); - assertTrue(response.isRunning()); - assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); - assertThat(response.getSearchResponse().getSuccessfulShards(), equalTo(0)); - 
assertThat(response.getSearchResponse().getFailedShards(), equalTo(0)); - - AsyncStatusResponse statusResponse = getAsyncStatus(response.getId()); - assertTrue(statusResponse.isRunning()); - assertEquals(numShards, statusResponse.getTotalShards()); - assertEquals(0, statusResponse.getSuccessfulShards()); - assertEquals(0, statusResponse.getSkippedShards()); - assertEquals(0, statusResponse.getFailedShards()); + final String responseId; + { + final AsyncSearchResponse response = submitAsyncSearch(request); + try { + responseId = response.getId(); + assertNotNull(response.getSearchResponse()); + assertTrue(response.isRunning()); + assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); + assertThat(response.getSearchResponse().getSuccessfulShards(), equalTo(0)); + assertThat(response.getSearchResponse().getFailedShards(), equalTo(0)); + } finally { + response.decRef(); + } + } - deleteAsyncSearch(response.getId()); - ensureTaskRemoval(response.getId()); + { + final var response = getAsyncSearch(responseId); + try { + assertNotNull(response.getSearchResponse()); + assertTrue(response.isRunning()); + assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); + assertThat(response.getSearchResponse().getSuccessfulShards(), equalTo(0)); + assertThat(response.getSearchResponse().getFailedShards(), equalTo(0)); + + AsyncStatusResponse statusResponse = getAsyncStatus(response.getId()); + assertTrue(statusResponse.isRunning()); + assertEquals(numShards, statusResponse.getTotalShards()); + assertEquals(0, statusResponse.getSuccessfulShards()); + assertEquals(0, statusResponse.getSkippedShards()); + assertEquals(0, statusResponse.getFailedShards()); + + deleteAsyncSearch(response.getId()); + ensureTaskRemoval(response.getId()); + } finally { + response.decRef(); + } + } } public void testUpdateRunningKeepAlive() throws Exception { @@ -339,45 +400,70 @@ public void testUpdateRunningKeepAlive() throws Exception { request.getSearchRequest().source(new SearchSourceBuilder().aggregation(new CancellingAggregationBuilder("test", randomLong()))); long now = System.currentTimeMillis(); request.setWaitForCompletionTimeout(TimeValue.timeValueMillis(1)); - AsyncSearchResponse response = submitAsyncSearch(request); - assertNotNull(response.getSearchResponse()); - assertTrue(response.isRunning()); - assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); - assertThat(response.getSearchResponse().getSuccessfulShards(), equalTo(0)); - assertThat(response.getSearchResponse().getFailedShards(), equalTo(0)); - assertThat(response.getExpirationTime(), greaterThan(now)); - long expirationTime = response.getExpirationTime(); - - response = getAsyncSearch(response.getId()); - assertNotNull(response.getSearchResponse()); - assertTrue(response.isRunning()); - assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); - assertThat(response.getSearchResponse().getSuccessfulShards(), equalTo(0)); - assertThat(response.getSearchResponse().getFailedShards(), equalTo(0)); + final long expirationTime; + final String responseId; + { + final AsyncSearchResponse response = submitAsyncSearch(request); + try { + responseId = response.getId(); + assertNotNull(response.getSearchResponse()); + assertTrue(response.isRunning()); + assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); + assertThat(response.getSearchResponse().getSuccessfulShards(), equalTo(0)); + assertThat(response.getSearchResponse().getFailedShards(), equalTo(0)); 
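
testCancellation and the keep-alive tests above repeat a second idiom: fence each response inside its own block and let only plain values (an id, an expiration timestamp) escape. A minimal sketch, with the request assumed to be built as in the surrounding tests:

```java
final String responseId;
{
    final AsyncSearchResponse response = submitAsyncSearch(request);
    try {
        responseId = response.getId();   // only the String escapes the block
        assertTrue(response.isRunning());
    } finally {
        response.decRef();               // the response cannot leak past here
    }
}
// Later steps fetch fresh responses by id and release them the same way.
```
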
+ assertThat(response.getExpirationTime(), greaterThan(now)); + expirationTime = response.getExpirationTime(); + } finally { + response.decRef(); + } + } - response = getAsyncSearch(response.getId(), TimeValue.timeValueDays(10)); - assertThat(response.getExpirationTime(), greaterThan(expirationTime)); + final String responseId2; + { + final AsyncSearchResponse response = getAsyncSearch(responseId); + try { + responseId2 = response.getId(); + assertNotNull(response.getSearchResponse()); + assertTrue(response.isRunning()); + assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); + assertThat(response.getSearchResponse().getSuccessfulShards(), equalTo(0)); + assertThat(response.getSearchResponse().getFailedShards(), equalTo(0)); + } finally { + response.decRef(); + } + } - assertTrue(response.isRunning()); - assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); - assertThat(response.getSearchResponse().getSuccessfulShards(), equalTo(0)); - assertThat(response.getSearchResponse().getFailedShards(), equalTo(0)); + final AsyncSearchResponse response = getAsyncSearch(responseId2, TimeValue.timeValueDays(10)); + try { + assertThat(response.getExpirationTime(), greaterThan(expirationTime)); + + assertTrue(response.isRunning()); + assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); + assertThat(response.getSearchResponse().getSuccessfulShards(), equalTo(0)); + assertThat(response.getSearchResponse().getFailedShards(), equalTo(0)); + + AsyncStatusResponse statusResponse = getAsyncStatus(response.getId()); + assertTrue(statusResponse.isRunning()); + assertTrue(statusResponse.isPartial()); + assertThat(statusResponse.getExpirationTime(), greaterThan(expirationTime)); + assertThat(statusResponse.getStartTime(), lessThan(statusResponse.getExpirationTime())); + assertEquals(numShards, statusResponse.getTotalShards()); + assertEquals(0, statusResponse.getSuccessfulShards()); + assertEquals(0, statusResponse.getFailedShards()); + assertEquals(0, statusResponse.getSkippedShards()); + assertEquals(null, statusResponse.getCompletionStatus()); + } finally { + response.decRef(); + } - AsyncStatusResponse statusResponse = getAsyncStatus(response.getId()); - assertTrue(statusResponse.isRunning()); - assertTrue(statusResponse.isPartial()); - assertThat(statusResponse.getExpirationTime(), greaterThan(expirationTime)); - assertThat(statusResponse.getStartTime(), lessThan(statusResponse.getExpirationTime())); - assertEquals(numShards, statusResponse.getTotalShards()); - assertEquals(0, statusResponse.getSuccessfulShards()); - assertEquals(0, statusResponse.getFailedShards()); - assertEquals(0, statusResponse.getSkippedShards()); - assertEquals(null, statusResponse.getCompletionStatus()); - - response = getAsyncSearch(response.getId(), TimeValue.timeValueMillis(1)); - assertThat(response.getExpirationTime(), lessThan(expirationTime)); - ensureTaskNotRunning(response.getId()); - ensureTaskRemoval(response.getId()); + final AsyncSearchResponse response2 = getAsyncSearch(response.getId(), TimeValue.timeValueMillis(1)); + try { + assertThat(response2.getExpirationTime(), lessThan(expirationTime)); + ensureTaskNotRunning(response2.getId()); + ensureTaskRemoval(response2.getId()); + } finally { + response2.decRef(); + } } public void testUpdateStoreKeepAlive() throws Exception { @@ -386,34 +472,50 @@ public void testUpdateStoreKeepAlive() throws Exception { request.setWaitForCompletionTimeout(TimeValue.timeValueMinutes(10)); 
request.setKeepOnCompletion(true); AsyncSearchResponse response = submitAsyncSearch(request); - assertNotNull(response.getSearchResponse()); - assertFalse(response.isRunning()); - assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); - assertThat(response.getSearchResponse().getSuccessfulShards(), equalTo(numShards)); - assertThat(response.getSearchResponse().getFailedShards(), equalTo(0)); - assertThat(response.getExpirationTime(), greaterThan(now)); + try { + assertNotNull(response.getSearchResponse()); + assertFalse(response.isRunning()); + assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); + assertThat(response.getSearchResponse().getSuccessfulShards(), equalTo(numShards)); + assertThat(response.getSearchResponse().getFailedShards(), equalTo(0)); + assertThat(response.getExpirationTime(), greaterThan(now)); + } finally { + response.decRef(); + } final String searchId = response.getId(); long expirationTime = response.getExpirationTime(); response = getAsyncSearch(searchId); - assertNotNull(response.getSearchResponse()); - assertFalse(response.isRunning()); - assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); - assertThat(response.getSearchResponse().getSuccessfulShards(), equalTo(numShards)); - assertThat(response.getSearchResponse().getFailedShards(), equalTo(0)); + try { + assertNotNull(response.getSearchResponse()); + assertFalse(response.isRunning()); + assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); + assertThat(response.getSearchResponse().getSuccessfulShards(), equalTo(numShards)); + assertThat(response.getSearchResponse().getFailedShards(), equalTo(0)); + } finally { + response.decRef(); + } response = getAsyncSearch(searchId, TimeValue.timeValueDays(10)); - assertThat(response.getExpirationTime(), greaterThan(expirationTime)); + try { + assertThat(response.getExpirationTime(), greaterThan(expirationTime)); - assertFalse(response.isRunning()); - assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); - assertThat(response.getSearchResponse().getSuccessfulShards(), equalTo(numShards)); - assertThat(response.getSearchResponse().getFailedShards(), equalTo(0)); + assertFalse(response.isRunning()); + assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); + assertThat(response.getSearchResponse().getSuccessfulShards(), equalTo(numShards)); + assertThat(response.getSearchResponse().getFailedShards(), equalTo(0)); + } finally { + response.decRef(); + } try { AsyncSearchResponse finalResponse = getAsyncSearch(searchId, TimeValue.timeValueMillis(1)); - assertThat(finalResponse.getExpirationTime(), lessThan(expirationTime)); + try { + assertThat(finalResponse.getExpirationTime(), lessThan(expirationTime)); + } finally { + finalResponse.decRef(); + } } catch (ExecutionException e) { // The 'get async search' method first updates the expiration time, then gets the response. 
So the // maintenance service might remove the document right after it's updated, which means the get request @@ -433,18 +535,24 @@ public void testRemoveAsyncIndex() throws Exception { request.setKeepOnCompletion(true); long now = System.currentTimeMillis(); - AsyncSearchResponse response = submitAsyncSearch(request); - assertNotNull(response.getSearchResponse()); - assertFalse(response.isRunning()); - assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); - assertThat(response.getSearchResponse().getSuccessfulShards(), equalTo(numShards)); - assertThat(response.getSearchResponse().getFailedShards(), equalTo(0)); - assertThat(response.getExpirationTime(), greaterThan(now)); + final String responseId; + final AsyncSearchResponse response = submitAsyncSearch(request); + try { + assertNotNull(response.getSearchResponse()); + assertFalse(response.isRunning()); + assertThat(response.getSearchResponse().getTotalShards(), equalTo(numShards)); + assertThat(response.getSearchResponse().getSuccessfulShards(), equalTo(numShards)); + assertThat(response.getSearchResponse().getFailedShards(), equalTo(0)); + assertThat(response.getExpirationTime(), greaterThan(now)); + responseId = response.getId(); + } finally { + response.decRef(); + } // remove the async search index indicesAdmin().prepareDelete(XPackPlugin.ASYNC_RESULTS_INDEX).get(); - Exception exc = expectThrows(Exception.class, () -> getAsyncSearch(response.getId())); + Exception exc = expectThrows(Exception.class, () -> getAsyncSearch(responseId)); Throwable cause = exc instanceof ExecutionException ? ExceptionsHelper.unwrapCause(exc.getCause()) : ExceptionsHelper.unwrapCause(exc); @@ -453,16 +561,20 @@ public void testRemoveAsyncIndex() throws Exception { SubmitAsyncSearchRequest newReq = new SubmitAsyncSearchRequest(indexName); newReq.getSearchRequest().source(new SearchSourceBuilder().aggregation(new CancellingAggregationBuilder("test", randomLong()))); newReq.setWaitForCompletionTimeout(TimeValue.timeValueMillis(1)).setKeepAlive(TimeValue.timeValueSeconds(1)); - AsyncSearchResponse newResp = submitAsyncSearch(newReq); - assertNotNull(newResp.getSearchResponse()); - assertTrue(newResp.isRunning()); - assertThat(newResp.getSearchResponse().getTotalShards(), equalTo(numShards)); - assertThat(newResp.getSearchResponse().getSuccessfulShards(), equalTo(0)); - assertThat(newResp.getSearchResponse().getFailedShards(), equalTo(0)); - - // check garbage collection - ensureTaskNotRunning(newResp.getId()); - ensureTaskRemoval(newResp.getId()); + final AsyncSearchResponse newResp = submitAsyncSearch(newReq); + try { + assertNotNull(newResp.getSearchResponse()); + assertTrue(newResp.isRunning()); + assertThat(newResp.getSearchResponse().getTotalShards(), equalTo(numShards)); + assertThat(newResp.getSearchResponse().getSuccessfulShards(), equalTo(0)); + assertThat(newResp.getSearchResponse().getFailedShards(), equalTo(0)); + + // check garbage collection + ensureTaskNotRunning(newResp.getId()); + ensureTaskRemoval(newResp.getId()); + } finally { + newResp.decRef(); + } } public void testSearchPhaseFailure() throws Exception { @@ -473,11 +585,15 @@ public void testSearchPhaseFailure() throws Exception { request.getSearchRequest() .source(new SearchSourceBuilder().query(new ThrowingQueryBuilder(randomLong(), new AlreadyClosedException("boom"), 0))); AsyncSearchResponse response = submitAsyncSearch(request); - assertFalse(response.isRunning()); - assertTrue(response.isPartial()); - assertThat(response.status(), 
equalTo(RestStatus.SERVICE_UNAVAILABLE)); - assertNotNull(response.getFailure()); - ensureTaskNotRunning(response.getId()); + try { + assertFalse(response.isRunning()); + assertTrue(response.isPartial()); + assertThat(response.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE)); + assertNotNull(response.getFailure()); + ensureTaskNotRunning(response.getId()); + } finally { + response.decRef(); + } } public void testSearchPhaseFailureLeak() throws Exception { @@ -494,11 +610,15 @@ public void testSearchPhaseFailureLeak() throws Exception { request.getSearchRequest().source().aggregation(terms("f").field("f").size(between(1, 10))); AsyncSearchResponse response = submitAsyncSearch(request); - assertFalse(response.isRunning()); - assertTrue(response.isPartial()); - assertThat(response.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE)); - assertNotNull(response.getFailure()); - ensureTaskNotRunning(response.getId()); + try { + assertFalse(response.isRunning()); + assertTrue(response.isPartial()); + assertThat(response.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE)); + assertNotNull(response.getFailure()); + ensureTaskNotRunning(response.getId()); + } finally { + response.decRef(); + } } public void testMaxResponseSize() { @@ -538,9 +658,13 @@ public TransportVersion getMinimalSupportedVersion() { }), indexName); AsyncSearchResponse response = submitAsyncSearch(request); - assertFalse(response.isRunning()); - Exception failure = response.getFailure(); - assertThat(failure.getMessage(), containsString("error while executing search")); - assertThat(failure.getCause().getMessage(), containsString("the 'search.check_ccs_compatibility' setting is enabled")); + try { + assertFalse(response.isRunning()); + Exception failure = response.getFailure(); + assertThat(failure.getMessage(), containsString("error while executing search")); + assertThat(failure.getCause().getMessage(), containsString("the 'search.check_ccs_compatibility' setting is enabled")); + } finally { + response.decRef(); + } } } diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java index 3f888685f33db..1cdc1dd1d396c 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java @@ -38,10 +38,10 @@ import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; import org.elasticsearch.xpack.core.async.AsyncExecutionId; import org.elasticsearch.xpack.core.async.AsyncTaskMaintenanceService; -import org.elasticsearch.xpack.core.async.DeleteAsyncResultAction; import org.elasticsearch.xpack.core.async.DeleteAsyncResultRequest; import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; import org.elasticsearch.xpack.core.async.GetAsyncStatusRequest; +import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction; import org.elasticsearch.xpack.core.search.action.AsyncSearchResponse; import org.elasticsearch.xpack.core.search.action.AsyncStatusResponse; import org.elasticsearch.xpack.core.search.action.GetAsyncSearchAction; @@ -175,7 +175,7 @@ protected AsyncStatusResponse getAsyncStatus(String id) throws ExecutionExceptio } protected AcknowledgedResponse deleteAsyncSearch(String id) throws ExecutionException, InterruptedException { - return 
client().execute(DeleteAsyncResultAction.INSTANCE, new DeleteAsyncResultRequest(id)).get(); + return client().execute(TransportDeleteAsyncResultAction.TYPE, new DeleteAsyncResultRequest(id)).get(); } /** @@ -193,7 +193,11 @@ protected void ensureTaskNotRunning(String id) throws Exception { assertBusy(() -> { try { AsyncSearchResponse resp = getAsyncSearch(id); - assertFalse(resp.isRunning()); + try { + assertFalse(resp.isRunning()); + } finally { + resp.decRef(); + } } catch (Exception exc) { if (ExceptionsHelper.unwrapCause(exc.getCause()) instanceof ResourceNotFoundException == false) { throw exc; diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java index 218a99a23be0c..f160f4acdb76e 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java @@ -49,10 +49,10 @@ import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xpack.async.AsyncResultsIndexPlugin; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.async.DeleteAsyncResultAction; import org.elasticsearch.xpack.core.async.DeleteAsyncResultRequest; import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; import org.elasticsearch.xpack.core.async.GetAsyncStatusRequest; +import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction; import org.elasticsearch.xpack.core.search.action.AsyncSearchResponse; import org.elasticsearch.xpack.core.search.action.AsyncStatusResponse; import org.elasticsearch.xpack.core.search.action.GetAsyncSearchAction; @@ -1608,7 +1608,7 @@ protected AsyncStatusResponse getAsyncStatus(String id) throws ExecutionExceptio } protected AcknowledgedResponse deleteAsyncSearch(String id) throws ExecutionException, InterruptedException { - return client().execute(DeleteAsyncResultAction.INSTANCE, new DeleteAsyncResultRequest(id)).get(); + return client().execute(TransportDeleteAsyncResultAction.TYPE, new DeleteAsyncResultRequest(id)).get(); } private Map setupTwoClusters() { diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java index 8851d27fb087d..635d8db76cb8b 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java @@ -180,7 +180,7 @@ protected void onCancelled() { * listener when the task is finished or when the provided waitForCompletion * timeout occurs. In such case the consumed {@link AsyncSearchResponse} will contain partial results. 
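// The deleteAsyncSearch hunk above, like the later TransportDeleteLicenseAction.TYPE and
// TransportPutComposableIndexTemplateAction.TYPE hunks, applies one refactor: delete the
// standalone ActionType subclass and hang a TYPE constant off the transport action that
// implements it. A heavily reduced sketch of the convention (hypothetical names, not the
// real Elasticsearch classes):
final class ActionType<Response> {
    final String name;

    ActionType(String name) {
        this.name = name;
    }
}

// Before, a DeleteThingAction class existed only to hold INSTANCE and NAME; now callers
// write client.execute(TransportDeleteThingAction.TYPE, request) instead.
final class TransportDeleteThingAction {
    static final ActionType<Void> TYPE = new ActionType<>("cluster:admin/thing/delete");
}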
*/ - public void addCompletionListener(ActionListener listener, TimeValue waitForCompletion) { + public boolean addCompletionListener(ActionListener listener, TimeValue waitForCompletion) { boolean executeImmediately = false; long startTime = threadPool.relativeTimeInMillis(); synchronized (this) { @@ -203,6 +203,7 @@ public void addCompletionListener(ActionListener listener, if (executeImmediately) { ActionListener.respondAndRelease(listener, getResponseWithHeaders()); } + return true; // unused } /** diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java index dc6c780c64644..b8cf914eaea73 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java @@ -18,7 +18,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.xpack.core.search.action.AsyncSearchResponse; import org.elasticsearch.xpack.core.search.action.AsyncStatusResponse; @@ -156,18 +155,15 @@ void addQueryFailure(int shardIndex, ShardSearchFailure shardSearchFailure) { } private SearchResponse buildResponse(long taskStartTimeNanos, InternalAggregations reducedAggs) { - InternalSearchResponse internal = new InternalSearchResponse( - new SearchHits(SearchHits.EMPTY, totalHits, Float.NaN), + long tookInMillis = TimeValue.timeValueNanos(System.nanoTime() - taskStartTimeNanos).getMillis(); + return new SearchResponse( + SearchHits.empty(totalHits, Float.NaN), reducedAggs, null, - null, false, false, - reducePhase - ); - long tookInMillis = TimeValue.timeValueNanos(System.nanoTime() - taskStartTimeNanos).getMillis(); - return new SearchResponse( - internal, + null, + reducePhase, null, totalShards, successfulShards, diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestDeleteAsyncSearchAction.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestDeleteAsyncSearchAction.java index 10c2923d57edc..850e904362da0 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestDeleteAsyncSearchAction.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestDeleteAsyncSearchAction.java @@ -12,8 +12,8 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.async.DeleteAsyncResultAction; import org.elasticsearch.xpack.core.async.DeleteAsyncResultRequest; +import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction; import java.io.IOException; import java.util.List; @@ -35,6 +35,6 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { DeleteAsyncResultRequest delete = new DeleteAsyncResultRequest(request.param("id")); - return channel -> client.execute(DeleteAsyncResultAction.INSTANCE, delete, new RestToXContentListener<>(channel)); + return channel -> client.execute(TransportDeleteAsyncResultAction.TYPE, delete, new RestToXContentListener<>(channel)); } } diff --git 
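// In the MutableSearchResponse.buildResponse hunk above, the InternalSearchResponse
// wrapper disappears and took-time is computed first from a monotonic clock. The timing
// idiom in isolation, with System.nanoTime() standing in for the thread-pool clock:
import java.util.concurrent.TimeUnit;

class TookTimeDemo {
    public static void main(String[] args) throws InterruptedException {
        long taskStartTimeNanos = System.nanoTime(); // monotonic, immune to wall-clock jumps
        Thread.sleep(5);                             // the work being measured
        long tookInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - taskStartTimeNanos);
        System.out.println("took " + tookInMillis + "ms");
    }
}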
a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestGetAsyncSearchAction.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestGetAsyncSearchAction.java index 8ee4af819def0..7d35c072c2ba2 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestGetAsyncSearchAction.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestGetAsyncSearchAction.java @@ -12,7 +12,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; import org.elasticsearch.xpack.core.search.action.AsyncSearchResponse; import org.elasticsearch.xpack.core.search.action.GetAsyncSearchAction; @@ -44,7 +44,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli if (request.hasParam("keep_alive")) { get.setKeepAlive(request.paramAsTime("keep_alive", get.getKeepAlive())); } - return channel -> client.execute(GetAsyncSearchAction.INSTANCE, get, new RestChunkedToXContentListener<>(channel) { + return channel -> client.execute(GetAsyncSearchAction.INSTANCE, get, new RestRefCountedChunkedToXContentListener<>(channel) { @Override protected RestStatus getRestStatus(AsyncSearchResponse asyncSearchResponse) { return asyncSearchResponse.status(); diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java index 66eb8ce6fb518..8f554d4d8705c 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java @@ -13,7 +13,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.usage.SearchUsageHolder; import org.elasticsearch.xpack.core.search.action.AsyncSearchResponse; import org.elasticsearch.xpack.core.search.action.SubmitAsyncSearchAction; @@ -78,14 +78,13 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli submit.setKeepOnCompletion(request.paramAsBoolean("keep_on_completion", submit.isKeepOnCompletion())); } return channel -> { - RestChunkedToXContentListener listener = new RestChunkedToXContentListener<>(channel) { + RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); + cancelClient.execute(SubmitAsyncSearchAction.INSTANCE, submit, new RestRefCountedChunkedToXContentListener<>(channel) { @Override protected RestStatus getRestStatus(AsyncSearchResponse asyncSearchResponse) { return asyncSearchResponse.status(); } - }; - RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); - cancelClient.execute(SubmitAsyncSearchAction.INSTANCE, submit, listener); + }); }; } diff --git a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchResponseTests.java 
b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchResponseTests.java index dae7d79913690..f3d6f352db186 100644 --- a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchResponseTests.java +++ b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchResponseTests.java @@ -11,7 +11,6 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; @@ -25,8 +24,8 @@ import org.elasticsearch.script.ScriptException; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.xcontent.ToXContent; @@ -129,15 +128,13 @@ static SearchResponse randomSearchResponse(boolean ccs) { int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = randomIntBetween(0, successfulShards); - InternalSearchResponse internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; SearchResponse.Clusters clusters; if (ccs) { clusters = createCCSClusterObjects(20, 19, true, 10, 1, 2); } else { clusters = SearchResponse.Clusters.EMPTY; } - return new SearchResponse( - internalSearchResponse, + return SearchResponseUtils.emptyWithTotalHits( null, totalShards, successfulShards, @@ -211,9 +208,14 @@ public void testToXContentWithSearchResponseAfterCompletion() throws IOException long expectedCompletionTime = startTimeMillis + took; SearchHits hits = SearchHits.EMPTY_WITHOUT_TOTAL_HITS; - SearchResponseSections sections = new SearchResponseSections(hits, null, null, false, null, null, 2); SearchResponse searchResponse = new SearchResponse( - sections, + hits, + null, + null, + false, + null, + null, + 2, null, 10, 9, @@ -316,11 +318,25 @@ public void testToXContentWithCCSSearchResponseWhileRunning() throws IOException long took = 22968L; SearchHits hits = SearchHits.EMPTY_WITHOUT_TOTAL_HITS; - SearchResponseSections sections = new SearchResponseSections(hits, null, null, false, null, null, 2); SearchResponse.Clusters clusters = createCCSClusterObjects(3, 3, true); - SearchResponse searchResponse = new SearchResponse(sections, null, 10, 9, 1, took, ShardSearchFailure.EMPTY_ARRAY, clusters); + SearchResponse searchResponse = new SearchResponse( + hits, + null, + null, + false, + null, + null, + 2, + null, + 10, + 9, + 1, + took, + ShardSearchFailure.EMPTY_ARRAY, + clusters + ); AsyncSearchResponse asyncSearchResponse = new AsyncSearchResponse( "id", @@ -462,7 +478,6 @@ public void testToXContentWithCCSSearchResponseAfterCompletion() throws IOExcept long expectedCompletionTime = startTimeMillis + took; SearchHits hits = SearchHits.EMPTY_WITHOUT_TOTAL_HITS; - SearchResponseSections sections = new SearchResponseSections(hits, null, null, true, null, null, 2); SearchResponse.Clusters clusters = createCCSClusterObjects(4, 3, true); SearchResponse.Cluster updated = clusters.swapCluster( @@ -532,7 +547,22 @@ public void 
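// The randomSearchResponse hunk above, and the AsyncStatusResponseTests hunks further
// down, swap hand-assembled empty responses for SearchResponseUtils.emptyWithTotalHits(...).
// The underlying move is a small test factory that owns the long constructor call once.
// Reduced sketch with hypothetical types, not the real SearchResponse signature:
record FakeResponse(long totalHits, int totalShards, int successfulShards, int skippedShards) {}

final class FakeResponseUtils {
    // single place that knows how to build "empty, but with shard accounting" responses
    static FakeResponse emptyWithTotalHits(int totalShards, int successfulShards, int skippedShards) {
        return new FakeResponse(0L, totalShards, successfulShards, skippedShards);
    }
}

class FactoryDemo {
    public static void main(String[] args) {
        System.out.println(FakeResponseUtils.emptyWithTotalHits(10, 9, 1));
    }
}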
testToXContentWithCCSSearchResponseAfterCompletion() throws IOExcept ); assertNotNull("Set cluster failed for cluster " + cluster2.getClusterAlias(), updated); - SearchResponse searchResponse = new SearchResponse(sections, null, 10, 9, 1, took, new ShardSearchFailure[0], clusters); + SearchResponse searchResponse = new SearchResponse( + hits, + null, + null, + true, + null, + null, + 2, + null, + 10, + 9, + 1, + took, + new ShardSearchFailure[0], + clusters + ); AsyncSearchResponse asyncSearchResponse = new AsyncSearchResponse( "id", @@ -659,9 +689,14 @@ public void testToXContentWithSearchResponseWhileRunning() throws IOException { long took = 22968L; SearchHits hits = SearchHits.EMPTY_WITHOUT_TOTAL_HITS; - SearchResponseSections sections = new SearchResponseSections(hits, null, null, false, null, null, 2); SearchResponse searchResponse = new SearchResponse( - sections, + hits, + null, + null, + false, + null, + null, + 2, null, 10, 9, diff --git a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchTaskTests.java b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchTaskTests.java index 9dccdf39128ea..f119e590cc75c 100644 --- a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchTaskTests.java +++ b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchTaskTests.java @@ -28,7 +28,6 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; @@ -405,17 +404,14 @@ private static SearchResponse newSearchResponse( int skippedShards, ShardSearchFailure... 
failures ) { - InternalSearchResponse response = new InternalSearchResponse( + return new SearchResponse( SearchHits.EMPTY_WITH_TOTAL_HITS, InternalAggregations.EMPTY, null, - null, false, null, - 1 - ); - return new SearchResponse( - response, + null, + 1, null, totalShards, successfulShards, diff --git a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncStatusResponseTests.java b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncStatusResponseTests.java index 5aab26b3eba58..653ae8cafc531 100644 --- a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncStatusResponseTests.java +++ b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncStatusResponseTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -316,10 +316,8 @@ public void testGetStatusFromStoredSearchFailedShardsScenario() { int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = randomIntBetween(0, successfulShards); - InternalSearchResponse internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; SearchResponse.Clusters clusters = new SearchResponse.Clusters(100, 99, 1); - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, totalShards, successfulShards, @@ -343,10 +341,8 @@ public void testGetStatusFromStoredSearchWithEmptyClustersSuccessfullyCompleted( int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = randomIntBetween(0, successfulShards); - InternalSearchResponse internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, totalShards, successfulShards, @@ -370,7 +366,6 @@ public void testGetStatusFromStoredSearchWithNonEmptyClustersSuccessfullyComplet int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = randomIntBetween(0, successfulShards); - InternalSearchResponse internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; int totalClusters; int successfulClusters; @@ -390,8 +385,7 @@ public void testGetStatusFromStoredSearchWithNonEmptyClustersSuccessfullyComplet skippedClusters = totalClusters - (successfulClusters + partial); clusters = AsyncSearchResponseTests.createCCSClusterObjects(80, 80, true, successfulClusters, skippedClusters, partial); } - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, totalShards, successfulShards, @@ -421,7 +415,6 @@ public void testGetStatusFromStoredSearchWithNonEmptyClustersStillRunning() { int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = 
randomIntBetween(0, successfulShards); - InternalSearchResponse internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; int successful = randomInt(10); int partial = randomInt(10); int skipped = randomInt(10); @@ -437,8 +430,7 @@ public void testGetStatusFromStoredSearchWithNonEmptyClustersStillRunning() { } SearchResponse.Clusters clusters = AsyncSearchResponseTests.createCCSClusterObjects(100, 99, true, successful, skipped, partial); - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, totalShards, successfulShards, diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionIT.java index f65710cffe9f9..f0c96255dfe3b 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionIT.java @@ -43,7 +43,7 @@ public void testDeletePolicy() { final GetAutoscalingPolicyAction.Request getRequest = new GetAutoscalingPolicyAction.Request(policy.name()); final ResourceNotFoundException e = expectThrows( ResourceNotFoundException.class, - () -> client().execute(GetAutoscalingPolicyAction.INSTANCE, getRequest).actionGet() + client().execute(GetAutoscalingPolicyAction.INSTANCE, getRequest) ); assertThat(e.getMessage(), equalTo("autoscaling policy with name [" + policy.name() + "] does not exist")); } diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingPolicyActionIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingPolicyActionIT.java index 201b56f1db3a1..0b23a69179f36 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingPolicyActionIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingPolicyActionIT.java @@ -38,7 +38,7 @@ public void testGetNonExistentPolicy() { final GetAutoscalingPolicyAction.Request getRequest = new GetAutoscalingPolicyAction.Request(name); final ResourceNotFoundException e = expectThrows( ResourceNotFoundException.class, - () -> client().execute(GetAutoscalingPolicyAction.INSTANCE, getRequest).actionGet() + client().execute(GetAutoscalingPolicyAction.INSTANCE, getRequest) ); assertThat(e.getMessage(), containsString("autoscaling policy with name [" + name + "] does not exist")); } diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java index 497734fd5ac28..3b84ce4ea881c 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java @@ -29,7 +29,8 @@ 
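// The autoscaling test hunks above drop the lambda-plus-actionGet() shape in favour of an
// expectThrows overload that takes the future directly. A self-contained sketch of what
// such an overload has to do (expectFutureThrows is a hypothetical helper, not the actual
// ESTestCase method):
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

class ExpectThrowsDemo {
    static <E extends Throwable> E expectFutureThrows(Class<E> expected, Future<?> future) {
        try {
            future.get();
        } catch (ExecutionException e) {
            if (expected.isInstance(e.getCause())) {
                return expected.cast(e.getCause()); // the interesting failure is the cause
            }
            throw new AssertionError("unexpected failure cause", e.getCause());
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new AssertionError(e);
        }
        throw new AssertionError("expected " + expected.getSimpleName() + " but the call succeeded");
    }

    public static void main(String[] args) {
        Future<Void> failing = CompletableFuture.failedFuture(new IllegalStateException("boom"));
        IllegalStateException e = expectFutureThrows(IllegalStateException.class, failing);
        System.out.println("caught: " + e.getMessage());
    }
}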
import org.elasticsearch.xpack.core.ilm.SearchableSnapshotAction; import org.elasticsearch.xpack.core.ilm.WaitForDataTierStep; import org.elasticsearch.xpack.core.ilm.action.ExplainLifecycleAction; -import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; +import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; import org.elasticsearch.xpack.slm.SnapshotLifecycle; import java.util.Collection; @@ -99,8 +100,8 @@ public void testZeroToOne() throws Exception { singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(fsRepoName, randomBoolean())) ); LifecyclePolicy lifecyclePolicy = new LifecyclePolicy("policy", Map.of("hot", hotPhase, "frozen", frozenPhase)); - PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy); - assertAcked(client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get()); + PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(lifecyclePolicy); + assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get()); Settings settings = Settings.builder() .put(indexSettings()) diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/FrozenStorageDeciderIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/FrozenStorageDeciderIT.java index 13056ed2e4d5e..13e7d3aca1501 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/FrozenStorageDeciderIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/FrozenStorageDeciderIT.java @@ -31,8 +31,8 @@ public void testScale() throws Exception { capacity().results().get("frozen").requiredCapacity().total().storage(), equalTo( ByteSizeValue.ofBytes( - (long) (statsResponse.getPrimaries().store.totalDataSetSize().getBytes() - * FrozenStorageDeciderService.DEFAULT_PERCENTAGE) / 100 + (long) (statsResponse.getPrimaries().store.totalDataSetSizeInBytes() * FrozenStorageDeciderService.DEFAULT_PERCENTAGE) + / 100 ) ) ); diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ProactiveStorageIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ProactiveStorageIT.java index c5e062df5e77c..0f4983c1b6994 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ProactiveStorageIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ProactiveStorageIT.java @@ -9,7 +9,7 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; @@ -67,7 +67,7 @@ public void testScaleUp() throws IOException, InterruptedException { capacity(); IndicesStatsResponse stats = indicesAdmin().prepareStats(dsName).clear().setStore(true).get(); - long used = stats.getTotal().getStore().getSizeInBytes(); 
+ long used = stats.getTotal().getStore().sizeInBytes(); long maxShardSize = Arrays.stream(stats.getShards()).mapToLong(s -> s.getStats().getStore().sizeInBytes()).max().orElseThrow(); // As long as usage is above low watermark, we will trigger a proactive scale up, since the simulated shards have an in-sync // set and therefore allocating these do not skip the low watermark check in the disk threshold decider. @@ -120,8 +120,8 @@ private void putAutoscalingPolicy(String policyName, Settings settings) { private static void createDataStreamAndTemplate(String dataStreamName) throws IOException { client().execute( - PutComposableIndexTemplateAction.INSTANCE, - new PutComposableIndexTemplateAction.Request(dataStreamName + "_template").indexTemplate( + TransportPutComposableIndexTemplateAction.TYPE, + new TransportPutComposableIndexTemplateAction.Request(dataStreamName + "_template").indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(Collections.singletonList(dataStreamName)) .template(new Template(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build(), null, null)) diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java index 5c097cdc24ed1..5f724509ec98a 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java @@ -78,7 +78,7 @@ public void testScaleUp() throws InterruptedException { capacity(); IndicesStatsResponse stats = indicesAdmin().prepareStats(indexName).clear().setStore(true).get(); - long used = stats.getTotal().getStore().getSizeInBytes(); + long used = stats.getTotal().getStore().sizeInBytes(); long minShardSize = Arrays.stream(stats.getShards()).mapToLong(s -> s.getStats().getStore().sizeInBytes()).min().orElseThrow(); long maxShardSize = Arrays.stream(stats.getShards()).mapToLong(s -> s.getStats().getStore().sizeInBytes()).max().orElseThrow(); long enoughSpace = used + HIGH_WATERMARK_BYTES + 1; @@ -274,14 +274,14 @@ public void testScaleWhileShrinking() throws Exception { refresh(); IndicesStatsResponse stats = indicesAdmin().prepareStats(indexName).clear().setStore(true).get(); - long used = stats.getTotal().getStore().getSizeInBytes(); + long used = stats.getTotal().getStore().sizeInBytes(); long maxShardSize = Arrays.stream(stats.getShards()).mapToLong(s -> s.getStats().getStore().sizeInBytes()).max().orElseThrow(); Map byNode = Arrays.stream(stats.getShards()) .collect( Collectors.groupingBy( s -> s.getShardRouting().currentNodeId(), - Collectors.summingLong(s -> s.getStats().getStore().getSizeInBytes()) + Collectors.summingLong(s -> s.getStats().getStore().sizeInBytes()) ) ); @@ -427,7 +427,7 @@ public void testScaleDuringSplitOrClone() throws Exception { refresh(); IndicesStatsResponse stats = indicesAdmin().prepareStats(indexName).clear().setStore(true).get(); - long used = stats.getTotal().getStore().getSizeInBytes(); + long used = stats.getTotal().getStore().sizeInBytes(); long enoughSpace = used + HIGH_WATERMARK_BYTES + 1; diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java 
b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java index 5c47f5a9dc6a4..3f147c94c5ec2 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java @@ -63,6 +63,9 @@ import java.util.stream.IntStream; import java.util.stream.Stream; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresent; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresentWith; import static org.elasticsearch.xpack.autoscaling.capacity.nodeinfo.AutoscalingNodeInfoService.FETCH_TIMEOUT; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; @@ -148,8 +151,8 @@ public void testAddRemoveNode() { ); client.respondStats(response, () -> { Sets.union(missingNodes, Sets.difference(previousNodes, nodes)) - .forEach(n -> assertThat(service.snapshot().get(n).isEmpty(), is(true))); - Sets.intersection(previousSucceededNodes, nodes).forEach(n -> assertThat(service.snapshot().get(n).isPresent(), is(true))); + .forEach(n -> assertThat(service.snapshot().get(n), isEmpty())); + Sets.intersection(previousSucceededNodes, nodes).forEach(n -> assertThat(service.snapshot().get(n), isPresent())); }); client.respondInfo(responseInfo, () -> { @@ -159,7 +162,7 @@ public void testAddRemoveNode() { client.assertNoResponder(); assertMatchesResponse(succeedingNodes, response, responseInfo); - failingNodes.forEach(n -> assertThat(service.snapshot().get(n).isEmpty(), is(true))); + failingNodes.forEach(n -> assertThat(service.snapshot().get(n), isEmpty())); previousNodes.clear(); previousNodes.addAll(nodes); @@ -177,7 +180,7 @@ public void testNotMaster() { // client throws if called. service.onClusterChanged(new ClusterChangedEvent("test", state, ClusterState.EMPTY_STATE)); - nodes.forEach(n -> assertThat(service.snapshot().get(n).isEmpty(), is(true))); + nodes.forEach(n -> assertThat(service.snapshot().get(n), isEmpty())); } public void testNoLongerMaster() { @@ -208,7 +211,7 @@ public void testNoLongerMaster() { // client throws if called. 
service.onClusterChanged(new ClusterChangedEvent("test", notMasterState, masterState)); - nodes.forEach(n -> assertThat(service.snapshot().get(n).isEmpty(), is(true))); + nodes.forEach(n -> assertThat(service.snapshot().get(n), isEmpty())); } public void testStatsFails() { @@ -218,7 +221,7 @@ public void testStatsFails() { client.respondStats((r, listener) -> listener.onFailure(randomFrom(new IllegalStateException(), new RejectedExecutionException()))); service.onClusterChanged(new ClusterChangedEvent("test", state, ClusterState.EMPTY_STATE)); - nodes.forEach(n -> assertThat(service.snapshot().get(n).isEmpty(), is(true))); + nodes.forEach(n -> assertThat(service.snapshot().get(n), isEmpty())); NodesStatsResponse response = new NodesStatsResponse( ClusterName.DEFAULT, @@ -249,7 +252,7 @@ public void testInfoFails() { client.respondStats(response, () -> {}); client.respondInfo((r, listener) -> listener.onFailure(randomFrom(new IllegalStateException(), new RejectedExecutionException()))); service.onClusterChanged(new ClusterChangedEvent("test", state, ClusterState.EMPTY_STATE)); - nodes.forEach(n -> assertThat(service.snapshot().get(n).isEmpty(), is(true))); + nodes.forEach(n -> assertThat(service.snapshot().get(n), isEmpty())); NodesInfoResponse responseInfo = new NodesInfoResponse( ClusterName.DEFAULT, nodes.stream().map(n -> infoForNode(n, randomIntBetween(1, 64))).collect(Collectors.toList()), @@ -316,7 +319,7 @@ public void testRestartNode() { assertMatchesResponse(Sets.intersection(restartedNodes, nodes), response, responseInfo); assertMatchesResponse(Sets.difference(restartedNodes, nodes), restartedStatsResponse, restartedInfoResponse); - Sets.difference(nodes, restartedNodes).forEach(n -> assertThat(service.snapshot().get(n).isEmpty(), is(true))); + Sets.difference(nodes, restartedNodes).forEach(n -> assertThat(service.snapshot().get(n), isEmpty())); } public void testConcurrentStateUpdate() throws Exception { @@ -396,10 +399,9 @@ private Set randomIrrelevantRoles(Set> relevantRo public void assertMatchesResponse(Set nodes, NodesStatsResponse response, NodesInfoResponse infoResponse) { nodes.forEach(n -> { - assertThat(service.snapshot().get(n).isPresent(), is(true)); assertThat( - service.snapshot().get(n).get(), - equalTo( + service.snapshot().get(n), + isPresentWith( new AutoscalingNodeInfo( response.getNodesMap().get(n.getId()).getOs().getMem().getAdjustedTotal().getBytes(), Processors.of(infoResponse.getNodesMap().get(n.getId()).getInfo(OsInfo.class).getFractionalAllocatedProcessors()) diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java index 1766d8fe47820..7ca37f376045f 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java @@ -20,19 +20,19 @@ public class BlobCacheMetrics { public BlobCacheMetrics(MeterRegistry meterRegistry) { this( meterRegistry.registerLongCounter( - "elasticsearch.blob_cache.miss_that_triggered_read", + "es.blob_cache.miss_that_triggered_read.total", "The number of times there was a cache miss that triggered a read from the blob store", "count" ), meterRegistry.registerLongCounter( - "elasticsearch.blob_cache.count_of_evicted_used_regions", + "es.blob_cache.count_of_evicted_used_regions.total", "The number of times a cache entry was evicted where the frequency was not zero", 
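// The AutoscalingNodesInfoServiceTests hunks above replace assertThat(opt.isPresent(),
// is(true)) with dedicated Optional matchers (isEmpty / isPresent / isPresentWith). The
// win is in the failure message: a boolean assertion can only report "expected true",
// while a matcher can print the Optional's contents. A plain-Java approximation of
// isPresentWith (hypothetical helper, not the ES OptionalMatchers class):
import java.util.Optional;

class OptionalMatcherDemo {
    static <T> void assertPresentWith(Optional<T> actual, T expected) {
        if (actual.isEmpty() || actual.get().equals(expected) == false) {
            // the message names the real value instead of a bare boolean mismatch
            throw new AssertionError("expected Optional[" + expected + "] but was " + actual);
        }
    }

    public static void main(String[] args) {
        assertPresentWith(Optional.of(42), 42);     // passes
        // assertPresentWith(Optional.empty(), 42); // would fail with "... but was Optional.empty"
    }
}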
"entries" ), meterRegistry.registerLongHistogram( - "elasticsearch.blob_cache.cache_miss_load_times", - "The timing data for populating entries in the blob store resulting from a cache miss.", - "count" + "es.blob_cache.cache_miss_load_times.histogram", + "The time in microseconds for populating entries in the blob store resulting from a cache miss, expressed as a histogram.", + "micros" ) ); } diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index be95f5c883de8..5e8933f86ae7d 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -55,6 +55,7 @@ import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.LongAdder; import java.util.function.IntConsumer; @@ -815,7 +816,7 @@ public int populateAndRead( ) throws Exception { // We are interested in the total time that the system spends when fetching a result (including time spent queuing), so we start // our measurement here. - final long startTime = threadPool.relativeTimeInMillis(); + final long startTime = threadPool.relativeTimeInNanos(); RangeMissingHandler writerInstrumentationDecorator = ( SharedBytes.IO channel, int channelPos, @@ -823,7 +824,7 @@ public int populateAndRead( int length, IntConsumer progressUpdater) -> { writer.fillCacheRange(channel, channelPos, relativePos, length, progressUpdater); - var elapsedTime = threadPool.relativeTimeInMillis() - startTime; + var elapsedTime = TimeUnit.NANOSECONDS.toMicros(threadPool.relativeTimeInNanos() - startTime); SharedBlobCacheService.this.blobCacheMetrics.getCacheMissLoadTimes().record(elapsedTime); SharedBlobCacheService.this.blobCacheMetrics.getCacheMissCounter().increment(); }; diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java index e1ec9013ef257..df8f19ab3888c 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.datastreams.ModifyDataStreamsAction; import org.elasticsearch.client.internal.Client; @@ -636,7 +636,7 @@ public void testAutoFollowExclusion() throws Exception { public void testAutoFollowDatastreamWithClosingFollowerIndex() throws Exception { final String datastream = "logs-1"; - PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request("template-id"); + 
TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request("template-id"); request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(List.of("logs-*")) @@ -653,7 +653,7 @@ public void testAutoFollowDatastreamWithClosingFollowerIndex() throws Exception .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build() ); - assertAcked(leaderClient().execute(PutComposableIndexTemplateAction.INSTANCE, request).get()); + assertAcked(leaderClient().execute(TransportPutComposableIndexTemplateAction.TYPE, request).get()); CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(datastream); assertAcked(leaderClient().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get()); diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java index f7baafa8402d0..dff3ff935595f 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java @@ -537,7 +537,7 @@ public void testCcrRepositoryFetchesSnapshotShardSizeFromIndexShardStoreStats() assertThat(indexShardSnapshotStatus.getStage(), is(IndexShardSnapshotStatus.Stage.DONE)); assertThat( indexShardSnapshotStatus.getTotalSize(), - equalTo(indexStats.getIndexShards().get(shardId).getPrimary().getStore().getSizeInBytes()) + equalTo(indexStats.getIndexShards().get(shardId).getPrimary().getStore().sizeInBytes()) ); } @@ -594,7 +594,7 @@ public void testCcrRepositoryFetchesSnapshotShardSizeFromIndexShardStoreStats() assertThat( "Snapshot shard size fetched for follower shard [" + shardId + "] does not match leader store size", fetchedSnapshotShardSizes.get(shardId), - equalTo(indexStats.getIndexShards().get(shardId).getPrimary().getStore().getSizeInBytes()) + equalTo(indexStats.getIndexShards().get(shardId).getPrimary().getStore().sizeInBytes()) ); } diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index 070338c07003c..097592a03d5d0 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -34,7 +34,7 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.stats.ShardStats; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.bulk.BulkProcessor2; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; @@ -1734,8 +1734,8 @@ private void putFollowerTemplate(String setting, String settingValue) { ComposableIndexTemplate cit = ComposableIndexTemplate.builder().indexPatterns(List.of("follower")).template(template).build(); assertAcked( followerClient().execute( - PutComposableIndexTemplateAction.INSTANCE, - new 
PutComposableIndexTemplateAction.Request("my-it").indexTemplate(cit) + TransportPutComposableIndexTemplateAction.TYPE, + new TransportPutComposableIndexTemplateAction.Request("my-it").indexTemplate(cit) ) ); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index b90b203e2d29f..c99726803e00e 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -517,7 +517,7 @@ public IndexShardSnapshotStatus.Copy getShardSnapshotStatus(SnapshotId snapshotI final ShardRouting shardRouting = shardStats.getShardRouting(); if (shardRouting.shardId().id() == shardId.getId() && shardRouting.primary() && shardRouting.active()) { // we only care about the shard size here for shard allocation, populate the rest with dummy values - final long totalSize = shardStats.getStats().getStore().getSizeInBytes(); + final long totalSize = shardStats.getStats().getStore().sizeInBytes(); return IndexShardSnapshotStatus.newDone(0L, 0L, 1, 1, totalSize, totalSize, DUMMY_GENERATION); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java index 6e5c2e4f396ca..e394f708b07f5 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java @@ -13,7 +13,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; @@ -43,7 +43,7 @@ protected RestChannelConsumer prepareRequest(final RestRequest restRequest, fina request, new ThreadedActionListener<>( client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME), - new RestChunkedToXContentListener<>(channel) + new RestRefCountedChunkedToXContentListener<>(channel) ) ); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowInfoAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowInfoAction.java index 6607b532c56e0..4b3ac9f605d3f 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowInfoAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowInfoAction.java @@ -11,7 +11,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.xpack.core.ccr.action.FollowInfoAction; import java.util.List; @@ -34,7 +34,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) { final FollowInfoAction.Request request = new FollowInfoAction.Request(); request.setFollowerIndices(Strings.splitStringByCommaToArray(restRequest.param("index"))); - return channel -> 
client.execute(FollowInfoAction.INSTANCE, request, new RestChunkedToXContentListener<>(channel)); + return channel -> client.execute(FollowInfoAction.INSTANCE, request, new RestRefCountedChunkedToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowStatsAction.java index 07fdaef94637c..7592db0480b92 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowStatsAction.java @@ -14,7 +14,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; @@ -45,7 +45,7 @@ protected RestChannelConsumer prepareRequest(final RestRequest restRequest, fina request, new ThreadedActionListener<>( client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME), - new RestChunkedToXContentListener<>(channel) + new RestRefCountedChunkedToXContentListener<>(channel) ) ); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index 047a2d6225035..ea4bc8c92047a 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -7,10 +7,10 @@ package org.elasticsearch.xpack; +import org.apache.logging.log4j.Level; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; @@ -38,6 +38,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.network.NetworkModule; @@ -59,6 +60,7 @@ import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.license.LicenseSettings; import org.elasticsearch.license.LicensesMetadata; +import org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchResponseUtils; @@ -421,24 +423,19 @@ private ClusterHealthStatus ensureColor( {} timed out: leader cluster state: {} - leader cluster hot threads: - {} leader cluster tasks: {} follower cluster state: {} - follower cluster hot threads: - {} follower cluster tasks: {}""", method, leaderClient().admin().cluster().prepareState().get().getState(), - getHotThreads(leaderClient()), - 
leaderClient().admin().cluster().preparePendingClusterTasks().get(), + ESIntegTestCase.getClusterPendingTasks(leaderClient()), followerClient().admin().cluster().prepareState().get().getState(), - getHotThreads(followerClient()), - followerClient().admin().cluster().preparePendingClusterTasks().get() + ESIntegTestCase.getClusterPendingTasks(followerClient()) ); + HotThreads.logLocalHotThreads(logger, Level.INFO, "hot threads at timeout", ReferenceDocs.LOGGING); fail("timed out waiting for " + color + " state"); } assertThat( @@ -450,19 +447,6 @@ private ClusterHealthStatus ensureColor( return actionGet.getStatus(); } - static String getHotThreads(Client client) { - return client.admin() - .cluster() - .prepareNodesHotThreads() - .setThreads(99999) - .setIgnoreIdleThreads(false) - .get() - .getNodes() - .stream() - .map(NodeHotThreads::getHotThreads) - .collect(Collectors.joining("\n")); - } - protected final Index resolveLeaderIndex(String index) { GetIndexResponse getIndexResponse = leaderClient().admin().indices().prepareGetIndex().setIndices(index).get(); assertTrue("index " + index + " not found", getIndexResponse.getSettings().containsKey(index)); diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java index d1599c8b6a827..9d3821d64626f 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java @@ -144,14 +144,14 @@ public void testSnapshotAndRestoreWithNested() throws Exception { assertMappings(sourceIdx, requireRouting, true); SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch(sourceIdx).setQuery(QueryBuilders.idsQuery().addIds("" + randomIntBetween(0, builders.length))).get() + prepareSearch(sourceIdx).setQuery(QueryBuilders.idsQuery().addIds("" + randomIntBetween(0, builders.length))) ); assertTrue(e.toString().contains("_source only indices can't be searched or filtered")); // can-match phase pre-filters access to non-existing field assertHitCount(prepareSearch(sourceIdx).setQuery(QueryBuilders.termQuery("field1", "bar")), 0); // make sure deletes do not work String idToDelete = "" + randomIntBetween(0, builders.length); - expectThrows(ClusterBlockException.class, () -> client().prepareDelete(sourceIdx, idToDelete).setRouting("r" + idToDelete).get()); + expectThrows(ClusterBlockException.class, client().prepareDelete(sourceIdx, idToDelete).setRouting("r" + idToDelete)); internalCluster().ensureAtLeastNumDataNodes(2); setReplicaCount(1, sourceIdx); ensureGreen(sourceIdx); diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java index f46c97e0ffda6..3e5be8b4ae2ff 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesAction; 
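// CcrIntegTestCase above stops collecting hot threads from every node into the failure
// message and instead logs them locally once, right before failing; cluster states and
// pending tasks stay in the formatted message. A reduced sketch of the
// diagnostics-on-timeout shape (the Logger plumbing is hypothetical; only
// HotThreads.logLocalHotThreads in the hunk is the real call):
class TimeoutDiagnosticsDemo {
    interface Logger {
        void info(String msg);
    }

    static void ensureColor(boolean timedOut, Logger logger) {
        if (timedOut) {
            logger.info("leader/follower cluster state and pending tasks ..."); // cheap context first
            // then the expensive local dump: HotThreads.logLocalHotThreads(logger, INFO, ...)
            throw new AssertionError("timed out waiting for cluster health");
        }
    }

    public static void main(String[] args) {
        ensureColor(false, System.out::println); // flip the flag to exercise the failure path
    }
}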
import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeType; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DesiredNode; @@ -343,8 +343,8 @@ public void testTemplateOverridden() { Template t = new Template(Settings.builder().putNull(DataTier.TIER_PREFERENCE).build(), null, null); ComposableIndexTemplate ct = ComposableIndexTemplate.builder().indexPatterns(Collections.singletonList(index)).template(t).build(); client().execute( - PutComposableIndexTemplateAction.INSTANCE, - new PutComposableIndexTemplateAction.Request("template").indexTemplate(ct) + TransportPutComposableIndexTemplateAction.TYPE, + new TransportPutComposableIndexTemplateAction.Request("template").indexTemplate(ct) ).actionGet(); indicesAdmin().prepareCreate(index).setWaitForActiveShards(0).get(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseAction.java deleted file mode 100644 index 4a17e1a8d0ec5..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseAction.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ -package org.elasticsearch.license; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; - -public class DeleteLicenseAction extends ActionType { - - public static final DeleteLicenseAction INSTANCE = new DeleteLicenseAction(); - public static final String NAME = "cluster:admin/xpack/license/delete"; - - private DeleteLicenseAction() { - super(NAME, AcknowledgedResponse::readFrom); - } - -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseRequestBuilder.java index 6ec8b29235a07..b5e34f20002d7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseRequestBuilder.java @@ -17,6 +17,6 @@ public class DeleteLicenseRequestBuilder extends AcknowledgedRequestBuilder< DeleteLicenseRequestBuilder> { public DeleteLicenseRequestBuilder(ElasticsearchClient client) { - super(client, DeleteLicenseAction.INSTANCE, new DeleteLicenseRequest()); + super(client, TransportDeleteLicenseAction.TYPE, new DeleteLicenseRequest()); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java index bcfd58f2a545f..6c942ab911416 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java @@ -14,15 +14,14 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.RestApiVersion; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; -import java.io.InputStream; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Base64; @@ -660,9 +659,11 @@ public static License fromSource(BytesReference bytes, XContentType xContentType } // EMPTY is safe here because we don't call namedObject try ( - InputStream byteStream = bytes.streamInput(); - XContentParser parser = xContentType.xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, byteStream) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + bytes, + xContentType + ) ) { License license = null; if (parser.nextToken() == XContentParser.Token.START_OBJECT) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java index 4a9c5340dde6a..1a2ed66f6d5f5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java @@ -41,6 +41,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC return channel -> client.admin() .cluster() - .execute(DeleteLicenseAction.INSTANCE, deleteLicenseRequest, new 
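// License.fromSource above collapses the two managed resources (stream + parser) into a
// single helper call while keeping try-with-resources, so the parser is closed on every
// path. The shape in isolation (MiniParser is a hypothetical stand-in for XContentParser):
import java.io.Closeable;

class ParserHelperDemo {
    static final class MiniParser implements Closeable {
        static MiniParser create(byte[] source) {
            return new MiniParser(); // one factory call instead of stream-then-parser
        }

        String nextToken() {
            return "START_OBJECT";
        }

        @Override
        public void close() {
            System.out.println("parser closed");
        }
    }

    public static void main(String[] args) {
        try (MiniParser parser = MiniParser.create(new byte[0])) {
            System.out.println("first token: " + parser.nextToken());
        } // closed here even if parsing had thrown
    }
}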
RestToXContentListener<>(channel)); + .execute(TransportDeleteLicenseAction.TYPE, deleteLicenseRequest, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java index 606583e83b337..9d11cf2b59fec 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java @@ -8,6 +8,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; @@ -25,6 +26,7 @@ public class TransportDeleteLicenseAction extends AcknowledgedTransportMasterNodeAction { + public static final ActionType TYPE = ActionType.localOnly("cluster:admin/xpack/license/delete"); private final MutableLicenseService licenseService; @Inject @@ -37,7 +39,7 @@ public TransportDeleteLicenseAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super( - DeleteLicenseAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java index 59fd1db1e72f2..0ea0cba8198c5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java @@ -362,7 +362,7 @@ public FeatureSet(String name, boolean available, boolean enabled) { public FeatureSet(StreamInput in) throws IOException { this(in.readString(), readAvailable(in), in.readBoolean()); if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { - in.readMap(); // backcompat reading native code info, but no longer used here + in.readGenericMap(); // backcompat reading native code info, but no longer used here } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java index 0b092a4717d16..5bf5ecb445c57 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java @@ -13,23 +13,17 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.TimeValue; import org.elasticsearch.protocol.xpack.graph.Connection.ConnectionId; -import org.elasticsearch.protocol.xpack.graph.Connection.UnresolvedConnection; import org.elasticsearch.protocol.xpack.graph.Vertex.VertexId; -import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Collection; import java.util.HashMap; -import java.util.List; import java.util.Map; import static 
org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; /** * Graph explore response holds a graph of {@link Vertex} and {@link Connection} objects @@ -201,50 +195,4 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "GraphExploreResponsenParser", - true, - args -> { - GraphExploreResponse result = new GraphExploreResponse(); - result.vertices = new HashMap<>(); - result.connections = new HashMap<>(); - - result.tookInMillis = (Long) args[0]; - result.timedOut = (Boolean) args[1]; - - @SuppressWarnings("unchecked") - List vertices = (List) args[2]; - @SuppressWarnings("unchecked") - List unresolvedConnections = (List) args[3]; - @SuppressWarnings("unchecked") - List failures = (List) args[4]; - for (Vertex vertex : vertices) { - // reverse-engineer if detailed stats were requested - - // mainly here for testing framework's equality tests - result.returnDetailedInfo = result.returnDetailedInfo || vertex.getFg() > 0; - result.vertices.put(vertex.getId(), vertex); - } - for (UnresolvedConnection unresolvedConnection : unresolvedConnections) { - Connection resolvedConnection = unresolvedConnection.resolve(vertices); - result.connections.put(resolvedConnection.getId(), resolvedConnection); - } - if (failures.size() > 0) { - result.shardFailures = failures.toArray(new ShardSearchFailure[failures.size()]); - } - return result; - } - ); - - static { - PARSER.declareLong(constructorArg(), TOOK); - PARSER.declareBoolean(constructorArg(), TIMED_OUT); - PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> Vertex.fromXContent(p), VERTICES); - PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> UnresolvedConnection.fromXContent(p), CONNECTIONS); - PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ShardSearchFailure.fromXContent(p), FAILURES); - } - - public static GraphExploreResponse fromXContent(XContentParser parser) throws IOException { - return PARSER.apply(parser, null); - } - } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java index 1b5f26d23aca6..c5bcf33e341c7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java @@ -8,18 +8,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; - /** * A vertex in a graph response represents a single term (a field and value pair) * which appears in one or more documents found as part of the graph exploration. 
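For readers unfamiliar with the `fromXContent` parsers being deleted from `GraphExploreResponse` and `Vertex` above: they followed the standard `ConstructingObjectParser` idiom. A self-contained sketch of that idiom, with an invented `Point` type (only the API calls mirror the removed parsers):

```java
import org.elasticsearch.xcontent.ConstructingObjectParser;
import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.XContentParser;

import java.io.IOException;

import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg;

record Point(String field, long count) {

    // Args arrive in declaration order; optional args may be null, exactly as in
    // the removed VertexParser's handling of its optional bg/fg counts.
    private static final ConstructingObjectParser<Point, Void> PARSER = new ConstructingObjectParser<>(
        "point",
        true, // lenient: ignore unknown fields, as the removed parsers did
        args -> new Point((String) args[0], args[1] == null ? 0 : (Long) args[1])
    );

    static {
        PARSER.declareString(constructorArg(), new ParseField("field"));
        PARSER.declareLong(optionalConstructorArg(), new ParseField("count"));
    }

    static Point fromXContent(XContentParser parser) throws IOException {
        return PARSER.apply(parser, null);
    }
}
```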
@@ -99,31 +94,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         return builder;
     }

-    private static final ConstructingObjectParser<Vertex, Void> PARSER = new ConstructingObjectParser<>("VertexParser", true, args -> {
-        String field = (String) args[0];
-        String term = (String) args[1];
-        double weight = (Double) args[2];
-        int depth = (Integer) args[3];
-        Long optionalBg = (Long) args[4];
-        Long optionalFg = (Long) args[5];
-        long bg = optionalBg == null ? 0 : optionalBg;
-        long fg = optionalFg == null ? 0 : optionalFg;
-        return new Vertex(field, term, weight, depth, bg, fg);
-    });
-
-    static {
-        PARSER.declareString(constructorArg(), FIELD);
-        PARSER.declareString(constructorArg(), TERM);
-        PARSER.declareDouble(constructorArg(), WEIGHT);
-        PARSER.declareInt(constructorArg(), DEPTH);
-        PARSER.declareLong(optionalConstructorArg(), BG);
-        PARSER.declareLong(optionalConstructorArg(), FG);
-    }
-
-    static Vertex fromXContent(XContentParser parser) throws IOException {
-        return PARSER.apply(parser, null);
-    }
-
     /**
      * @return a {@link VertexId} object that uniquely identifies this Vertex
      */
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java
index 80863c8b20cec..c6d673aec7d2a 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java
@@ -9,27 +9,14 @@
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.xcontent.ObjectParser;
-import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.ToXContentObject;
 import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentParser;

 import java.io.IOException;
 import java.util.Objects;

 public class DeleteWatchResponse extends ActionResponse implements ToXContentObject {

-    private static final ObjectParser<DeleteWatchResponse, Void> PARSER = new ObjectParser<>(
-        "x_pack_delete_watch_response",
-        DeleteWatchResponse::new
-    );
-    static {
-        PARSER.declareString(DeleteWatchResponse::setId, new ParseField("_id"));
-        PARSER.declareLong(DeleteWatchResponse::setVersion, new ParseField("_version"));
-        PARSER.declareBoolean(DeleteWatchResponse::setFound, new ParseField("found"));
-    }
-
     private String id;
     private long version;
     private boolean found;
@@ -100,7 +87,4 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         return builder.startObject().field("_id", id).field("_version", version).field("found", found).endObject();
     }

-    public static DeleteWatchResponse fromXContent(XContentParser parser) throws IOException {
-        return PARSER.parse(parser, null);
-    }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java
index f4b355b7ff1f1..5c1f53bef3ef0 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java
@@ -10,29 +10,14 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.index.seqno.SequenceNumbers;
-import org.elasticsearch.xcontent.ObjectParser;
-import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.ToXContentObject;
 import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentParser;

 import java.io.IOException;
 import java.util.Objects;

 public class PutWatchResponse extends ActionResponse implements ToXContentObject {

-    private static final ObjectParser<PutWatchResponse, Void> PARSER = new ObjectParser<>(
-        "x_pack_put_watch_response",
-        PutWatchResponse::new
-    );
-    static {
-        PARSER.declareString(PutWatchResponse::setId, new ParseField("_id"));
-        PARSER.declareLong(PutWatchResponse::setVersion, new ParseField("_version"));
-        PARSER.declareLong(PutWatchResponse::setSeqNo, new ParseField("_seq_no"));
-        PARSER.declareLong(PutWatchResponse::setPrimaryTerm, new ParseField("_primary_term"));
-        PARSER.declareBoolean(PutWatchResponse::setCreated, new ParseField("created"));
-    }
-
     private String id;
     private long version;
     private long seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
@@ -137,8 +122,4 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
             .endObject();
     }

-    public static PutWatchResponse fromXContent(XContentParser parser) throws IOException {
-        return PARSER.parse(parser, null);
-    }
-
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiFeatureSetUsage.java
index 789a927dd6bf4..ac261270db6d6 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiFeatureSetUsage.java
@@ -77,7 +77,7 @@ public class HealthApiFeatureSetUsage extends XPackFeatureSet.Usage {

     public HealthApiFeatureSetUsage(StreamInput in) throws IOException {
         super(in);
-        usageStats = in.readMap();
+        usageStats = in.readGenericMap();
     }

     public HealthApiFeatureSetUsage(boolean available, boolean enabled, @Nullable Counters stats) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
index 761390266231e..5960c4c6f79d2 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
@@ -38,7 +38,6 @@
 import org.elasticsearch.index.mapper.MetadataFieldMapper;
 import org.elasticsearch.indices.recovery.RecoverySettings;
 import org.elasticsearch.license.ClusterStateLicenseService;
-import org.elasticsearch.license.DeleteLicenseAction;
 import org.elasticsearch.license.GetBasicStatusAction;
 import org.elasticsearch.license.GetLicenseAction;
 import org.elasticsearch.license.GetTrialStatusAction;
@@ -97,7 +96,6 @@
 import org.elasticsearch.xpack.core.action.XPackUsageAction;
 import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction;
 import org.elasticsearch.xpack.core.action.XPackUsageResponse;
-import org.elasticsearch.xpack.core.async.DeleteAsyncResultAction;
 import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction;
 import org.elasticsearch.xpack.core.datatiers.DataTiersInfoTransportAction;
 import org.elasticsearch.xpack.core.datatiers.DataTiersUsageTransportAction;
@@ -351,14 +349,14 @@ public Collection<?> createComponents(PluginServices services) {
         actions.add(new ActionHandler<>(XPackUsageAction.INSTANCE, getUsageAction()));
         actions.add(new ActionHandler<>(PutLicenseAction.INSTANCE, TransportPutLicenseAction.class));
         actions.add(new ActionHandler<>(GetLicenseAction.INSTANCE, TransportGetLicenseAction.class));
-        actions.add(new ActionHandler<>(DeleteLicenseAction.INSTANCE, TransportDeleteLicenseAction.class));
+        actions.add(new ActionHandler<>(TransportDeleteLicenseAction.TYPE, TransportDeleteLicenseAction.class));
         actions.add(new ActionHandler<>(PostStartTrialAction.INSTANCE, TransportPostStartTrialAction.class));
         actions.add(new ActionHandler<>(GetTrialStatusAction.INSTANCE, TransportGetTrialStatusAction.class));
         actions.add(new ActionHandler<>(PostStartBasicAction.INSTANCE, TransportPostStartBasicAction.class));
         actions.add(new ActionHandler<>(GetBasicStatusAction.INSTANCE, TransportGetBasicStatusAction.class));
         actions.add(new ActionHandler<>(TransportGetFeatureUsageAction.TYPE, TransportGetFeatureUsageAction.class));
         actions.add(new ActionHandler<>(TermsEnumAction.INSTANCE, TransportTermsEnumAction.class));
-        actions.add(new ActionHandler<>(DeleteAsyncResultAction.INSTANCE, TransportDeleteAsyncResultAction.class));
+        actions.add(new ActionHandler<>(TransportDeleteAsyncResultAction.TYPE, TransportDeleteAsyncResultAction.class));
         actions.add(new ActionHandler<>(XPackInfoFeatureAction.DATA_TIERS, DataTiersInfoTransportAction.class));
         actions.add(new ActionHandler<>(XPackUsageFeatureAction.DATA_TIERS, DataTiersUsageTransportAction.class));
         actions.add(new ActionHandler<>(XPackUsageFeatureAction.DATA_STREAMS, DataStreamUsageTransportAction.class));
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java
index 133819cd601d7..f10e7cf170bde 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java
@@ -160,6 +160,12 @@ public Iterator<Setting<?>> settings() {
         Property.NodeScope
     );

+    /** Optional setting to prevent startup if required providers are not discovered at runtime */
+    public static final Setting<List<String>> FIPS_REQUIRED_PROVIDERS = Setting.stringListSetting(
+        "xpack.security.fips_mode.required_providers",
+        Property.NodeScope
+    );
+
     /**
     * Setting for enabling the enrollment process, ie the enroll APIs are enabled, and the initial cluster node generates and displays
      * enrollment tokens (for Kibana and sometimes for ES nodes) when starting up for the first time.
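The new `FIPS_REQUIRED_PROVIDERS` setting above only declares the list; this diff does not show where it is enforced. A hypothetical consumer, to make the intent concrete (the `FipsProviderCheck` class and its wiring are invented here; only the setting and the JDK `Security` API are real):

```java
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.core.XPackSettings;

import java.security.Provider;
import java.security.Security;
import java.util.List;
import java.util.Locale;

final class FipsProviderCheck {
    private FipsProviderCheck() {}

    /** Throws if any provider named in xpack.security.fips_mode.required_providers is absent at runtime. */
    static void ensureRequiredProviders(Settings settings) {
        List<String> required = XPackSettings.FIPS_REQUIRED_PROVIDERS.get(settings);
        for (String name : required) {
            boolean found = false;
            for (Provider provider : Security.getProviders()) {
                if (provider.getName().toLowerCase(Locale.ROOT).equals(name.toLowerCase(Locale.ROOT))) {
                    found = true;
                    break;
                }
            }
            if (found == false) {
                throw new IllegalStateException("required JCE provider [" + name + "] is not installed");
            }
        }
    }
}
```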
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractTransportGetResourcesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractTransportGetResourcesAction.java
index ce52af5513873..dea158b425071 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractTransportGetResourcesAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractTransportGetResourcesAction.java
@@ -15,11 +15,11 @@
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
+import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.index.query.BoolQueryBuilder;
 import org.elasticsearch.index.query.QueryBuilder;
@@ -32,14 +32,12 @@
 import org.elasticsearch.xcontent.NamedXContentRegistry;
 import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.ToXContent;
-import org.elasticsearch.xcontent.XContentFactory;
 import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.xcontent.XContentType;
 import org.elasticsearch.xpack.core.action.util.ExpandedIdsMatcher;
 import org.elasticsearch.xpack.core.action.util.QueryPage;

 import java.io.IOException;
-import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
@@ -109,11 +107,12 @@ protected void searchResources(AbstractGetResourcesRequest request, TaskId paren
                 Set<String> foundResourceIds = new HashSet<>();
                 long totalHitCount = response.getHits().getTotalHits().value;
                 for (SearchHit hit : response.getHits().getHits()) {
-                    BytesReference docSource = hit.getSourceRef();
                     try (
-                        InputStream stream = docSource.streamInput();
-                        XContentParser parser = XContentFactory.xContent(XContentType.JSON)
-                            .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream)
+                        XContentParser parser = XContentHelper.createParserNotCompressed(
+                            LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG.withRegistry(xContentRegistry),
+                            hit.getSourceRef(),
+                            XContentType.JSON
+                        )
                     ) {
                         Resource resource = parse(parser);
                         String id = extractIdFromResource(resource);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java
index 505d85c764b17..dc5169648e0cd 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java
@@ -23,7 +23,7 @@ public class EnterpriseSearchFeatureSetUsage extends XPackFeatureSet.Usage {

     static final TransportVersion BEHAVIORAL_ANALYTICS_TRANSPORT_VERSION = TransportVersions.V_8_8_1;
-    static final TransportVersion QUERY_RULES_TRANSPORT_VERSION = TransportVersions.V_8_500_046;
+    static final TransportVersion QUERY_RULES_TRANSPORT_VERSION = TransportVersions.V_8_500_061;

     public static final String SEARCH_APPLICATIONS = "search_applications";
     public static final String ANALYTICS_COLLECTIONS = "analytics_collections";
@@ -54,14 +54,14 @@ public EnterpriseSearchFeatureSetUsage(

     public EnterpriseSearchFeatureSetUsage(StreamInput in) throws IOException {
         super(in);
-        this.searchApplicationsUsage = in.readMap();
+        this.searchApplicationsUsage = in.readGenericMap();
         Map<String, Object> analyticsCollectionsUsage = new HashMap<>();
         Map<String, Object> queryRulesUsage = new HashMap<>();
         if (in.getTransportVersion().onOrAfter(QUERY_RULES_TRANSPORT_VERSION)) {
-            analyticsCollectionsUsage = in.readMap();
-            queryRulesUsage = in.readMap();
+            analyticsCollectionsUsage = in.readGenericMap();
+            queryRulesUsage = in.readGenericMap();
         } else if (in.getTransportVersion().onOrAfter(BEHAVIORAL_ANALYTICS_TRANSPORT_VERSION)) {
-            analyticsCollectionsUsage = in.readMap();
+            analyticsCollectionsUsage = in.readGenericMap();
         }
         this.analyticsCollectionsUsage = analyticsCollectionsUsage;
         this.queryRulesUsage = queryRulesUsage;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncResultsService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncResultsService.java
index 945084395448a..217274f963aec 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncResultsService.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncResultsService.java
@@ -14,7 +14,8 @@
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.TriConsumer;
+import org.elasticsearch.common.TriFunction;
+import org.elasticsearch.core.RefCounted;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.tasks.TaskManager;
@@ -34,7 +35,7 @@ public class AsyncResultsService<Task extends AsyncTask, Response extends Action
     private final AsyncTaskIndexService<Response> store;
     private final boolean updateInitialResultsInStore;
-    private final TriConsumer<Task, ActionListener<Response>, TimeValue> addCompletionListener;
+    private final TriFunction<Task, ActionListener<Response>, TimeValue, Boolean> addCompletionListener;

     /**
      * Creates async results service
@@ -50,7 +51,7 @@ public AsyncResultsService(
         AsyncTaskIndexService<Response> store,
         boolean updateInitialResultsInStore,
         Class<? extends Task> asyncTaskClass,
-        TriConsumer<Task, ActionListener<Response>, TimeValue> addCompletionListener,
+        TriFunction<Task, ActionListener<Response>, TimeValue, Boolean> addCompletionListener,
         TaskManager taskManager,
         ClusterService clusterService
     ) {
@@ -128,11 +129,16 @@ private void getSearchResponseFromTask(
             if (expirationTimeMillis != -1) {
                 task.setExpirationTime(expirationTimeMillis);
             }
-            addCompletionListener.apply(
+            boolean added = addCompletionListener.apply(
                 task,
                 listener.delegateFailure((l, response) -> sendFinalResponse(request, response, nowInMillis, l)),
                 request.getWaitForCompletionTimeout()
             );
+            if (added == false) {
+                // the task must have completed, since we cannot add a completion listener
+                assert store.getTaskAndCheckAuthentication(taskManager, searchId, asyncTaskClass) == null;
+                getSearchResponseFromIndex(searchId, request, nowInMillis, listener);
+            }
         } catch (Exception exc) {
             listener.onFailure(exc);
         }
@@ -144,7 +150,17 @@ private void getSearchResponseFromIndex(
         long nowInMillis,
         ActionListener<Response> listener
     ) {
-        store.getResponse(searchId, true, listener.delegateFailure((l, response) -> sendFinalResponse(request, response, nowInMillis, l)));
+        store.getResponse(searchId, true, listener.delegateFailure((l, response) -> {
+            try {
+                sendFinalResponse(request, response, nowInMillis, l);
+            } finally {
+                if (response instanceof StoredAsyncResponse<?> storedAsyncResponse
+                    && storedAsyncResponse.getResponse() instanceof RefCounted refCounted) {
+                    refCounted.decRef();
+                }
+            }
+
+        }));
     }

     private void sendFinalResponse(GetAsyncResultRequest request, Response response, long nowInMillis, ActionListener<Response> listener) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncTaskIndexService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncTaskIndexService.java
index 746172fef18f3..c20300db84a3d 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncTaskIndexService.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncTaskIndexService.java
@@ -43,6 +43,7 @@
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParserUtils;
+import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.Releasables;
 import org.elasticsearch.core.Streams;
 import org.elasticsearch.index.engine.DocumentMissingException;
@@ -215,7 +216,17 @@ public SecurityContext getSecurityContext() {
      * TODO: add limit for stored async response in EQL, and instead of this method use createResponse
      */
     public void createResponseForEQL(String docId, Map<String, String> headers, R response, ActionListener<DocWriteResponse> listener) {
-        indexResponse(docId, headers, response, false, listener);
+        indexResponse(docId, headers, null, response, false, listener);
+    }
+
+    public void createResponseForEQL(
+        String docId,
+        Map<String, String> headers,
+        Map<String, List<String>> responseHeaders,
+        R response,
+        ActionListener<DocWriteResponse> listener
+    ) {
+        indexResponse(docId, headers, responseHeaders, response, false, listener);
     }

     /**
@@ -223,7 +234,7 @@ public void createResponseForEQL(String docId, Map<String, String> headers, R re
      * and the expected expiration time.
      */
     public void createResponse(String docId, Map<String, String> headers, R response, ActionListener<DocWriteResponse> listener) {
-        indexResponse(docId, headers, response, true, listener);
+        indexResponse(docId, headers, null, response, true, listener);
     }

     public void updateResponse(
@@ -238,6 +249,7 @@ public void updateResponse(
     private void indexResponse(
         String docId,
         Map<String, String> headers,
+        @Nullable Map<String, List<String>> responseHeaders,
         R response,
         boolean limitToMaxResponseSize,
         ActionListener<DocWriteResponse> listener
@@ -249,6 +261,10 @@ private void indexResponse(
                 .startObject()
                 .field(HEADERS_FIELD, headers)
                 .field(EXPIRATION_TIME_FIELD, response.getExpirationTime());
+            if (responseHeaders != null) {
+                source.field(RESPONSE_HEADERS_FIELD, responseHeaders);
+            }
+
             addResultFieldAndFinish(response, source);
             clientWithOrigin.index(new IndexRequest(index).create(true).id(docId).source(buffer.bytes(), source.contentType()), listener);
         } catch (Exception e) {
@@ -566,10 +582,13 @@ public int read() {
         });
         TransportVersion version = TransportVersion.readVersion(new InputStreamStreamInput(encodedIn));
         assert version.onOrBefore(TransportVersion.current()) : version + " >= " + TransportVersion.current();
+        final StreamInput input;
         if (version.onOrAfter(TransportVersions.V_7_15_0)) {
-            encodedIn = CompressorFactory.COMPRESSOR.threadLocalInputStream(encodedIn);
+            input = CompressorFactory.COMPRESSOR.threadLocalStreamInput(encodedIn);
+        } else {
+            input = new InputStreamStreamInput(encodedIn);
         }
-        try (StreamInput in = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(encodedIn), registry)) {
+        try (StreamInput in = new NamedWriteableAwareStreamInput(input, registry)) {
             in.setTransportVersion(version);
             return reader.read(in);
         }
     }
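The `AsyncResultsService` change above closes a race: previously a listener added to a task that had just completed was silently dropped. The new contract makes registration fail once the task has completed, so the caller falls back to the stored result in the async-results index. A pure-JDK toy model of that contract (the `CompletionListeners` class is invented; it only mirrors the shape of the `StoredAsyncTask` changes below):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;
import java.util.function.Supplier;

final class CompletionListeners<R> {
    private final List<Consumer<R>> listeners = new ArrayList<>();
    private boolean hasCompleted = false;

    // Returns false instead of silently dropping the listener after completion;
    // the supplier means the listener is only materialized if registration succeeds.
    synchronized boolean addCompletionListener(Supplier<Consumer<R>> supplier) {
        if (hasCompleted) {
            return false;
        }
        listeners.add(supplier.get());
        return true;
    }

    void onResponse(R response) {
        List<Consumer<R>> copy;
        synchronized (this) {
            hasCompleted = true;
            copy = new ArrayList<>(listeners);
            listeners.clear();
        }
        // Notify outside the lock so a slow or re-entrant listener cannot deadlock us.
        for (Consumer<R> listener : copy) {
            listener.accept(response);
        }
    }
}
```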
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/DeleteAsyncResultAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/DeleteAsyncResultAction.java
deleted file mode 100644
index e31ab544c293a..0000000000000
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/DeleteAsyncResultAction.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-package org.elasticsearch.xpack.core.async;
-
-import org.elasticsearch.action.ActionType;
-import org.elasticsearch.action.support.master.AcknowledgedResponse;
-
-public class DeleteAsyncResultAction extends ActionType<AcknowledgedResponse> {
-    public static final DeleteAsyncResultAction INSTANCE = new DeleteAsyncResultAction();
-    public static final String NAME = "indices:data/read/async_search/delete";
-
-    private DeleteAsyncResultAction() {
-        super(NAME, AcknowledgedResponse::readFrom);
-    }
-}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/StoredAsyncTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/StoredAsyncTask.java
index 9f75ac0f5f564..69be247ca3608 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/StoredAsyncTask.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/StoredAsyncTask.java
@@ -17,13 +17,15 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.function.Supplier;

 public abstract class StoredAsyncTask<Response extends ActionResponse> extends CancellableTask implements AsyncTask {
     private final AsyncExecutionId asyncExecutionId;
     private final Map<String, String> originHeaders;
     private volatile long expirationTimeMillis;
-    private final List<ActionListener<Response>> completionListeners;
+    protected final List<ActionListener<Response>> completionListeners;
+    private boolean hasCompleted = false;

     @SuppressWarnings("this-escape")
     public StoredAsyncTask(
@@ -66,8 +68,12 @@ public long getExpirationTimeMillis() {
         return expirationTimeMillis;
     }

-    public synchronized void addCompletionListener(ActionListener<Response> listener) {
-        completionListeners.add(listener);
+    public synchronized boolean addCompletionListener(Supplier<ActionListener<Response>> listenerSupplier) {
+        if (hasCompleted) {
+            return false;
+        }
+        completionListeners.add(listenerSupplier.get());
+        return true;
     }

     public synchronized void removeCompletionListener(ActionListener<Response> listener) {
@@ -77,17 +83,32 @@ public synchronized void removeCompletionListener(ActionListener<Response> liste
     /**
      * This method is called when the task is finished successfully before unregistering the task and storing the results
      */
-    public synchronized void onResponse(Response response) {
-        for (ActionListener<Response> listener : completionListeners) {
-            listener.onResponse(response);
+    public void onResponse(Response response) {
+        List<ActionListener<Response>> completionListenersCopy;
+        synchronized (this) {
+            assert hasCompleted == false;
+            hasCompleted = true;
+            completionListenersCopy = new ArrayList<>(completionListeners);
+            completionListeners.clear();
+        }
+        for (ActionListener<Response> listener : completionListenersCopy) {
+            response.incRef();
+            ActionListener.respondAndRelease(listener, response);
         }
     }

     /**
      * This method is called when the task failed before unregistering the task and storing the results
      */
-    public synchronized void onFailure(Exception e) {
-        for (ActionListener<Response> listener : completionListeners) {
+    public void onFailure(Exception e) {
+        List<ActionListener<Response>> completionListenersCopy;
+        synchronized (this) {
+            assert hasCompleted == false;
+            hasCompleted = true;
+            completionListenersCopy = new ArrayList<>(completionListeners);
+            completionListeners.clear();
+        }
+        for (ActionListener<Response> listener : completionListenersCopy) {
             listener.onFailure(e);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/TransportDeleteAsyncResultAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/TransportDeleteAsyncResultAction.java
index ac9645980c8fd..f3fa745053ba2 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/TransportDeleteAsyncResultAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/TransportDeleteAsyncResultAction.java
@@ -8,6 +8,7 @@
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionListenerResponseHandler;
+import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.HandledTransportAction;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
@@ -26,6 +27,7 @@
 import static org.elasticsearch.xpack.core.ClientHelper.ASYNC_SEARCH_ORIGIN;

 public class TransportDeleteAsyncResultAction extends HandledTransportAction<DeleteAsyncResultRequest, AcknowledgedResponse> {
+    public static final ActionType<AcknowledgedResponse> TYPE = ActionType.localOnly("indices:data/read/async_search/delete");
     private final DeleteAsyncResultsService deleteResultsService;
     private final ClusterService clusterService;
     private final TransportService transportService;
@@ -40,13 +42,7 @@ public TransportDeleteAsyncResultAction(
         ThreadPool threadPool,
         BigArrays bigArrays
     ) {
-        super(
-            DeleteAsyncResultAction.NAME,
-            transportService,
-            actionFilters,
-            DeleteAsyncResultRequest::new,
-            EsExecutors.DIRECT_EXECUTOR_SERVICE
-        );
+        super(TYPE.name(), transportService, actionFilters, DeleteAsyncResultRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE);
         this.transportService = transportService;
         this.clusterService = clusterService;
         AsyncTaskIndexService store = new AsyncTaskIndexService<>(
@@ -73,7 +69,7 @@ protected void doExecute(Task task, DeleteAsyncResultRequest request, ActionList
         } else {
             transportService.sendRequest(
                 node,
-                DeleteAsyncResultAction.NAME,
+                TYPE.name(),
                 request,
                 new ActionListenerResponseHandler<>(listener, AcknowledgedResponse::readFrom, EsExecutors.DIRECT_EXECUTOR_SERVICE)
             );
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditor.java
index f4c3704cd65c1..8371f018a6bde 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditor.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditor.java
@@ -11,7 +11,7 @@
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.DocWriteResponse;
-import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.client.internal.OriginSettingClient;
@@ -49,7 +49,7 @@ public abstract class AbstractAuditor<T extends AbstractAuditMessage> {

     private final String nodeName;
     private final String auditIndex;
     private final String templateName;
-    private final Supplier<PutComposableIndexTemplateAction.Request> templateSupplier;
+    private final Supplier<TransportPutComposableIndexTemplateAction.Request> templateSupplier;
     private final AbstractAuditMessageFactory<T> messageFactory;
     private final AtomicBoolean hasLatestTemplate;
@@ -67,11 +67,9 @@ protected AbstractAuditor(
     ) {
         this(client, auditIndex, templateConfig.getTemplateName(), () -> {
-            try {
-                return new PutComposableIndexTemplateAction.Request(templateConfig.getTemplateName()).indexTemplate(
-                    ComposableIndexTemplate.parse(
-                        JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, templateConfig.loadBytes())
-                    )
+            try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, templateConfig.loadBytes())) {
+                return new TransportPutComposableIndexTemplateAction.Request(templateConfig.getTemplateName()).indexTemplate(
+                    ComposableIndexTemplate.parse(parser)
                 ).masterNodeTimeout(MASTER_TIMEOUT);
             } catch (IOException e) {
                 throw new ElasticsearchParseException("unable to parse composable template " + templateConfig.getTemplateName(), e);
@@ -83,7 +81,7 @@ protected AbstractAuditor(
         OriginSettingClient client,
         String auditIndex,
         String templateName,
-        Supplier<PutComposableIndexTemplateAction.Request> templateSupplier,
+        Supplier<TransportPutComposableIndexTemplateAction.Request> templateSupplier,
         String nodeName,
         AbstractAuditMessageFactory<T> messageFactory,
         ClusterService clusterService
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationIssue.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationIssue.java
index 8e0a445db6278..2a888506598d7 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationIssue.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationIssue.java
@@ -98,7 +98,7 @@ public DeprecationIssue(StreamInput in) throws IOException {
         url = in.readString();
         details = in.readOptionalString();
         resolveDuringRollingUpgrade = in.readBoolean();
-        meta = in.readMap();
+        meta = in.readGenericMap();
     }

     public Level getLevel() {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java
index 10ae1846e91dc..ef93ab914f08f 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java
@@ -69,7 +69,7 @@ public Request() {}

         public Request(StreamInput in) throws IOException {
             super(in);
-            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040) && in.readBoolean()) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061) && in.readBoolean()) {
                 this.indexStartTimeMillis = in.readVLong();
                 this.indexEndTimeMillis = in.readVLong();
             } else {
@@ -132,7 +132,7 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId,
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
-            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) {
                 out.writeBoolean(true);
                 out.writeVLong(indexStartTimeMillis);
                 out.writeVLong(indexEndTimeMillis);
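Several wire formats above and below move their transport-version gate (from V_8_500_040, V_8_500_046, or V_8_500_054) to V_8_500_061. For context, this is the standard read/write gating idiom they all use; the `ExampleStatus` class and its field are invented, only the idiom mirrors the surrounding hunks:

```java
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

final class ExampleStatus implements Writeable {
    private final long startMillis;

    ExampleStatus(StreamInput in) throws IOException {
        // Older peers never send the field; newer peers prefix it with a presence flag.
        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061) && in.readBoolean()) {
            startMillis = in.readVLong();
        } else {
            startMillis = -1; // sentinel when talking to an older node
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // Only emit the new field to peers that know how to read it.
        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) {
            out.writeBoolean(true);
            out.writeVLong(startMillis);
        }
    }
}
```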
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardStatus.java
index 8f254043cf7c2..2700ed844d063 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardStatus.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardStatus.java
@@ -144,7 +144,7 @@ public DownsampleShardStatus(StreamInput in) throws IOException {
         numSent = in.readLong();
         numIndexed = in.readLong();
         numFailed = in.readLong();
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040) && in.readBoolean()) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061) && in.readBoolean()) {
             totalShardDocCount = in.readVLong();
             lastSourceTimestamp = in.readVLong();
             lastTargetTimestamp = in.readVLong();
@@ -254,7 +254,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeLong(numSent);
         out.writeLong(numIndexed);
         out.writeLong(numFailed);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) {
             out.writeBoolean(true);
             out.writeVLong(totalShardDocCount);
             out.writeVLong(lastSourceTimestamp);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java
index 0f384ef2a66fa..6285840b66039 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java
@@ -24,7 +24,7 @@ public class EqlFeatureSetUsage extends XPackFeatureSet.Usage {

     public EqlFeatureSetUsage(StreamInput in) throws IOException {
         super(in);
-        stats = in.readMap();
+        stats = in.readGenericMap();
     }

     public EqlFeatureSetUsage(Map<String, Object> stats) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/EsqlFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/EsqlFeatureSetUsage.java
index 4f68ff1db6033..c5c5ff708c0b7 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/EsqlFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/EsqlFeatureSetUsage.java
@@ -24,7 +24,7 @@ public class EsqlFeatureSetUsage extends XPackFeatureSet.Usage {

     public EsqlFeatureSetUsage(StreamInput in) throws IOException {
         super(in);
-        stats = in.readMap();
+        stats = in.readGenericMap();
     }

     public EsqlFeatureSetUsage(Map<String, Object> stats) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java
index 22a2c3a880ce5..818b45c2b5d00 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java
@@ -91,7 +91,7 @@ public DownsampleAction(final DateHistogramInterval fixedInterval, final TimeVal
     public DownsampleAction(StreamInput in) throws IOException {
         this(
             new DateHistogramInterval(in),
-            in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_054)
+            in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)
                 ? TimeValue.parseTimeValue(in.readString(), WAIT_TIMEOUT_FIELD.getPreferredName())
                 : DEFAULT_WAIT_TIMEOUT
         );
@@ -100,7 +100,7 @@ public DownsampleAction(StreamInput in) throws IOException {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         fixedInterval.writeTo(out);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_054)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) {
             out.writeString(waitTimeout.getStringRep());
         } else {
             out.writeString(DEFAULT_WAIT_TIMEOUT.getStringRep());
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java
index 890045101c35c..c3c9fa88a1a96 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java
@@ -227,24 +227,28 @@ private IndexLifecycleExplainResponse(
             if (policyName == null) {
                 throw new IllegalArgumentException("[" + POLICY_NAME_FIELD.getPreferredName() + "] cannot be null for managed index");
             }
-            // check to make sure that step details are either all null or all set.
-            long numNull = Stream.of(phase, action, step).filter(Objects::isNull).count();
-            if (numNull > 0 && numNull < 3) {
-                throw new IllegalArgumentException(
-                    "managed index response must have complete step details ["
-                        + PHASE_FIELD.getPreferredName()
-                        + "="
-                        + phase
-                        + ", "
-                        + ACTION_FIELD.getPreferredName()
-                        + "="
-                        + action
-                        + ", "
-                        + STEP_FIELD.getPreferredName()
-                        + "="
-                        + step
-                        + "]"
-                );
+
+            // If at least one detail is null, but not *all* are null
+            if (Stream.of(phase, action, step).anyMatch(Objects::isNull)
+                && Stream.of(phase, action, step).allMatch(Objects::isNull) == false) {
+                // …and it's not in the error step
+                if (ErrorStep.NAME.equals(step) == false) {
+                    throw new IllegalArgumentException(
+                        "managed index response must have complete step details ["
+                            + PHASE_FIELD.getPreferredName()
+                            + "="
+                            + phase
+                            + ", "
+                            + ACTION_FIELD.getPreferredName()
+                            + "="
+                            + action
+                            + ", "
+                            + STEP_FIELD.getPreferredName()
+                            + "="
+                            + step
+                            + "]"
+                    );
+                }
             }
         } else {
             if (policyName != null
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicy.java
index 322f64405ca1f..b8bb0233b2c91 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicy.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicy.java
@@ -109,7 +109,7 @@ public LifecyclePolicy(StreamInput in) throws IOException {
         type = in.readNamedWriteable(LifecycleType.class);
         name = in.readString();
         phases = in.readImmutableMap(Phase::new);
-        this.metadata = in.readMap();
+        this.metadata = in.readGenericMap();
         if (in.getTransportVersion().onOrAfter(TransportVersions.DEPRECATED_COMPONENT_TEMPLATES_ADDED)) {
             this.deprecated = in.readOptionalBoolean();
         } else {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/ILMActions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/ILMActions.java
new file mode 100644
index 0000000000000..ed3c88ef86be1
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/ILMActions.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+package org.elasticsearch.xpack.core.ilm.action;
+
+import org.elasticsearch.action.ActionType;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+
+public enum ILMActions {
+    ;
+    public static final ActionType<AcknowledgedResponse> START = ActionType.localOnly("cluster:admin/ilm/start");
+    public static final ActionType<AcknowledgedResponse> STOP = ActionType.localOnly("cluster:admin/ilm/stop");
+    public static final ActionType<AcknowledgedResponse> RETRY = ActionType.localOnly("indices:admin/ilm/retry");
+    public static final ActionType<AcknowledgedResponse> MOVE_TO_STEP = ActionType.localOnly("cluster:admin/ilm/_move/post");
+    public static final ActionType<AcknowledgedResponse> PUT = ActionType.localOnly("cluster:admin/ilm/put");
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/MoveToStepAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/MoveToStepAction.java
deleted file mode 100644
index 15ff37c6881c2..0000000000000
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/MoveToStepAction.java
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- *
- */
-package org.elasticsearch.xpack.core.ilm.action;
-
-import org.elasticsearch.action.ActionRequestValidationException;
-import org.elasticsearch.action.ActionType;
-import org.elasticsearch.action.support.master.AcknowledgedRequest;
-import org.elasticsearch.action.support.master.AcknowledgedResponse;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.core.Nullable;
-import org.elasticsearch.xcontent.ConstructingObjectParser;
-import org.elasticsearch.xcontent.ParseField;
-import org.elasticsearch.xcontent.ToXContentObject;
-import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentParser;
-import org.elasticsearch.xpack.core.ilm.Step.StepKey;
-
-import java.io.IOException;
-import java.util.Objects;
-
-public class MoveToStepAction extends ActionType<AcknowledgedResponse> {
-    public static final MoveToStepAction INSTANCE = new MoveToStepAction();
-    public static final String NAME = "cluster:admin/ilm/_move/post";
-
-    protected MoveToStepAction() {
-        super(NAME, AcknowledgedResponse::readFrom);
-    }
-
-    public static class Request extends AcknowledgedRequest<Request> implements ToXContentObject {
-        static final ParseField CURRENT_KEY_FIELD = new ParseField("current_step");
-        static final ParseField NEXT_KEY_FIELD = new ParseField("next_step");
-        private static final ConstructingObjectParser<Request, String> PARSER = new ConstructingObjectParser<>(
-            "move_to_step_request",
-            false,
-            (a, index) -> {
-                StepKey currentStepKey = (StepKey) a[0];
-                PartialStepKey nextStepKey = (PartialStepKey) a[1];
-                return new Request(index, currentStepKey, nextStepKey);
-            }
-        );
-
-        static {
-            // The current step uses the strict parser (meaning it requires all three parts of a stepkey)
-            PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, name) -> StepKey.parse(p), CURRENT_KEY_FIELD);
-            // The target step uses the parser that allows specifying only the phase, or the phase and action
-            PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, name) -> PartialStepKey.parse(p), NEXT_KEY_FIELD);
-        }
-
-        private String index;
-        private StepKey currentStepKey;
-        private PartialStepKey nextStepKey;
-
-        public Request(String index, StepKey currentStepKey, PartialStepKey nextStepKey) {
-            this.index = index;
-            this.currentStepKey = currentStepKey;
-            this.nextStepKey = nextStepKey;
-        }
-
-        public Request(StreamInput in) throws IOException {
-            super(in);
-            this.index = in.readString();
-            this.currentStepKey = StepKey.readFrom(in);
-            this.nextStepKey = new PartialStepKey(in);
-        }
-
-        public Request() {}
-
-        public String getIndex() {
-            return index;
-        }
-
-        public StepKey getCurrentStepKey() {
-            return currentStepKey;
-        }
-
-        public PartialStepKey getNextStepKey() {
-            return nextStepKey;
-        }
-
-        @Override
-        public ActionRequestValidationException validate() {
-            return null;
-        }
-
-        public static Request parseRequest(String name, XContentParser parser) {
-            return PARSER.apply(parser, name);
-        }
-
-        @Override
-        public void writeTo(StreamOutput out) throws IOException {
-            super.writeTo(out);
-            out.writeString(index);
-            currentStepKey.writeTo(out);
-            nextStepKey.writeTo(out);
-        }
-
-        @Override
-        public int hashCode() {
-            return Objects.hash(index, currentStepKey, nextStepKey);
-        }
-
-        @Override
-        public boolean equals(Object obj) {
-            if (obj == null) {
-                return false;
-            }
-            if (obj.getClass() != getClass()) {
-                return false;
-            }
-            Request other = (Request) obj;
-            return Objects.equals(index, other.index)
-                && Objects.equals(currentStepKey, other.currentStepKey)
-                && Objects.equals(nextStepKey, other.nextStepKey);
-        }
-
-        @Override
-        public String toString() {
-            return Strings.toString(this);
-        }
-
-        @Override
-        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-            return builder.startObject()
-                .field(CURRENT_KEY_FIELD.getPreferredName(), currentStepKey)
-                .field(NEXT_KEY_FIELD.getPreferredName(), nextStepKey)
-                .endObject();
-        }
-
-        /**
-         * A PartialStepKey is like a {@link StepKey}, however, the action and step name are optional.
-         */
-        public static class PartialStepKey implements Writeable, ToXContentObject {
-            private final String phase;
-            private final String action;
-            private final String name;
-
-            public static final ParseField PHASE_FIELD = new ParseField("phase");
-            public static final ParseField ACTION_FIELD = new ParseField("action");
-            public static final ParseField NAME_FIELD = new ParseField("name");
-            private static final ConstructingObjectParser<PartialStepKey, Void> PARSER = new ConstructingObjectParser<>(
-                "step_specification",
-                a -> new PartialStepKey((String) a[0], (String) a[1], (String) a[2])
-            );
-            static {
-                PARSER.declareString(ConstructingObjectParser.constructorArg(), PHASE_FIELD);
-                PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), ACTION_FIELD);
-                PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), NAME_FIELD);
-            }
-
-            public PartialStepKey(String phase, @Nullable String action, @Nullable String name) {
-                this.phase = phase;
-                this.action = action;
-                this.name = name;
-                if (name != null && action == null) {
-                    throw new IllegalArgumentException(
-                        "phase; phase and action; or phase, action, and step must be provided, "
-                            + "but a step name was specified without a corresponding action"
-                    );
-                }
-            }
-
-            public PartialStepKey(StreamInput in) throws IOException {
-                this.phase = in.readString();
-                this.action = in.readOptionalString();
-                this.name = in.readOptionalString();
-                if (name != null && action == null) {
-                    throw new IllegalArgumentException(
-                        "phase; phase and action; or phase, action, and step must be provided, "
-                            + "but a step name was specified without a corresponding action"
-                    );
-                }
-            }
-
-            public static PartialStepKey parse(XContentParser parser) {
-                return PARSER.apply(parser, null);
-            }
-
-            @Override
-            public void writeTo(StreamOutput out) throws IOException {
-                out.writeString(phase);
-                out.writeOptionalString(action);
-                out.writeOptionalString(name);
-            }
-
-            @Nullable
-            public String getPhase() {
-                return phase;
-            }
-
-            @Nullable
-            public String getAction() {
-                return action;
-            }
-
-            @Nullable
-            public String getName() {
-                return name;
-            }
-
-            @Override
-            public int hashCode() {
-                return Objects.hash(phase, action, name);
-            }
-
-            @Override
-            public boolean equals(Object obj) {
-                if (obj == null) {
-                    return false;
-                }
-                if (getClass() != obj.getClass()) {
-                    return false;
-                }
-                PartialStepKey other = (PartialStepKey) obj;
-                return Objects.equals(phase, other.phase) && Objects.equals(action, other.action) && Objects.equals(name, other.name);
-            }
-
-            @Override
-            public String toString() {
-                return Strings.toString(this);
-            }
-
-            @Override
-            public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-                builder.startObject();
-                builder.field(PHASE_FIELD.getPreferredName(), phase);
-                if (action != null) {
-                    builder.field(ACTION_FIELD.getPreferredName(), action);
-                }
-                if (name != null) {
-                    builder.field(NAME_FIELD.getPreferredName(), name);
-                }
-                builder.endObject();
-                return builder;
-            }
-        }
-    }
-}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleAction.java
deleted file mode 100644
index 62aeba41d81b8..0000000000000
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleAction.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.core.ilm.action; - -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.ValidateActions; -import org.elasticsearch.action.support.master.AcknowledgedRequest; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; -import org.elasticsearch.xpack.core.ilm.TimeseriesLifecycleType; - -import java.io.IOException; -import java.util.Objects; - -public class PutLifecycleAction extends ActionType { - public static final PutLifecycleAction INSTANCE = new PutLifecycleAction(); - public static final String NAME = "cluster:admin/ilm/put"; - - protected PutLifecycleAction() { - super(NAME, AcknowledgedResponse::readFrom); - } - - public static class Request extends AcknowledgedRequest implements ToXContentObject { - - public static final ParseField POLICY_FIELD = new ParseField("policy"); - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "put_lifecycle_request", - a -> new Request((LifecyclePolicy) a[0]) - ); - static { - PARSER.declareObject(ConstructingObjectParser.constructorArg(), LifecyclePolicy::parse, POLICY_FIELD); - } - - private LifecyclePolicy policy; - - public Request(LifecyclePolicy policy) { - this.policy = policy; - } - - public Request(StreamInput in) throws IOException { - super(in); - policy = new LifecyclePolicy(in); - } - - public Request() {} - - public LifecyclePolicy getPolicy() { - return policy; - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException err = null; - try { - this.policy.validate(); - } catch (IllegalArgumentException iae) { - err = ValidateActions.addValidationError(iae.getMessage(), null); - } - String phaseTimingErr = TimeseriesLifecycleType.validateMonotonicallyIncreasingPhaseTimings(this.policy.getPhases().values()); - if (Strings.hasText(phaseTimingErr)) { - err = new ActionRequestValidationException(); - err.addValidationError(phaseTimingErr); - } - return err; - } - - public static Request parseRequest(String name, XContentParser parser) { - return PARSER.apply(parser, name); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(POLICY_FIELD.getPreferredName(), policy); - builder.endObject(); - return builder; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - policy.writeTo(out); - } - - @Override - public int hashCode() { - return Objects.hash(policy); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (obj.getClass() != getClass()) { - return false; - } - Request other = (Request) obj; - return Objects.equals(policy, other.policy); - } - - @Override - public String toString() { - return Strings.toString(this, true, true); - } - - 
} - -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleRequest.java new file mode 100644 index 0000000000000..fe6754b735ef7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleRequest.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.core.ilm.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; +import org.elasticsearch.xpack.core.ilm.TimeseriesLifecycleType; + +import java.io.IOException; +import java.util.Objects; + +public class PutLifecycleRequest extends AcknowledgedRequest<PutLifecycleRequest> implements ToXContentObject { + + public static final ParseField POLICY_FIELD = new ParseField("policy"); + private static final ConstructingObjectParser<PutLifecycleRequest, String> PARSER = new ConstructingObjectParser<>( + "put_lifecycle_request", + a -> new PutLifecycleRequest((LifecyclePolicy) a[0]) + ); + + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), LifecyclePolicy::parse, POLICY_FIELD); + } + + private LifecyclePolicy policy; + + public PutLifecycleRequest(LifecyclePolicy policy) { + this.policy = policy; + } + + public PutLifecycleRequest(StreamInput in) throws IOException { + super(in); + policy = new LifecyclePolicy(in); + } + + public PutLifecycleRequest() {} + + public LifecyclePolicy getPolicy() { + return policy; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException err = null; + try { + this.policy.validate(); + } catch (IllegalArgumentException iae) { + err = ValidateActions.addValidationError(iae.getMessage(), null); + } + String phaseTimingErr = TimeseriesLifecycleType.validateMonotonicallyIncreasingPhaseTimings(this.policy.getPhases().values()); + if (Strings.hasText(phaseTimingErr)) { + err = new ActionRequestValidationException(); + err.addValidationError(phaseTimingErr); + } + return err; + } + + public static PutLifecycleRequest parseRequest(String name, XContentParser parser) { + return PARSER.apply(parser, name); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(POLICY_FIELD.getPreferredName(), policy); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + policy.writeTo(out); + } + + @Override + public int hashCode() { + return Objects.hash(policy); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + PutLifecycleRequest other = (PutLifecycleRequest) obj; + return Objects.equals(policy, other.policy); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + +}
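The diff above replaces the inner `PutLifecycleAction.Request` with this standalone `PutLifecycleRequest`. A minimal usage sketch of the new class, assuming a `LifecyclePolicy` built elsewhere; the helper name `buildValidated` is hypothetical:

```java
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.xpack.core.ilm.LifecyclePolicy;
import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest;

class PutLifecycleRequestSketch {
    // Wraps a policy in the standalone request and surfaces validation errors eagerly.
    static PutLifecycleRequest buildValidated(LifecyclePolicy policy) {
        PutLifecycleRequest request = new PutLifecycleRequest(policy);
        ActionRequestValidationException err = request.validate();
        if (err != null) {
            throw err; // includes policy errors and non-monotonic phase timings
        }
        return request;
    }
}
```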
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/RetryAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/RetryAction.java deleted file mode 100644 index 5e20a17f31c9e..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/RetryAction.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.core.ilm.action; - -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.master.AcknowledgedRequest; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Objects; - -public class RetryAction extends ActionType<AcknowledgedResponse> { - public static final RetryAction INSTANCE = new RetryAction(); - public static final String NAME = "indices:admin/ilm/retry"; - - protected RetryAction() { - super(NAME, AcknowledgedResponse::readFrom); - } - - public static class Request extends AcknowledgedRequest<Request> implements IndicesRequest.Replaceable { - private String[] indices = Strings.EMPTY_ARRAY; - private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); - - public Request(String... indices) { - this.indices = indices; - } - - public Request(StreamInput in) throws IOException { - super(in); - this.indices = in.readStringArray(); - this.indicesOptions = IndicesOptions.readIndicesOptions(in); - } - - public Request() {} - - @Override - public Request indices(String... 
indices) { - this.indices = indices; - return this; - } - - @Override - public String[] indices() { - return indices; - } - - @Override - public IndicesOptions indicesOptions() { - return indicesOptions; - } - - public Request indicesOptions(IndicesOptions indicesOptions) { - this.indicesOptions = indicesOptions; - return this; - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArray(indices); - indicesOptions.writeIndicesOptions(out); - } - - @Override - public int hashCode() { - return Objects.hash(Arrays.hashCode(indices), indicesOptions); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (obj.getClass() != getClass()) { - return false; - } - Request other = (Request) obj; - return Objects.deepEquals(indices, other.indices) && Objects.equals(indicesOptions, other.indicesOptions); - } - - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/StartILMAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/StartILMAction.java deleted file mode 100644 index b445c63a5296e..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/StartILMAction.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.core.ilm.action; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; - -public class StartILMAction extends ActionType<AcknowledgedResponse> { - public static final StartILMAction INSTANCE = new StartILMAction(); - public static final String NAME = "cluster:admin/ilm/start"; - - protected StartILMAction() { - super(NAME, AcknowledgedResponse::readFrom); - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/StopILMAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/StopILMAction.java deleted file mode 100644 index ee22a9b930c71..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/StopILMAction.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.core.ilm.action; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedResponse; - -public class StopILMAction extends ActionType<AcknowledgedResponse> { - public static final StopILMAction INSTANCE = new StopILMAction(); - public static final String NAME = "cluster:admin/ilm/stop"; - - protected StopILMAction() { - super(NAME, AcknowledgedResponse::readFrom); - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/PutInferenceModelAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/PutInferenceModelAction.java index e6e4ea1001f68..d09b96f897e06 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/PutInferenceModelAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/PutInferenceModelAction.java @@ -20,6 +20,8 @@ import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.MlStrings; import java.io.IOException; import java.util.Objects; @@ -82,7 +84,16 @@ public void writeTo(StreamOutput out) throws IOException { @Override public ActionRequestValidationException validate() { - return null; + ActionRequestValidationException validationException = new ActionRequestValidationException(); + if (MlStrings.isValidId(this.modelId) == false) { + validationException.addValidationError(Messages.getMessage(Messages.INVALID_ID, "model_id", this.modelId)); + } + + if (validationException.validationErrors().isEmpty() == false) { + return validationException; + } else { + return null; + } } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java index ef2e5324678a4..98c31dd9106d0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java @@ -57,10 +57,10 @@ public MachineLearningFeatureSetUsage( public MachineLearningFeatureSetUsage(StreamInput in) throws IOException { super(in); - this.jobsUsage = in.readMap(); - this.datafeedsUsage = in.readMap(); - this.analyticsUsage = in.readMap(); - this.inferenceUsage = in.readMap(); + this.jobsUsage = in.readGenericMap(); + this.datafeedsUsage = in.readGenericMap(); + this.analyticsUsage = in.readGenericMap(); + this.inferenceUsage = in.readGenericMap(); this.nodeCount = in.readInt(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java index 7cef2bed04ce3..6209ead0cc6a1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java @@ -229,9 +229,13 @@ public static SnapshotUpgradeState getSnapshotUpgradeState(@Nullable PersistentT public static DatafeedState getDatafeedState(String datafeedId, @Nullable PersistentTasksCustomMetadata tasks) { PersistentTasksCustomMetadata.PersistentTask<?> task = getDatafeedTask(datafeedId, tasks); + return getDatafeedState(task); + } + + public 
static DatafeedState getDatafeedState(PersistentTasksCustomMetadata.PersistentTask<?> task) { if (task == null) { // If we haven't started a datafeed then there will be no persistent task, - // which is the same as if the datafeed was't started + // which is the same as if the datafeed wasn't started return DatafeedState.STOPPED; } DatafeedState taskState = (DatafeedState) task.getState(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CoordinatedInferenceAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CoordinatedInferenceAction.java index 8ff0c1179ea61..03270e0dda0f7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CoordinatedInferenceAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CoordinatedInferenceAction.java @@ -125,8 +125,8 @@ public Request(StreamInput in) throws IOException { this.modelId = in.readString(); this.requestModelType = in.readEnum(RequestModelType.class); this.inputs = in.readOptionalStringCollectionAsList(); - this.taskSettings = in.readMap(); - this.objectsToInfer = in.readOptionalCollectionAsList(StreamInput::readMap); + this.taskSettings = in.readGenericMap(); + this.objectsToInfer = in.readOptionalCollectionAsList(StreamInput::readGenericMap); this.inferenceConfigUpdate = in.readOptionalNamedWriteable(InferenceConfigUpdate.class); this.previouslyLicensed = in.readOptionalBoolean(); this.inferenceTimeout = in.readOptionalTimeValue(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferModelAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferModelAction.java index 296aec12b1a63..d38897ad3e5e7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferModelAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferModelAction.java @@ -176,7 +176,7 @@ public static Request forTextInput( public Request(StreamInput in) throws IOException { super(in); this.id = in.readString(); - this.objectsToInfer = in.readCollectionAsImmutableList(StreamInput::readMap); + this.objectsToInfer = in.readCollectionAsImmutableList(StreamInput::readGenericMap); this.update = in.readNamedWriteable(InferenceConfigUpdate.class); this.previouslyLicensed = in.readBoolean(); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentAction.java index 806f935d5f394..99d190a786564 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentAction.java @@ -147,7 +147,7 @@ public static Request forTextInput(String id, InferenceConfigUpdate update, List public Request(StreamInput in) throws IOException { super(in); id = in.readString(); - docs = in.readCollectionAsImmutableList(StreamInput::readMap); + docs = in.readCollectionAsImmutableList(StreamInput::readGenericMap); update = in.readOptionalNamedWriteable(InferenceConfigUpdate.class); inferenceTimeout = in.readOptionalTimeValue(); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) {
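The `MlTasks` hunk above extracts an overload that works directly on the persistent task. A small sketch of its contract as shown in the diff (a missing task reads as a stopped datafeed):

```java
import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
import org.elasticsearch.xpack.core.ml.MlTasks;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState;

class DatafeedStateSketch {
    // Callers that already hold the task can now skip the datafeedId lookup.
    static DatafeedState stateOf(PersistentTasksCustomMetadata.PersistentTask<?> task) {
        return MlTasks.getDatafeedState(task); // null task => DatafeedState.STOPPED
    }
}
```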
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/IsolateDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/IsolateDatafeedAction.java index 630c7b60c961b..72af61d8dafb2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/IsolateDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/IsolateDatafeedAction.java @@ -14,10 +14,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.tasks.Task; -import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -45,23 +43,6 @@ private IsolateDatafeedAction() { public static class Request extends BaseTasksRequest<Request> implements ToXContentObject { - public static final ObjectParser<Request, Void> PARSER = new ObjectParser<>(NAME, Request::new); - static { - PARSER.declareString((request, datafeedId) -> request.datafeedId = datafeedId, DatafeedConfig.ID); - } - - public static Request fromXContent(XContentParser parser) { - return parseRequest(null, parser); - } - - public static Request parseRequest(String datafeedId, XContentParser parser) { - Request request = PARSER.apply(parser, null); - if (datafeedId != null) { - request.datafeedId = datafeedId; - } - return request; - } - private String datafeedId; public Request(String datafeedId) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlInfoAction.java index b5cb34812601e..3c69056625e89 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlInfoAction.java @@ -59,7 +59,7 @@ public Response() { public Response(StreamInput in) throws IOException { super(in); - info = in.readMap(); + info = in.readGenericMap(); } public Map<String, Object> getInfo() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java index 9fad95d49158e..c50342dd78157 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java @@ -44,10 +44,6 @@ private OpenJobAction() { public static class Request extends MasterNodeRequest<Request> implements ToXContentObject { - public static Request fromXContent(XContentParser parser) { - return parseRequest(null, parser); - } - public static Request parseRequest(String jobId, XContentParser parser) { JobParams jobParams = JobParams.PARSER.apply(parser, null); if (jobId != null) {
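Several hunks in this region swap `readMap` for `readGenericMap` when deserializing untyped `Map<String, Object>` payloads. A sketch of the round-trip; the symmetric `writeGenericMap` on `StreamOutput` is an assumption, since only the read side appears in these hunks:

```java
import java.io.IOException;
import java.util.Map;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

class GenericMapSketch {
    static void write(StreamOutput out, Map<String, Object> doc) throws IOException {
        out.writeGenericMap(doc); // assumption: the generic-map writer counterpart
    }

    static Map<String, Object> read(StreamInput in) throws IOException {
        return in.readGenericMap(); // renamed from readMap() in these hunks
    }
}
```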
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDataFrameAnalyticsAction.java index 97cbc29b3cf44..86c794084388f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDataFrameAnalyticsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDataFrameAnalyticsAction.java @@ -143,7 +143,7 @@ public Response(List<Map<String, Object>> featureValues) { public Response(StreamInput in) throws IOException { super(in); - this.featureValues = in.readCollectionAsList(StreamInput::readMap); + this.featureValues = in.readCollectionAsList(StreamInput::readGenericMap); } public List<Map<String, Object>> getFeatureValues() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java index 61b39e40a065c..5341efeec1094 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java @@ -91,7 +91,7 @@ public Request(StreamInput in) throws IOException { this.part = in.readVInt(); this.totalDefinitionLength = in.readVLong(); this.totalParts = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_043)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { this.allowOverwriting = in.readBoolean(); } else { this.allowOverwriting = false; @@ -148,7 +148,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(part); out.writeVLong(totalDefinitionLength); out.writeVInt(totalParts); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_043)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeBoolean(allowOverwriting); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java index 71d4ebdcb6ea5..c153cbc2c039b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java @@ -91,7 +91,7 @@ public Request(StreamInput in) throws IOException { } else { this.scores = List.of(); } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_043)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { this.allowOverwriting = in.readBoolean(); } else { this.allowOverwriting = false; @@ -139,7 +139,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeCollection(scores, StreamOutput::writeDouble); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_043)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeBoolean(allowOverwriting); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java index ad9ab7088fef5..c05c73bc31ddf 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java @@ -374,8 +374,10 @@ public static class TaskParams implements MlTaskParams, Writeable, 
ToXContentObj // TODO add support for other roles? If so, it may have to be an instance method... // NOTE, whatever determines assignment should not be dynamically set on the node // Otherwise assignment logic might fail - public static boolean mayAssignToNode(DiscoveryNode node) { - return node.getRoles().contains(DiscoveryNodeRole.ML_ROLE) && MlConfigVersion.fromNode(node).onOrAfter(VERSION_INTRODUCED); + public static boolean mayAssignToNode(@Nullable DiscoveryNode node) { + return node != null + && node.getRoles().contains(DiscoveryNodeRole.ML_ROLE) + && MlConfigVersion.fromNode(node).onOrAfter(VERSION_INTRODUCED); } public static final MlConfigVersion VERSION_INTRODUCED = MlConfigVersion.V_8_0_0; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/AggProvider.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/AggProvider.java index d994647743634..aa7e21fcd1aed 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/AggProvider.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/AggProvider.java @@ -114,7 +114,7 @@ static AggProvider fromParsedAggs(AggregatorFactories.Builder parsedAggs) throws static AggProvider fromStream(StreamInput in) throws IOException { return new AggProvider( - in.readMap(), + in.readGenericMap(), in.readOptionalWriteable(AggregatorFactories.Builder::new), in.readException(), in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0) ? in.readBoolean() : false diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index db1b66982f105..f818199ac1ef9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -300,7 +300,7 @@ public DatafeedConfig(StreamInput in) throws IOException { delayedDataCheckConfig = in.readOptionalWriteable(DelayedDataCheckConfig::new); maxEmptySearches = in.readOptionalVInt(); indicesOptions = IndicesOptions.readIndicesOptions(in); - runtimeMappings = in.readMap(); + runtimeMappings = in.readGenericMap(); } /** @@ -815,7 +815,7 @@ public Builder(StreamInput in) throws IOException { if (in.readBoolean()) { indicesOptions = IndicesOptions.readIndicesOptions(in); } - runtimeMappings = in.readMap(); + runtimeMappings = in.readGenericMap(); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java index 6b34277ac49ba..35eb672fe69cb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java @@ -166,7 +166,7 @@ public DatafeedUpdate(StreamInput in) throws IOException { delayedDataCheckConfig = in.readOptionalWriteable(DelayedDataCheckConfig::new); maxEmptySearches = in.readOptionalInt(); indicesOptions = in.readBoolean() ? IndicesOptions.readIndicesOptions(in) : null; - this.runtimeMappings = in.readBoolean() ? in.readMap() : null; + this.runtimeMappings = in.readBoolean() ? 
in.readGenericMap() : null; } /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfig.java index bff15c016af0e..4c9028f64c2fd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfig.java @@ -192,7 +192,7 @@ public DataFrameAnalyticsConfig(StreamInput in) throws IOException { this.allowLazyStart = in.readBoolean(); this.maxNumThreads = in.readVInt(); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { - Map<String, Object> readMeta = in.readMap(); + Map<String, Object> readMeta = in.readGenericMap(); this.meta = readMeta == null ? null : Collections.unmodifiableMap(readMeta); } else { this.meta = null; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigUpdate.java index 330683981d1ca..403e4f6de0e36 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigUpdate.java @@ -84,7 +84,7 @@ public DataFrameAnalyticsConfigUpdate(StreamInput in) throws IOException { this.allowLazyStart = in.readOptionalBoolean(); this.maxNumThreads = in.readOptionalVInt(); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { - Map<String, Object> readMeta = in.readMap(); + Map<String, Object> readMeta = in.readGenericMap(); this.meta = readMeta == null ? null : Collections.unmodifiableMap(readMeta); } else { this.meta = null; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsSource.java index 9c326f067caf7..b7564ff5ecbf5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsSource.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsSource.java @@ -107,7 +107,7 @@ public DataFrameAnalyticsSource(StreamInput in) throws IOException { index = in.readStringArray(); queryProvider = QueryProvider.fromStream(in); sourceFiltering = in.readOptionalWriteable(FetchSourceContext::readFrom); - runtimeMappings = in.readMap(); + runtimeMappings = in.readGenericMap(); } public DataFrameAnalyticsSource(DataFrameAnalyticsSource other) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ModelAliasMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/ModelAliasMetadata.java similarity index 99% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ModelAliasMetadata.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/ModelAliasMetadata.java index 0847479489ec2..1d6c5e564a442 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ModelAliasMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/ModelAliasMetadata.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.ml.inference; +package org.elasticsearch.xpack.core.ml.inference; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java index b469d35b90383..c80027a7234f3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java @@ -271,7 +271,7 @@ public TrainedModelConfig(StreamInput in) throws IOException { createTime = in.readInstant(); definition = in.readOptionalWriteable(LazyModelDefinition::fromStreamInput); tags = in.readCollectionAsImmutableList(StreamInput::readString); - metadata = in.readMap(); + metadata = in.readGenericMap(); input = new TrainedModelInput(in); modelSize = in.readVLong(); estimatedOperations = in.readVLong(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfo.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfo.java index fd2f3627e3fb1..826b0785aa563 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfo.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfo.java @@ -86,6 +86,10 @@ public int getTargetAllocations() { return targetAllocations; } + public int getFailedAllocations() { + return state == RoutingState.FAILED ? targetAllocations : 0; + } + public RoutingState getState() { return state; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java index d27d325a5c596..8147dabda7b48 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java @@ -287,6 +287,10 @@ public int totalTargetAllocations() { return nodeRoutingTable.values().stream().mapToInt(RoutingInfo::getTargetAllocations).sum(); } + public int totalFailedAllocations() { + return nodeRoutingTable.values().stream().mapToInt(RoutingInfo::getFailedAllocations).sum(); + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentMetadata.java similarity index 98% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadata.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentMetadata.java index aabedfc4351b5..36fec9ec7b243 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentMetadata.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.ml.inference.assignment; +package org.elasticsearch.xpack.core.ml.inference.assignment; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; @@ -23,7 +23,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.io.IOException; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentUtils.java similarity index 76% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentUtils.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentUtils.java index 3640d8dcb2808..fa0ce4a095ba0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentUtils.java @@ -5,15 +5,10 @@ * 2.0. */ -package org.elasticsearch.xpack.ml.inference.assignment; +package org.elasticsearch.xpack.core.ml.inference.assignment; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; -import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfoUpdate; -import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; -import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingStateAndReason; -import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import java.util.List; import java.util.Optional; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java index 7a51fb9a0fce3..293769371999e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java @@ -98,7 +98,7 @@ public LearningToRankConfig( public LearningToRankConfig(StreamInput in) throws IOException { super(in); this.featureExtractorBuilders = in.readNamedWriteableCollectionAsList(LearningToRankFeatureExtractorBuilder.class); - this.paramsDefaults = in.readMap(); + this.paramsDefaults = in.readGenericMap(); } public List<LearningToRankFeatureExtractorBuilder> getFeatureExtractorBuilders() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ModelPackageConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ModelPackageConfig.java index 536cce95df527..14bb5499ac4ab 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ModelPackageConfig.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ModelPackageConfig.java @@ -173,8 +173,8 @@ public ModelPackageConfig(StreamInput in) throws IOException { this.createTime = in.readOptionalInstant(); this.size = in.readVLong(); this.sha256 = in.readOptionalString(); - this.inferenceConfigSource = in.readMap(); - this.metadata = in.readMap(); + this.inferenceConfigSource = in.readGenericMap(); + this.metadata = in.readGenericMap(); this.modelType = in.readOptionalString(); this.tags = in.readOptionalCollectionAsList(StreamInput::readString); this.vocabularyFile = in.readOptionalString(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/CategorizationAnalyzerConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/CategorizationAnalyzerConfig.java index ac934a71ec311..0337000a201f2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/CategorizationAnalyzerConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/CategorizationAnalyzerConfig.java @@ -294,11 +294,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws */ public Map<String, Object> asMap(NamedXContentRegistry xContentRegistry) throws IOException { String strRep = Strings.toString(this); - XContentParser parser = JsonXContent.jsonXContent.createParser( - XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry).withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), - strRep - ); - return parser.mapOrdered(); + try ( + XContentParser parser = JsonXContent.jsonXContent.createParser( + XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry).withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), + strRep + ) + ) { + return parser.mapOrdered(); + } } @Override
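The `asMap` hunk above (like the `ModelSnapshot.fromJson` and `EnrollmentToken` hunks below) moves parser creation into try-with-resources so the `XContentParser` is always closed. A condensed sketch of the pattern, reusing only calls visible in these hunks; the import path for `JsonXContent` is an assumption:

```java
import java.io.IOException;
import java.util.Map;
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentParserConfiguration;
import org.elasticsearch.xcontent.json.JsonXContent; // assumed location of JsonXContent

class ClosedParserSketch {
    static Map<String, Object> parseOrdered(String json) throws IOException {
        // The parser is AutoCloseable; leaking it was the bug these hunks fix.
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, json)) {
            return parser.mapOrdered();
        }
    }
}
```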
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java index 081ddb6a395a8..1686cdea4340a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java @@ -291,7 +291,7 @@ public Job(StreamInput in) throws IOException { modelSnapshotRetentionDays = in.readOptionalLong(); dailyModelSnapshotRetentionAfterDays = in.readOptionalLong(); resultsRetentionDays = in.readOptionalLong(); - Map<String, Object> readCustomSettings = in.readMap(); + Map<String, Object> readCustomSettings = in.readGenericMap(); customSettings = readCustomSettings == null ? null : Collections.unmodifiableMap(readCustomSettings); modelSnapshotId = in.readOptionalString(); if (in.readBoolean()) { @@ -843,7 +843,7 @@ public Builder(StreamInput in) throws IOException { modelSnapshotRetentionDays = in.readOptionalLong(); dailyModelSnapshotRetentionAfterDays = in.readOptionalLong(); resultsRetentionDays = in.readOptionalLong(); - customSettings = in.readMap(); + customSettings = in.readGenericMap(); modelSnapshotId = in.readOptionalString(); if (in.readBoolean()) { modelSnapshotMinVersion = MlConfigVersion.readVersion(in); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java index 6dcdad2dc0a8d..3ba40c70d0701 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java @@ -178,7 +178,7 @@ public JobUpdate(StreamInput in) throws IOException { categorizationFilters = null; } perPartitionCategorizationConfig = in.readOptionalWriteable(PerPartitionCategorizationConfig::new); - customSettings = in.readMap(); + customSettings = in.readGenericMap(); modelSnapshotId = in.readOptionalString(); if (in.readBoolean()) { jobVersion = MlConfigVersion.readVersion(in); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index 36b4c0f1815ff..ad7a6b998fafd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -277,6 +277,8 @@ public final class Messages { public static final String REST_CANNOT_DELETE_FORECAST_IN_CURRENT_STATE = "Forecast(s) [{0}] for job [{1}] needs to be either FAILED or FINISHED to be deleted"; public static final String FIELD_CANNOT_BE_NULL = "Field [{0}] cannot be null"; + public static final String MODEL_ID_MATCHES_EXISTING_MODEL_IDS_BUT_MUST_NOT = + "Model IDs must be unique. 
Requested model ID [{0}] matches existing model IDs but must not."; private Messages() {}
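The new message constant uses an indexed placeholder because the other constants in this file follow `java.text.MessageFormat` conventions (`{0}`, `{1}`), and `Messages.getMessage` presumably formats them that way; a bare `{}` would be rejected at format time. For illustration:

```java
import java.text.MessageFormat;

class PlaceholderSketch {
    public static void main(String[] args) {
        // MessageFormat requires an argument index; "{}" throws IllegalArgumentException.
        String msg = MessageFormat.format(
            "Model IDs must be unique. Requested model ID [{0}] matches existing model IDs but must not.",
            "my-model"
        );
        System.out.println(msg);
    }
}
```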
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java index 088275ddabb3e..14097f5d363b4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java @@ -11,8 +11,8 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexAbstraction; @@ -183,7 +183,7 @@ protected void doRun() throws Exception { executeAsyncWithOrigin( client, ML_ORIGIN, - PutMappingAction.INSTANCE, + TransportPutMappingAction.TYPE, putMappingRequest, listener.delegateFailureAndWrap((delegate, response) -> { if (response.isAcknowledged()) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java index 75fd3e09f0a20..bf62a8a267f84 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java @@ -12,13 +12,12 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ObjectParser.ValueType; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.common.time.TimeUtils; @@ -26,7 +25,6 @@ import org.elasticsearch.xpack.core.ml.job.config.Job; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.Date; import java.util.List; @@ -368,9 +366,11 @@ public static String v54DocumentId(String jobId, String snapshotId) { public static ModelSnapshot fromJson(BytesReference bytesReference) { try ( - InputStream stream = bytesReference.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + bytesReference, + XContentType.JSON + ) ) { return LENIENT_PARSER.apply(parser, null).build(); } catch (IOException e) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java index 5d1b2ef9a08e5..016540815fb0a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java @@ -19,7 +19,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; @@ -328,12 +328,10 @@ public static void installIndexTemplateIfRequired( return; } - PutComposableIndexTemplateAction.Request request; - try { - request = new PutComposableIndexTemplateAction.Request(templateConfig.getTemplateName()).indexTemplate( - ComposableIndexTemplate.parse( - JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, templateConfig.loadBytes()) - ) + TransportPutComposableIndexTemplateAction.Request request; + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, templateConfig.loadBytes())) { + request = new TransportPutComposableIndexTemplateAction.Request(templateConfig.getTemplateName()).indexTemplate( + ComposableIndexTemplate.parse(parser) ).masterNodeTimeout(masterTimeout); } catch (IOException e) { throw new ElasticsearchParseException("unable to parse composable template " + templateConfig.getTemplateName(), e); @@ -355,7 +353,7 @@ public static void installIndexTemplateIfRequired( public static void installIndexTemplateIfRequired( ClusterState clusterState, Client client, - PutComposableIndexTemplateAction.Request templateRequest, + TransportPutComposableIndexTemplateAction.Request templateRequest, ActionListener<Boolean> listener ) { // The check for existence of the template is against the cluster state, so very cheap @@ -371,7 +369,7 @@ public static void installIndexTemplateIfRequired( l.onResponse(response.isAcknowledged()); }); - executeAsyncWithOrigin(client, ML_ORIGIN, PutComposableIndexTemplateAction.INSTANCE, templateRequest, innerListener); + executeAsyncWithOrigin(client, ML_ORIGIN, TransportPutComposableIndexTemplateAction.TYPE, templateRequest, innerListener); } public static boolean hasIndexTemplate(ClusterState state, String templateName) {
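As the comment in the `MlIndexAndAlias` hunk above notes, the template-existence check runs against the cluster state and is cheap, so it gates the asynchronous put-template call. A simplified sketch of that idiom; the `Runnable` stand-in for the async install step is an assumption:

```java
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.xpack.core.ml.utils.MlIndexAndAlias;

class TemplateInstallSketch {
    static void installIfRequired(ClusterState state, String templateName, Runnable install) {
        if (MlIndexAndAlias.hasIndexTemplate(state, templateName)) {
            return; // cheap cluster-state check: template already present
        }
        install.run(); // would issue the TransportPutComposableIndexTemplateAction request
    }
}
```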
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/QueryProvider.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/QueryProvider.java index 5b22165b57443..007b748a71d80 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/QueryProvider.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/QueryProvider.java @@ -73,7 +73,7 @@ public static QueryProvider fromParsedQuery(QueryBuilder parsedQuery) throws IOE } public static QueryProvider fromStream(StreamInput in) throws IOException { - return new QueryProvider(in.readMap(), in.readOptionalNamedWriteable(QueryBuilder.class), in.readException()); + return new QueryProvider(in.readGenericMap(), in.readOptionalNamedWriteable(QueryBuilder.class), in.readException()); } QueryProvider(Map<String, Object> query, QueryBuilder parsedQuery, Exception parsingException) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/XContentObjectTransformer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/XContentObjectTransformer.java index 07fbd61cec82b..e9392e868e377 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/XContentObjectTransformer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/XContentObjectTransformer.java @@ -18,6 +18,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.deprecation.LoggingDeprecationAccumulationHandler; @@ -82,8 +83,11 @@ public T fromMap(Map stringObjectMap, List deprecationWa LoggingDeprecationAccumulationHandler deprecationLogger = new LoggingDeprecationAccumulationHandler(); try ( XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(stringObjectMap); - XContentParser parser = XContentType.JSON.xContent() - .createParser(registry, deprecationLogger, BytesReference.bytes(xContentBuilder).streamInput()) + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY.withRegistry(registry).withDeprecationHandler(deprecationLogger), + BytesReference.bytes(xContentBuilder), + XContentType.JSON + ) ) { T retVal = parserFunction.apply(parser); deprecationWarnings.addAll(deprecationLogger.getDeprecations()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java index 546c139766248..4c0f347a3ffed 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java @@ -28,7 +28,7 @@ public class MonitoringFeatureSetUsage extends XPackFeatureSet.Usage { public MonitoringFeatureSetUsage(StreamInput in) throws IOException { super(in); - exporters = in.readMap(); + exporters = in.readGenericMap(); collectionEnabled = in.readOptionalBoolean(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java index 040f7e1637a50..292531499e619 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java @@ -211,7 +211,7 @@ public static class RollupFieldCaps implements Writeable, ToXContentFragment { int size = in.readInt(); List<Map<String, Object>> inAggs = new ArrayList<>(size); for (int i = 0; i < size; i++) { - inAggs.add(in.readMap()); + inAggs.add(in.readGenericMap()); } this.aggs = Collections.unmodifiableList(inAggs); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java index 
19c718073d702..1ba625a507a46 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java @@ -74,7 +74,7 @@ public RollupJobStatus(IndexerState state, @Nullable Map positio public RollupJobStatus(StreamInput in) throws IOException { state = IndexerState.fromStream(in); - currentPosition = in.readBoolean() ? new TreeMap<>(in.readMap()) : null; + currentPosition = in.readBoolean() ? new TreeMap<>(in.readGenericMap()) : null; if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { // 7.x nodes serialize `upgradedDocumentID` flag. We don't need it anymore, but // we need to pull it off the stream diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java index c2d15e54ed667..7596fe75b4173 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java @@ -139,7 +139,7 @@ public AsyncStatusResponse(StreamInput in) throws IOException { } else { this.clusters = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { this.completionTimeMillis = in.readOptionalVLong(); } else { this.completionTimeMillis = null; @@ -164,7 +164,7 @@ public void writeTo(StreamOutput out) throws IOException { // optional since only CCS uses is; it is null for local-only searches out.writeOptionalWriteable(clusters); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeOptionalVLong(completionTimeMillis); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/EnrollmentToken.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/EnrollmentToken.java index b5444449af1f4..466caa11771a5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/EnrollmentToken.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/EnrollmentToken.java @@ -109,11 +109,14 @@ public static EnrollmentToken decodeFromString(String encoded) throws IOExceptio if (Strings.isNullOrEmpty(encoded)) { throw new IOException("Cannot decode enrollment token from an empty string"); } - final XContentParser jsonParser = JsonXContent.jsonXContent.createParser( - XContentParserConfiguration.EMPTY, - Base64.getDecoder().decode(encoded) - ); - return EnrollmentToken.PARSER.parse(jsonParser, null); + try ( + XContentParser jsonParser = JsonXContent.jsonXContent.createParser( + XContentParserConfiguration.EMPTY, + Base64.getDecoder().decode(encoded) + ) + ) { + return EnrollmentToken.PARSER.parse(jsonParser, null); + } } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java index ca0ba9b8196bc..c88e13f80ba01 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java @@ -52,31 +52,31 @@ public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage { public SecurityFeatureSetUsage(StreamInput in) throws IOException { super(in); - realmsUsage = in.readMap(); - rolesStoreUsage = in.readMap(); - sslUsage = in.readMap(); + realmsUsage = in.readGenericMap(); + rolesStoreUsage = in.readGenericMap(); + sslUsage = in.readGenericMap(); if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) { - tokenServiceUsage = in.readMap(); - apiKeyServiceUsage = in.readMap(); + tokenServiceUsage = in.readGenericMap(); + apiKeyServiceUsage = in.readGenericMap(); } - auditUsage = in.readMap(); - ipFilterUsage = in.readMap(); - anonymousUsage = in.readMap(); - roleMappingStoreUsage = in.readMap(); + auditUsage = in.readGenericMap(); + ipFilterUsage = in.readGenericMap(); + anonymousUsage = in.readGenericMap(); + roleMappingStoreUsage = in.readGenericMap(); if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) { - fips140Usage = in.readMap(); + fips140Usage = in.readGenericMap(); } if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) { - operatorPrivilegesUsage = in.readMap(); + operatorPrivilegesUsage = in.readGenericMap(); } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) { - domainsUsage = in.readMap(); + domainsUsage = in.readGenericMap(); } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) { - userProfileUsage = in.readMap(); + userProfileUsage = in.readGenericMap(); } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { - remoteClusterServerUsage = in.readMap(); + remoteClusterServerUsage = in.readGenericMap(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ActionTypes.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ActionTypes.java new file mode 100644 index 0000000000000..fbc08a0dee8aa --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ActionTypes.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; + +/** + * A collection of actions types for the Security plugin that need to be available in xpack.core.security and thus cannot be stored + * directly with their transport action implementation. 
+ */ +public final class ActionTypes { + private ActionTypes() {}; + + public static final ActionType RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION = ActionType.localOnly( + "cluster:admin/xpack/security/remote_cluster_credentials/reload" + ); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java index b06b7728f541f..e57570ce7385b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java @@ -191,7 +191,7 @@ public ApiKey(StreamInput in) throws IOException { this.username = in.readString(); this.realm = in.readString(); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { - this.metadata = in.readMap(); + this.metadata = in.readGenericMap(); } else { this.metadata = Map.of(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseBulkUpdateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseBulkUpdateApiKeyRequest.java index 81c8479c47285..34b249d7a8233 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseBulkUpdateApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseBulkUpdateApiKeyRequest.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import java.io.IOException; @@ -27,9 +28,10 @@ public abstract class BaseBulkUpdateApiKeyRequest extends BaseUpdateApiKeyReques public BaseBulkUpdateApiKeyRequest( final List ids, @Nullable final List roleDescriptors, - @Nullable final Map metadata + @Nullable final Map metadata, + @Nullable final TimeValue expiration ) { - super(roleDescriptors, metadata); + super(roleDescriptors, metadata, expiration); this.ids = Objects.requireNonNull(ids, "API key IDs must not be null"); } @@ -56,4 +58,21 @@ public void writeTo(StreamOutput out) throws IOException { public List getIds() { return ids; } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass() || super.equals(o)) return false; + + BaseBulkUpdateApiKeyRequest that = (BaseBulkUpdateApiKeyRequest) o; + return Objects.equals(getIds(), that.getIds()) + && Objects.equals(metadata, that.metadata) + && Objects.equals(expiration, that.expiration) + && Objects.equals(roleDescriptors, that.roleDescriptors); + } + + @Override + public int hashCode() { + return Objects.hash(getIds(), expiration, metadata, roleDescriptors); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseSingleUpdateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseSingleUpdateApiKeyRequest.java index 7d89c0dd39b0c..725a9fb197b07 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseSingleUpdateApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseSingleUpdateApiKeyRequest.java @@ -10,6 +10,7 @@ 
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import java.io.IOException; @@ -24,9 +25,10 @@ public abstract class BaseSingleUpdateApiKeyRequest extends BaseUpdateApiKeyRequ public BaseSingleUpdateApiKeyRequest( @Nullable final List roleDescriptors, @Nullable final Map metadata, + @Nullable final TimeValue expiration, String id ) { - super(roleDescriptors, metadata); + super(roleDescriptors, metadata, expiration); this.id = Objects.requireNonNull(id, "API key ID must not be null"); } @@ -44,4 +46,21 @@ public void writeTo(StreamOutput out) throws IOException { public String getId() { return id; } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass() || super.equals(o)) return false; + + BaseSingleUpdateApiKeyRequest that = (BaseSingleUpdateApiKeyRequest) o; + return Objects.equals(getId(), that.getId()) + && Objects.equals(metadata, that.metadata) + && Objects.equals(expiration, that.expiration) + && Objects.equals(roleDescriptors, that.roleDescriptors); + } + + @Override + public int hashCode() { + return Objects.hash(getId(), expiration, metadata, roleDescriptors); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseUpdateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseUpdateApiKeyRequest.java index b06f8868c53d1..3813e8cb496d6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseUpdateApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseUpdateApiKeyRequest.java @@ -7,11 +7,13 @@ package org.elasticsearch.xpack.core.security.action.apikey; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.core.security.action.role.RoleDescriptorRequestValidator; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.support.MetadataUtils; @@ -28,16 +30,28 @@ public abstract class BaseUpdateApiKeyRequest extends ActionRequest { protected final List roleDescriptors; @Nullable protected final Map metadata; + @Nullable + protected final TimeValue expiration; - public BaseUpdateApiKeyRequest(@Nullable final List roleDescriptors, @Nullable final Map metadata) { + public BaseUpdateApiKeyRequest( + @Nullable final List roleDescriptors, + @Nullable final Map metadata, + @Nullable final TimeValue expiration + ) { this.roleDescriptors = roleDescriptors; this.metadata = metadata; + this.expiration = expiration; } public BaseUpdateApiKeyRequest(StreamInput in) throws IOException { super(in); this.roleDescriptors = in.readOptionalCollectionAsList(RoleDescriptor::new); - this.metadata = in.readMap(); + this.metadata = in.readGenericMap(); + if (in.getTransportVersion().onOrAfter(TransportVersions.UPDATE_API_KEY_EXPIRATION_TIME_ADDED)) { + expiration = in.readOptionalTimeValue(); + } else { + expiration = null; + } } public Map 
getMetadata() { @@ -48,6 +62,10 @@ public List getRoleDescriptors() { return roleDescriptors; } + public TimeValue getExpiration() { + return expiration; + } + public abstract ApiKey.Type getType(); @Override @@ -72,5 +90,8 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalCollection(roleDescriptors); out.writeGenericMap(metadata); + if (out.getTransportVersion().onOrAfter(TransportVersions.UPDATE_API_KEY_EXPIRATION_TIME_ADDED)) { + out.writeOptionalTimeValue(expiration); + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequest.java index d4712abd2cfe2..f915781c6211a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequest.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import java.io.IOException; @@ -19,19 +20,25 @@ public final class BulkUpdateApiKeyRequest extends BaseBulkUpdateApiKeyRequest { public static BulkUpdateApiKeyRequest usingApiKeyIds(String... ids) { - return new BulkUpdateApiKeyRequest(Arrays.stream(ids).toList(), null, null); + return new BulkUpdateApiKeyRequest(Arrays.stream(ids).toList(), null, null, null); } public static BulkUpdateApiKeyRequest wrap(final UpdateApiKeyRequest request) { - return new BulkUpdateApiKeyRequest(List.of(request.getId()), request.getRoleDescriptors(), request.getMetadata()); + return new BulkUpdateApiKeyRequest( + List.of(request.getId()), + request.getRoleDescriptors(), + request.getMetadata(), + request.getExpiration() + ); } public BulkUpdateApiKeyRequest( final List ids, @Nullable final List roleDescriptors, - @Nullable final Map metadata + @Nullable final Map metadata, + @Nullable final TimeValue expiration ) { - super(ids, roleDescriptors, metadata); + super(ids, roleDescriptors, metadata, expiration); } public BulkUpdateApiKeyRequest(StreamInput in) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequest.java index e49ba19631a7e..32669d5dca447 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequest.java @@ -66,7 +66,7 @@ public CreateApiKeyRequest(StreamInput in) throws IOException { this.roleDescriptors = in.readCollectionAsImmutableList(RoleDescriptor::new); this.refreshPolicy = WriteRequest.RefreshPolicy.readFrom(in); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { - this.metadata = in.readMap(); + this.metadata = in.readGenericMap(); } else { this.metadata = null; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilder.java index cd4cea270de6b..2747dc47058f8 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestBuilder.java @@ -11,16 +11,15 @@ import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import java.io.IOException; -import java.io.InputStream; import java.util.List; import java.util.Map; @@ -86,10 +85,12 @@ public CreateApiKeyRequestBuilder setMetadata(Map metadata) { } public CreateApiKeyRequestBuilder source(BytesReference source, XContentType xContentType) throws IOException { - final NamedXContentRegistry registry = NamedXContentRegistry.EMPTY; try ( - InputStream stream = source.streamInput(); - XContentParser parser = xContentType.xContent().createParser(registry, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + source, + xContentType + ) ) { CreateApiKeyRequest createApiKeyRequest = parse(parser); setName(createApiKeyRequest.getName()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateCrossClusterApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateCrossClusterApiKeyRequest.java index 46e19d8af6f74..a375808def6d7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateCrossClusterApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateCrossClusterApiKeyRequest.java @@ -43,7 +43,7 @@ public CreateCrossClusterApiKeyRequest(StreamInput in) throws IOException { this.expiration = in.readOptionalTimeValue(); this.roleDescriptors = in.readCollectionAsImmutableList(RoleDescriptor::new); this.refreshPolicy = WriteRequest.RefreshPolicy.readFrom(in); - this.metadata = in.readMap(); + this.metadata = in.readGenericMap(); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilder.java index 0763c208abf64..9695aeae283e2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilder.java @@ -91,10 +91,9 @@ public RoleDescriptor build() { } public static CrossClusterApiKeyRoleDescriptorBuilder parse(String access) throws IOException { - return CrossClusterApiKeyRoleDescriptorBuilder.PARSER.parse( - JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, access), - null - ); + try (var parser = 
JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, access)) { + return CrossClusterApiKeyRoleDescriptorBuilder.PARSER.parse(parser, null); + } } static void validate(RoleDescriptor roleDescriptor) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java index d76696dc4fe99..71e0c98fb0012 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java @@ -26,7 +26,7 @@ */ public final class GetApiKeyRequest extends ActionRequest { - static TransportVersion API_KEY_ACTIVE_ONLY_PARAM_TRANSPORT_VERSION = TransportVersions.V_8_500_054; + static TransportVersion API_KEY_ACTIVE_ONLY_PARAM_TRANSPORT_VERSION = TransportVersions.V_8_500_061; private final String realmName; private final String userName; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequest.java index 456aa4d636335..c5c8bcc4fc87a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequest.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import java.io.IOException; @@ -17,15 +18,16 @@ public final class UpdateApiKeyRequest extends BaseSingleUpdateApiKeyRequest { public static UpdateApiKeyRequest usingApiKeyId(final String id) { - return new UpdateApiKeyRequest(id, null, null); + return new UpdateApiKeyRequest(id, null, null, null); } public UpdateApiKeyRequest( final String id, @Nullable final List roleDescriptors, - @Nullable final Map metadata + @Nullable final Map metadata, + @Nullable final TimeValue expiration ) { - super(roleDescriptors, metadata, id); + super(roleDescriptors, metadata, expiration, id); } public UpdateApiKeyRequest(StreamInput in) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateCrossClusterApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateCrossClusterApiKeyRequest.java index ff6bb7da13660..184ce2c521ce0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateCrossClusterApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateCrossClusterApiKeyRequest.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import java.io.IOException; import java.util.List; @@ -22,9 +23,10 @@ public final class UpdateCrossClusterApiKeyRequest extends BaseSingleUpdateApiKe public UpdateCrossClusterApiKeyRequest( final String id, @Nullable CrossClusterApiKeyRoleDescriptorBuilder roleDescriptorBuilder, - @Nullable final Map metadata + @Nullable final 
Map metadata, + @Nullable TimeValue expiration ) { - super(roleDescriptorBuilder == null ? null : List.of(roleDescriptorBuilder.build()), metadata, id); + super(roleDescriptorBuilder == null ? null : List.of(roleDescriptorBuilder.build()), metadata, expiration, id); } public UpdateCrossClusterApiKeyRequest(StreamInput in) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java index ec0ecfc909980..79dcec0e448fc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java @@ -13,13 +13,12 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.List; import java.util.Objects; @@ -52,9 +51,11 @@ public PutPrivilegesRequestBuilder source(BytesReference source, XContentType xC Objects.requireNonNull(xContentType); // NamedXContentRegistry.EMPTY is ok here because we never call namedObject try ( - InputStream stream = source.streamInput(); - XContentParser parser = xContentType.xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + source, + xContentType + ) ) { XContentParser.Token token = parser.currentToken(); if (token == null) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/Profile.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/Profile.java index 1ce0540946edd..26c4d9b324027 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/Profile.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/Profile.java @@ -108,7 +108,15 @@ public void writeTo(StreamOutput out) throws IOException { } public Profile(StreamInput in) throws IOException { - this(in.readString(), in.readBoolean(), in.readLong(), new ProfileUser(in), in.readMap(), in.readMap(), new VersionControl(in)); + this( + in.readString(), + in.readBoolean(), + in.readLong(), + new ProfileUser(in), + in.readGenericMap(), + in.readGenericMap(), + new VersionControl(in) + ); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/UpdateProfileDataRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/UpdateProfileDataRequest.java index cd93a3c7e5cc1..90d59e0e39f48 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/UpdateProfileDataRequest.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/UpdateProfileDataRequest.java @@ -50,8 +50,8 @@ public UpdateProfileDataRequest( public UpdateProfileDataRequest(StreamInput in) throws IOException { super(in); this.uid = in.readString(); - this.labels = in.readMap(); - this.data = in.readMap(); + this.labels = in.readGenericMap(); + this.data = in.readGenericMap(); this.ifPrimaryTerm = in.readLong(); this.ifSeqNo = in.readLong(); this.refreshPolicy = RefreshPolicy.readFrom(in); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java index e82a77f311de6..bb7fe59dce5e6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java @@ -58,7 +58,7 @@ public PutRoleRequest(StreamInput in) throws IOException { configurableClusterPrivileges = ConfigurableClusterPrivileges.readArray(in); runAs = in.readStringArray(); refreshPolicy = RefreshPolicy.readFrom(in); - metadata = in.readMap(); + metadata = in.readGenericMap(); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { remoteIndicesPrivileges = in.readCollectionAsList(RoleDescriptor.RemoteIndicesPrivileges::new); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java index 8059a30b88952..039ed8aa5fb64 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java @@ -51,7 +51,7 @@ public PutRoleMappingRequest(StreamInput in) throws IOException { this.roleTemplates = in.readCollectionAsList(TemplateRoleName::new); } this.rules = ExpressionParser.readExpression(in); - this.metadata = in.readMap(); + this.metadata = in.readGenericMap(); this.refreshPolicy = RefreshPolicy.readFrom(in); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/settings/UpdateSecuritySettingsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/settings/UpdateSecuritySettingsAction.java index a2342f43595e2..88bc63a3a78f8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/settings/UpdateSecuritySettingsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/settings/UpdateSecuritySettingsAction.java @@ -78,9 +78,9 @@ public Request( } public Request(StreamInput in) throws IOException { - this.mainIndexSettings = in.readMap(); - this.tokensIndexSettings = in.readMap(); - this.profilesIndexSettings = in.readMap(); + this.mainIndexSettings = in.readGenericMap(); + this.tokensIndexSettings = in.readGenericMap(); + this.profilesIndexSettings = in.readGenericMap(); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java index 5c75bf685c330..73ee4d1f27299 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java @@ -20,7 +20,7 @@ public class AuthenticateResponse extends ActionResponse implements ToXContent { - public static final TransportVersion VERSION_OPERATOR_FIELD = TransportVersions.V_8_500_040; + public static final TransportVersion VERSION_OPERATOR_FIELD = TransportVersions.V_8_500_061; private final Authentication authentication; private final boolean operator; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java index 37f9d85a29573..a188c664f66de 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java @@ -44,7 +44,7 @@ public PutUserRequest(StreamInput in) throws IOException { roles = in.readStringArray(); fullName = in.readOptionalString(); email = in.readOptionalString(); - metadata = in.readBoolean() ? in.readMap() : null; + metadata = in.readBoolean() ? in.readGenericMap() : null; refreshPolicy = RefreshPolicy.readFrom(in); enabled = in.readBoolean(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequestBuilder.java index ef33decc9f79c..7ae915d2db791 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequestBuilder.java @@ -15,7 +15,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParser.Token; import org.elasticsearch.xcontent.XContentType; @@ -25,7 +25,6 @@ import org.elasticsearch.xpack.core.security.xcontent.XContentUtils; import java.io.IOException; -import java.io.InputStream; import java.util.Locale; import java.util.Map; import java.util.Objects; @@ -118,9 +117,11 @@ public PutUserRequestBuilder source(String username, BytesReference source, XCon username(username); // EMPTY is ok here because we never call namedObject try ( - InputStream stream = source.streamInput(); - XContentParser parser = xContentType.xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + source, + xContentType + ) ) { XContentUtils.verifyObject(parser); XContentParser.Token token; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java index 4d285cf3b144d..d71690f3dc8e7 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java @@ -757,7 +757,7 @@ private static Map readMetadata(StreamInput in) throws IOExcepti } return metadata; } else { - return in.readMap(); + return in.readGenericMap(); } } @@ -1467,7 +1467,7 @@ private static User readUserWithoutTrailingBoolean(StreamInput input) throws IOE return InternalUsers.getUser(username); } String[] roles = input.readStringArray(); - Map metadata = input.readMap(); + Map metadata = input.readGenericMap(); String fullName = input.readOptionalString(); String email = input.readOptionalString(); boolean enabled = input.readBoolean(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java index 426e8b0563a90..f0976a058738a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java @@ -14,8 +14,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ObjectParser.ValueType; import org.elasticsearch.xcontent.ParseField; @@ -29,7 +29,6 @@ import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -103,7 +102,7 @@ public ExpressionRoleMapping(StreamInput in) throws IOException { this.roleTemplates = Collections.emptyList(); } this.expression = ExpressionParser.readExpression(in); - this.metadata = in.readMap(); + this.metadata = in.readGenericMap(); } @Override @@ -200,10 +199,12 @@ public int hashCode() { * Parse an {@link ExpressionRoleMapping} from the provided XContent */ public static ExpressionRoleMapping parse(String name, BytesReference source, XContentType xContentType) throws IOException { - final NamedXContentRegistry registry = NamedXContentRegistry.EMPTY; try ( - InputStream stream = source.streamInput(); - XContentParser parser = xContentType.xContent().createParser(registry, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + source, + xContentType + ) ) { return parse(name, parser); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleName.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleName.java index 83a36510aa201..cf42d73c75131 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleName.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleName.java @@ -116,32 +116,35 @@ public void 
validate(ScriptService scriptService) { } private static List convertJsonToList(String evaluation) throws IOException { - final XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(XContentParserConfiguration.EMPTY, evaluation); - XContentParser.Token token = parser.currentToken(); - if (token == null) { - token = parser.nextToken(); - } - if (token == XContentParser.Token.VALUE_STRING) { - return Collections.singletonList(parser.text()); - } else if (token == XContentParser.Token.START_ARRAY) { - return parser.list().stream().filter(Objects::nonNull).map(o -> { - if (o instanceof String) { - return (String) o; - } else { - throw new XContentParseException( - "Roles array may only contain strings but found [" + o.getClass().getName() + "] [" + o + "]" - ); - } - }).collect(Collectors.toList()); - } else { - throw new XContentParseException("Roles template must generate a string or an array of strings, but found [" + token + "]"); + try ( + XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(XContentParserConfiguration.EMPTY, evaluation) + ) { + XContentParser.Token token = parser.currentToken(); + if (token == null) { + token = parser.nextToken(); + } + if (token == XContentParser.Token.VALUE_STRING) { + return Collections.singletonList(parser.text()); + } else if (token == XContentParser.Token.START_ARRAY) { + return parser.list().stream().filter(Objects::nonNull).map(o -> { + if (o instanceof String) { + return (String) o; + } else { + throw new XContentParseException( + "Roles array may only contain strings but found [" + o.getClass().getName() + "] [" + o + "]" + ); + } + }).collect(Collectors.toList()); + } else { + throw new XContentParseException("Roles template must generate a string or an array of strings, but found [" + token + "]"); + } } } private String parseTemplate(ScriptService scriptService, Map parameters) throws IOException { - final XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, template, XContentType.JSON); - return MustacheTemplateEvaluator.evaluate(scriptService, parser, parameters); + try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, template, XContentType.JSON)) { + return MustacheTemplateEvaluator.evaluate(scriptService, parser, parameters); + } } private static BytesReference extractTemplate(XContentParser parser, Void ignore) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParser.java index 5ec28dc68181e..3bbc3a18de210 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParser.java @@ -11,13 +11,11 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.xcontent.ContextParser; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import 
java.util.Collections; import java.util.List; @@ -57,8 +55,8 @@ public static RoleMapperExpression parseObject(XContentParser parser, String id) * @param content The XContent (typically JSON) DSL representation of the expression */ public RoleMapperExpression parse(String name, XContentSource content) throws IOException { - try (InputStream stream = content.getBytes().streamInput()) { - return parse(name, content.parser(NamedXContentRegistry.EMPTY, stream)); + try (var parser = content.parser(content.getBytes())) { + return parse(name, parser); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java index 6a5df4370bd50..f39eca877432c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java @@ -19,14 +19,13 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.security.authz.AuthorizationEngine.PrivilegesToCheck; @@ -38,7 +37,6 @@ import org.elasticsearch.xpack.core.security.xcontent.XContentUtils; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -190,8 +188,8 @@ public RoleDescriptor(StreamInput in) throws IOException { indicesPrivileges[i] = new IndicesPrivileges(in); } this.runAs = in.readStringArray(); - this.metadata = in.readMap(); - this.transientMetadata = in.readMap(); + this.metadata = in.readGenericMap(); + this.transientMetadata = in.readGenericMap(); this.applicationPrivileges = in.readArray(ApplicationResourcePrivileges::new, ApplicationResourcePrivileges[]::new); this.configurableClusterPrivileges = ConfigurableClusterPrivileges.readArray(in); @@ -435,12 +433,7 @@ public static RoleDescriptor parse( boolean allowRestriction ) throws IOException { assert name != null; - // EMPTY is safe here because we never use namedObject - try ( - InputStream stream = source.streamInput(); - XContentParser parser = xContentType.xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) - ) { + try (XContentParser parser = createParser(source, xContentType)) { return parse(name, parser, allow2xFormat, allowRestriction); } } @@ -610,11 +603,7 @@ public static PrivilegesToCheck parsePrivilegesToCheck( BytesReference source, XContentType xContentType ) throws IOException { - try ( - InputStream stream = source.streamInput(); - XContentParser parser = xContentType.xContent() - .createParser(XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), stream) - ) { + try (XContentParser parser = 
createParser(source, xContentType)) { // advance to the START_OBJECT token XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { @@ -628,6 +617,10 @@ public static PrivilegesToCheck parsePrivilegesToCheck( } } + private static XContentParser createParser(BytesReference source, XContentType xContentType) throws IOException { + return XContentHelper.createParserNotCompressed(LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, source, xContentType); + } + private static RoleDescriptor.IndicesPrivileges[] parseIndices(String roleName, XContentParser parser, boolean allow2xFormat) throws IOException { if (parser.currentToken() != XContentParser.Token.START_ARRAY) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java index 13558175afaa3..d29b1dd67757a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java @@ -8,8 +8,8 @@ import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.Operations; -import org.elasticsearch.action.admin.indices.mapping.put.AutoPutMappingAction; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.TransportAutoPutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.common.Strings; @@ -649,7 +649,7 @@ private boolean isConcreteRestrictedIndex(String indexPattern) { } private static boolean isMappingUpdateAction(String action) { - return action.equals(PutMappingAction.NAME) || action.equals(AutoPutMappingAction.NAME); + return action.equals(TransportPutMappingAction.TYPE.name()) || action.equals(TransportAutoPutMappingAction.TYPE.name()); } private static boolean containsPrivilegeThatGrantsMappingUpdatesForBwc(Group group) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeDescriptor.java index 1cf59710d2476..bd302fd4ac217 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeDescriptor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeDescriptor.java @@ -66,7 +66,7 @@ public ApplicationPrivilegeDescriptor(StreamInput input) throws IOException { this.application = input.readString(); this.name = input.readString(); this.actions = input.readCollectionAsImmutableSet(StreamInput::readString); - this.metadata = Collections.unmodifiableMap(input.readMap()); + this.metadata = Collections.unmodifiableMap(input.readGenericMap()); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java index 9230519a85773..f93599cdb98cc 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java @@ -28,8 +28,7 @@ import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.ilm.action.GetLifecycleAction; import org.elasticsearch.xpack.core.ilm.action.GetStatusAction; -import org.elasticsearch.xpack.core.ilm.action.StartILMAction; -import org.elasticsearch.xpack.core.ilm.action.StopILMAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationAction; import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; @@ -149,8 +148,8 @@ public class ClusterPrivilegeResolver { private static final Set READ_ILM_PATTERN = Set.of(GetLifecycleAction.NAME, GetStatusAction.NAME); private static final Set MANAGE_SLM_PATTERN = Set.of( "cluster:admin/slm/*", - StartILMAction.NAME, - StopILMAction.NAME, + ILMActions.START.name(), + ILMActions.STOP.name(), GetStatusAction.NAME ); private static final Set READ_SLM_PATTERN = Set.of(GetSnapshotLifecycleAction.NAME, GetStatusAction.NAME); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java index 390b443c42367..fed8b7e0d7a1c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java @@ -166,7 +166,7 @@ public WriteProfileDataPrivileges(Set applicationNames) { if (request instanceof final UpdateProfileDataRequest updateProfileRequest) { assert null == updateProfileRequest.validate(); final Collection requestApplicationNames = updateProfileRequest.getApplicationNames(); - return requestApplicationNames.stream().allMatch(application -> applicationPredicate.test(application)); + return requestApplicationNames.stream().allMatch(applicationPredicate); } return false; }; @@ -274,7 +274,7 @@ public ManageApplicationPrivileges(Set applicationNames) { final Collection requestApplicationNames = privRequest.getApplicationNames(); return requestApplicationNames.isEmpty() ? 
this.applicationNames.contains("*") - : requestApplicationNames.stream().allMatch(application -> applicationPredicate.test(application)); + : requestApplicationNames.stream().allMatch(applicationPredicate); } return false; }; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java index 4968352439fb0..6a386fc55bb69 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java @@ -18,7 +18,7 @@ import org.elasticsearch.action.admin.indices.get.GetIndexAction; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsAction; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction; -import org.elasticsearch.action.admin.indices.mapping.put.AutoPutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.TransportAutoPutMappingAction; import org.elasticsearch.action.admin.indices.resolve.ResolveIndexAction; import org.elasticsearch.action.admin.indices.rollover.RolloverAction; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; @@ -78,7 +78,11 @@ public final class IndexPrivilege extends Privilege { private static final Automaton READ_CROSS_CLUSTER_AUTOMATON = patterns( "internal:transport/proxy/indices:data/read/*", ClusterSearchShardsAction.NAME, - TransportSearchShardsAction.TYPE.name() + TransportSearchShardsAction.TYPE.name(), + // cross clusters query for ESQL + "internal:data/read/esql/open_exchange", + "internal:data/read/esql/exchange", + "indices:data/read/esql/cluster" ); private static final Automaton CREATE_AUTOMATON = patterns( "indices:data/write/index*", @@ -99,7 +103,7 @@ public final class IndexPrivilege extends Privilege { "indices:data/write/simulate/bulk*" ); private static final Automaton DELETE_AUTOMATON = patterns("indices:data/write/delete*", "indices:data/write/bulk*"); - private static final Automaton WRITE_AUTOMATON = patterns("indices:data/write/*", AutoPutMappingAction.NAME); + private static final Automaton WRITE_AUTOMATON = patterns("indices:data/write/*", TransportAutoPutMappingAction.TYPE.name()); private static final Automaton MONITOR_AUTOMATON = patterns("indices:monitor/*"); private static final Automaton MANAGE_AUTOMATON = unionAndMinimize( Arrays.asList( @@ -147,7 +151,7 @@ public final class IndexPrivilege extends Privilege { "indices:admin/synced_flush", "indices:admin/forcemerge*" ); - private static final Automaton AUTO_CONFIGURE_AUTOMATON = patterns(AutoPutMappingAction.NAME, AutoCreateAction.NAME); + private static final Automaton AUTO_CONFIGURE_AUTOMATON = patterns(TransportAutoPutMappingAction.TYPE.name(), AutoCreateAction.NAME); private static final Automaton CROSS_CLUSTER_REPLICATION_AUTOMATON = patterns( "indices:data/read/xpack/ccr/shard_changes*", diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java index 2616b63df7c01..013d7cc21a54a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java @@ -12,6 +12,7 @@ import org.elasticsearch.index.seqno.RetentionLeaseSyncAction; import org.elasticsearch.persistent.CompletionPersistentTaskAction; import org.elasticsearch.transport.TransportActionProxy; +import org.elasticsearch.xpack.core.security.action.ActionTypes; import org.elasticsearch.xpack.core.security.support.StringMatcher; import java.util.Collections; @@ -43,7 +44,8 @@ public final class SystemPrivilege extends Privilege { "indices:data/read/*", // needed for SystemIndexMigrator "indices:admin/refresh", // needed for SystemIndexMigrator "indices:admin/aliases", // needed for SystemIndexMigrator - TransportSearchShardsAction.TYPE.name() // added so this API can be called with the system user by other APIs + TransportSearchShardsAction.TYPE.name(), // added so this API can be called with the system user by other APIs + ActionTypes.RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION.name() // needed for Security plugin reload of remote cluster credentials ); private static final Predicate PREDICATE = (action) -> { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index 474ba25e3e117..46e4a5cec2486 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -9,9 +9,9 @@ import org.elasticsearch.action.admin.indices.alias.TransportIndicesAliasesAction; import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.action.admin.indices.rollover.RolloverAction; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; +import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction; import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkAction; import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyAction; import org.elasticsearch.xpack.core.security.action.privilege.GetBuiltinPrivilegesAction; @@ -245,14 +245,18 @@ static RoleDescriptor kibanaSystem(String name) { "profiling-*" ) .privileges( - UpdateSettingsAction.NAME, - PutMappingAction.NAME, + TransportUpdateSettingsAction.TYPE.name(), + TransportPutMappingAction.TYPE.name(), RolloverAction.NAME, "indices:admin/data_stream/lifecycle/put" ) .build(), - // Endpoint specific action responses. Kibana reads from these to display responses to the user. - RoleDescriptor.IndicesPrivileges.builder().indices(".logs-endpoint.action.responses-*").privileges("read").build(), + // Endpoint specific action responses. Kibana reads and writes (for third party agents) to the index + // to display action responses to the user. + RoleDescriptor.IndicesPrivileges.builder() + .indices(".logs-endpoint.action.responses-*") + .privileges("auto_configure", "read", "write") + .build(), // Endpoint specific actions. Kibana reads and writes to this index to track new actions and display them. 
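// Illustrative aside, not part of this role definition: with the UpdateSettingsAction.NAME and
// PutMappingAction.NAME constants removed, entries in these privilege lists obtain action names from
// the ActionType held by the corresponding transport action. A hypothetical entry (made-up index
// pattern) would look like:
//
//     RoleDescriptor.IndicesPrivileges.builder()
//         .indices("my-made-up-index-*")
//         .privileges("read", TransportUpdateSettingsAction.TYPE.name())
//         .build()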
RoleDescriptor.IndicesPrivileges.builder() .indices(".logs-endpoint.actions-*") @@ -304,7 +308,7 @@ static RoleDescriptor kibanaSystem(String name) { "read", "index", TransportIndicesAliasesAction.NAME, - UpdateSettingsAction.NAME + TransportUpdateSettingsAction.TYPE.name() ) .build(), // For destination indices of the Threat Intel (ti_*) packages that ships a transform for supporting IOC expiration @@ -318,7 +322,7 @@ static RoleDescriptor kibanaSystem(String name) { "delete", "manage", TransportIndicesAliasesAction.NAME, - UpdateSettingsAction.NAME + TransportUpdateSettingsAction.TYPE.name() ) .build(), // For source indices of the Threat Intel (ti_*) packages that ships a transform for supporting IOC expiration @@ -342,7 +346,7 @@ static RoleDescriptor kibanaSystem(String name) { "index", "view_index_metadata", TransportIndicesAliasesAction.NAME, - UpdateSettingsAction.NAME + TransportUpdateSettingsAction.TYPE.name() ) .build(), // For src/dest indices of the Cloud Security Posture packages that ships a transform @@ -356,7 +360,14 @@ static RoleDescriptor kibanaSystem(String name) { "logs-cloud_security_posture.scores-default*", "logs-cloud_security_posture.vulnerabilities_latest-default*" ) - .privileges("create_index", "read", "index", "delete", TransportIndicesAliasesAction.NAME, UpdateSettingsAction.NAME) + .privileges( + "create_index", + "read", + "index", + "delete", + TransportIndicesAliasesAction.NAME, + TransportUpdateSettingsAction.TYPE.name() + ) .build(), RoleDescriptor.IndicesPrivileges.builder().indices("risk-score.risk-*").privileges("all").build(), RoleDescriptor.IndicesPrivileges.builder() diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index 96eaec5d93158..2bfcf9a12366e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -15,7 +15,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.xpack.core.ilm.action.GetLifecycleAction; -import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkAction; import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; @@ -204,7 +204,7 @@ private static Map initializeReservedRoles() { "manage_ingest_pipelines", "monitor", GetLifecycleAction.NAME, - PutLifecycleAction.NAME, + ILMActions.PUT.name(), "cluster:monitor/xpack/watcher/watch/get", "cluster:admin/xpack/watcher/watch/put", "cluster:admin/xpack/watcher/watch/delete" }, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java index 0cc5c9367ea50..6162fb36c0497 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockAction; import 
org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.rollover.RolloverAction; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; +import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.downsample.DownsampleAction; import org.elasticsearch.xpack.core.XPackPlugin; @@ -146,7 +146,7 @@ public class InternalUsers { ForceMergeAction.NAME + "*", // indices stats is used by rollover, so we need to grant it here IndicesStatsAction.NAME + "*", - UpdateSettingsAction.NAME, + TransportUpdateSettingsAction.TYPE.name(), DownsampleAction.NAME, AddIndexBlockAction.NAME ) @@ -165,7 +165,7 @@ public class InternalUsers { ForceMergeAction.NAME + "*", // indices stats is used by rollover, so we need to grant it here IndicesStatsAction.NAME + "*", - UpdateSettingsAction.NAME, + TransportUpdateSettingsAction.TYPE.name(), DownsampleAction.NAME, AddIndexBlockAction.NAME ) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java index 357be8768fd07..4dc0ea1d77e42 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java @@ -99,7 +99,7 @@ public SnapshotLifecyclePolicy(StreamInput in) throws IOException { this.name = in.readString(); this.schedule = in.readString(); this.repository = in.readString(); - this.configuration = in.readMap(); + this.configuration = in.readGenericMap(); this.retentionPolicy = in.readOptionalWriteable(SnapshotRetentionConfiguration::new); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java index 7a46220644abd..d1b71a92c061d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java @@ -337,10 +337,6 @@ public SnapshotPolicyStats(StreamInput in) throws IOException { this.snapshotDeleteFailures.inc(in.readVLong()); } - public static SnapshotPolicyStats parse(XContentParser parser) { - return PARSER.apply(parser, null); - } - public SnapshotPolicyStats merge(SnapshotPolicyStats other) { return new SnapshotPolicyStats( this.policyId, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/sql/SqlFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/sql/SqlFeatureSetUsage.java index fbb8025d0e446..a431dab0b34d5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/sql/SqlFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/sql/SqlFeatureSetUsage.java @@ -24,7 +24,7 @@ public class SqlFeatureSetUsage extends XPackFeatureSet.Usage { public SqlFeatureSetUsage(StreamInput in) throws IOException { super(in); - stats = in.readMap(); + stats = in.readGenericMap(); } public SqlFeatureSetUsage(Map stats) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java index b0f1c78b0c99d..ecec5991ade39 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java @@ -14,10 +14,10 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; -import org.elasticsearch.action.ingest.PutPipelineAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; @@ -47,7 +47,8 @@ import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.ilm.IndexLifecycleMetadata; import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; -import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; +import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; import java.io.IOException; import java.util.Arrays; @@ -535,9 +536,8 @@ private void putComposableTemplate( ) { final Executor executor = threadPool.generic(); executor.execute(() -> { - PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(templateName).indexTemplate( - indexTemplate - ); + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(templateName) + .indexTemplate(indexTemplate); request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); executeAsyncWithOrigin( client.threadPool().getThreadContext(), @@ -568,7 +568,7 @@ public void onFailure(Exception e) { onPutTemplateFailure(templateName, e); } }, - (req, listener) -> client.execute(PutComposableIndexTemplateAction.INSTANCE, req, listener) + (req, listener) -> client.execute(TransportPutComposableIndexTemplateAction.TYPE, req, listener) ); }); } @@ -614,7 +614,7 @@ protected boolean isUpgradeRequired(LifecyclePolicy currentPolicy, LifecyclePoli private void putPolicy(final LifecyclePolicy policy, final AtomicBoolean creationCheck) { final Executor executor = threadPool.generic(); executor.execute(() -> { - PutLifecycleAction.Request request = new PutLifecycleAction.Request(policy); + PutLifecycleRequest request = new PutLifecycleRequest(policy); request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); executeAsyncWithOrigin( client.threadPool().getThreadContext(), @@ -639,17 +639,15 @@ public void onFailure(Exception e) { onPutPolicyFailure(policy, e); } }, - (req, listener) -> client.execute(PutLifecycleAction.INSTANCE, req, listener) + (req, listener) -> client.execute(ILMActions.PUT, req, listener) ); }); } protected static Map parseComposableTemplates(IndexTemplateConfig... 
config) { return Arrays.stream(config).collect(Collectors.toUnmodifiableMap(IndexTemplateConfig::getTemplateName, indexTemplateConfig -> { - try { - return ComposableIndexTemplate.parse( - JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, indexTemplateConfig.loadBytes()) - ); + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, indexTemplateConfig.loadBytes())) { + return ComposableIndexTemplate.parse(parser); } catch (IOException e) { throw new AssertionError(e); } @@ -756,7 +754,7 @@ public void onFailure(Exception e) { onPutPipelineFailure(pipelineConfig.getId(), e); } }, - (req, listener) -> client.execute(PutPipelineAction.INSTANCE, req, listener) + (req, listener) -> client.execute(PutPipelineTransportAction.TYPE, req, listener) ); }); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/structurefinder/FieldStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/structurefinder/FieldStats.java index 815723e40a373..90e59b525e423 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/structurefinder/FieldStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/structurefinder/FieldStats.java @@ -123,7 +123,7 @@ public FieldStats(StreamInput in) throws IOException { medianValue = in.readOptionalDouble(); earliestTimestamp = in.readOptionalString(); latestTimestamp = in.readOptionalString(); - topHits = in.readCollectionAsList(StreamInput::readMap); + topHits = in.readCollectionAsList(StreamInput::readGenericMap); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/structurefinder/TextStructure.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/structurefinder/TextStructure.java index b39fc27b7a148..6c49d94a4ee90 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/structurefinder/TextStructure.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/structurefinder/TextStructure.java @@ -235,8 +235,8 @@ public TextStructure(StreamInput in) throws IOException { javaTimestampFormats = in.readBoolean() ? in.readCollectionAsImmutableList(StreamInput::readString) : null; timestampField = in.readOptionalString(); needClientTimezone = in.readBoolean(); - mappings = Collections.unmodifiableSortedMap(new TreeMap<>(in.readMap())); - ingestPipeline = in.readBoolean() ? Collections.unmodifiableMap(in.readMap()) : null; + mappings = Collections.unmodifiableSortedMap(new TreeMap<>(in.readGenericMap())); + ingestPipeline = in.readBoolean() ? 
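The `parseComposableTemplates` hunk above moves parser creation into a try-with-resources header so the `XContentParser` is closed even when parsing throws. A self-contained sketch of the shape of that fix; the parser type is a stand-in:

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

// Stand-in parser: AutoCloseable, like the real XContentParser.
final class ParserSketch implements AutoCloseable {
    private final InputStream in;
    ParserSketch(byte[] bytes) { this.in = new ByteArrayInputStream(bytes); }
    String parse() throws IOException { return new String(in.readAllBytes()); }
    @Override public void close() throws IOException { in.close(); }
}

public class ClosedParserDemo {
    static String parseTemplate(byte[] templateBytes) {
        // Opening the parser in the try header guarantees close() runs on the
        // success path and the exception path alike.
        try (var parser = new ParserSketch(templateBytes)) {
            return parser.parse();
        } catch (IOException e) {
            // Template resources are known at build time, so an IOException
            // indicates a packaging bug -- the same reasoning as the
            // AssertionError in the hunk above.
            throw new AssertionError(e);
        }
    }

    public static void main(String[] args) {
        System.out.println(parseTemplate("{\"index_patterns\":[\"logs-*\"]}".getBytes()));
    }
}
```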
Collections.unmodifiableMap(in.readGenericMap()) : null; fieldStats = Collections.unmodifiableSortedMap(new TreeMap<>(in.readMap(FieldStats::new))); explanation = in.readCollectionAsImmutableList(StreamInput::readString); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformAction.java index d70b10de348fa..98018589e73a4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; @@ -84,12 +85,11 @@ public static Request fromXContent(final XContentParser parser, TimeValue timeou content.putIfAbsent(TransformField.ID.getPreferredName(), "transform-preview"); try ( XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(content); - XContentParser newParser = XContentType.JSON.xContent() - .createParser( - parser.getXContentRegistry(), - LoggingDeprecationHandler.INSTANCE, - BytesReference.bytes(xContentBuilder).streamInput() - ) + XContentParser newParser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG.withRegistry(parser.getXContentRegistry()), + BytesReference.bytes(xContentBuilder), + XContentType.JSON + ) ) { return new Request(TransformConfig.fromXContent(newParser, null, false), timeout); } @@ -183,7 +183,7 @@ public Response(StreamInput in) throws IOException { int size = in.readInt(); this.docs = new ArrayList<>(size); for (int i = 0; i < size; i++) { - this.docs.add(in.readMap()); + this.docs.add(in.readGenericMap()); } this.generatedDestIndexSettings = new TransformDestIndexSettings(in); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/QueryConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/QueryConfig.java index 2df00837f9a3a..451ec7ce8aadc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/QueryConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/QueryConfig.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -25,6 +26,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue.Level; @@ -59,7 +61,7 @@ public QueryConfig(final 
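In `PreviewTransformAction.Request.fromXContent` above, the request body is parsed into a map, given a placeholder id when the caller omits one, and re-serialized so it can pass through the strict `TransformConfig` parser. A small sketch of just the defaulting step:

```java
import java.util.HashMap;
import java.util.Map;

public class PreviewDefaultIdDemo {
    public static void main(String[] args) {
        // Parsed request body; a preview request has no user-supplied id.
        Map<String, Object> content = new HashMap<>();
        content.put("source", Map.of("index", "my-index"));

        // Mirrors content.putIfAbsent(TransformField.ID..., "transform-preview"):
        // inject the placeholder only when the field is genuinely absent.
        content.putIfAbsent("id", "transform-preview");

        System.out.println(content.get("id")); // transform-preview
    }
}
```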
Map source, final QueryBuilder query) { } public QueryConfig(final StreamInput in) throws IOException { - this.source = in.readMap(); + this.source = in.readGenericMap(); this.query = in.readOptionalNamedWriteable(QueryBuilder.class); } @@ -106,11 +108,17 @@ private static QueryBuilder queryFromXContent( NamedXContentRegistry namedXContentRegistry, DeprecationHandler deprecationHandler ) throws IOException { - QueryBuilder query = null; + final QueryBuilder query; XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(source); - XContentParser sourceParser = XContentType.JSON.xContent() - .createParser(namedXContentRegistry, deprecationHandler, BytesReference.bytes(xContentBuilder).streamInput()); - query = AbstractQueryBuilder.parseTopLevelQuery(sourceParser); + try ( + XContentParser sourceParser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY.withRegistry(namedXContentRegistry).withDeprecationHandler(deprecationHandler), + BytesReference.bytes(xContentBuilder), + XContentType.JSON + ) + ) { + query = AbstractQueryBuilder.parseTopLevelQuery(sourceParser); + } return query; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfig.java index 0957d70e9ab5c..476c34c1f5592 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfig.java @@ -111,7 +111,7 @@ private static String[] extractIndices(String[] index) { public SourceConfig(final StreamInput in) throws IOException { index = in.readStringArray(); queryConfig = new QueryConfig(in); - runtimeMappings = in.readMap(); + runtimeMappings = in.readGenericMap(); } public String[] getIndex() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpoint.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpoint.java index b122e96339e9e..fea6299c73e62 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpoint.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpoint.java @@ -128,7 +128,7 @@ public TransformCheckpoint(StreamInput in) throws IOException { this.transformId = in.readString(); this.timestampMillis = in.readLong(); this.checkpoint = in.readLong(); - this.indicesCheckpoints = readCheckpoints(in.readMap()); + this.indicesCheckpoints = readCheckpoints(in.readGenericMap()); this.timeUpperBoundMillis = in.readLong(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java index 9658cdd74e196..d89eb9b397180 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java @@ -257,7 +257,7 @@ public TransformConfig(final StreamInput in) throws IOException { createTime = in.readOptionalInstant(); transformVersion = in.readBoolean() ? 
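`QueryConfig` (and several classes below) now builds its parser from `XContentParserConfiguration.EMPTY.withRegistry(...).withDeprecationHandler(...)`. That API is an immutable "wither": each `with*` call returns a new configuration, so a shared default instance can be specialized per call site without mutation. A sketch of the pattern with stand-in types:

```java
// Stand-in for XContentParserConfiguration: immutable, with each with* method
// returning a modified copy rather than mutating shared state.
final class ParserConfigSketch {
    static final ParserConfigSketch EMPTY = new ParserConfigSketch("empty-registry", "throwing-handler");

    final String registry;
    final String deprecationHandler;

    private ParserConfigSketch(String registry, String deprecationHandler) {
        this.registry = registry;
        this.deprecationHandler = deprecationHandler;
    }

    ParserConfigSketch withRegistry(String newRegistry) {
        return new ParserConfigSketch(newRegistry, deprecationHandler);
    }

    ParserConfigSketch withDeprecationHandler(String newHandler) {
        return new ParserConfigSketch(registry, newHandler);
    }
}

public class WitherConfigDemo {
    public static void main(String[] args) {
        ParserConfigSketch cfg = ParserConfigSketch.EMPTY
            .withRegistry("named-xcontent-registry")
            .withDeprecationHandler("logging-deprecation-handler");
        // EMPTY itself is untouched, so it is safe to share across call sites.
        System.out.println(ParserConfigSketch.EMPTY.registry + " vs " + cfg.registry);
    }
}
```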
TransformConfigVersion.readVersion(in) : null; settings = new SettingsConfig(in); - metadata = in.readMap(); + metadata = in.readGenericMap(); retentionPolicyConfig = in.readOptionalNamedWriteable(RetentionPolicyConfig.class); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdate.java index 05e43a11ba841..502d403cf979f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdate.java @@ -117,7 +117,7 @@ public TransformConfigUpdate(final StreamInput in) throws IOException { setHeaders(in.readMap(StreamInput::readString)); } settings = in.readOptionalWriteable(SettingsConfig::new); - metadata = in.readMap(); + metadata = in.readGenericMap(); retentionPolicyConfig = in.readOptionalNamedWriteable(RetentionPolicyConfig.class); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformDestIndexSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformDestIndexSettings.java index e202ab37bdc02..16ca196253f26 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformDestIndexSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformDestIndexSettings.java @@ -75,7 +75,7 @@ public TransformDestIndexSettings(Map mappings, Settings setting } public TransformDestIndexSettings(StreamInput in) throws IOException { - mappings = in.readMap(); + mappings = in.readGenericMap(); settings = Settings.readSettingsFromStream(in); aliases = new HashSet<>(in.readCollectionAsList(Alias::new)); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerPosition.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerPosition.java index 2933abf554219..76174aae8e958 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerPosition.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerPosition.java @@ -52,9 +52,9 @@ public TransformIndexerPosition(Map indexerPosition, Map position = in.readMap(); + Map position = in.readGenericMap(); indexerPosition = position == null ? null : Collections.unmodifiableMap(position); - position = in.readMap(); + position = in.readGenericMap(); bucketPosition = position == null ? 
null : Collections.unmodifiableMap(position); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformState.java index 2508b5e1bf01b..b2f7ac28210e2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformState.java @@ -134,7 +134,7 @@ public TransformState(StreamInput in) throws IOException { if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { position = in.readOptionalWriteable(TransformIndexerPosition::new); } else { - Map pos = in.readMap(); + Map pos = in.readGenericMap(); position = new TransformIndexerPosition(pos, null); } checkpoint = in.readLong(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/AggregationConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/AggregationConfig.java index 763f328ecfa0b..6bf3686315762 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/AggregationConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/AggregationConfig.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; @@ -24,6 +25,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue.Level; @@ -61,7 +63,7 @@ public AggregationConfig(final Map source, AggregatorFactories.B } public AggregationConfig(final StreamInput in) throws IOException { - source = in.readMap(); + source = in.readGenericMap(); aggregations = in.readOptionalWriteable(AggregatorFactories.Builder::new); } @@ -139,13 +141,18 @@ private static AggregatorFactories.Builder aggregationsFromXContent( NamedXContentRegistry namedXContentRegistry, DeprecationHandler deprecationHandler ) throws IOException { - AggregatorFactories.Builder aggregations = null; - + final AggregatorFactories.Builder aggregations; XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(source); - XContentParser sourceParser = XContentType.JSON.xContent() - .createParser(namedXContentRegistry, deprecationHandler, BytesReference.bytes(xContentBuilder).streamInput()); - sourceParser.nextToken(); - aggregations = AggregatorFactories.parseAggregators(sourceParser); + try ( + XContentParser sourceParser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY.withRegistry(namedXContentRegistry).withDeprecationHandler(deprecationHandler), + BytesReference.bytes(xContentBuilder), + XContentType.JSON + ) + ) { + sourceParser.nextToken(); + aggregations = 
AggregatorFactories.parseAggregators(sourceParser); + } return aggregations; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/GroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/GroupConfig.java index 4293595c711ef..c70b5b73b0256 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/GroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/GroupConfig.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ToXContentObject; @@ -57,7 +58,7 @@ public GroupConfig(final Map source, final Map { SingleGroupSource.Type groupType = SingleGroupSource.Type.fromId(stream.readByte()); return switch (groupType) { @@ -138,8 +139,11 @@ public static GroupConfig fromXContent(final XContentParser parser, boolean leni } else { try ( XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(source); - XContentParser sourceParser = XContentType.JSON.xContent() - .createParser(registry, LoggingDeprecationHandler.INSTANCE, BytesReference.bytes(xContentBuilder).streamInput()) + XContentParser sourceParser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG.withRegistry(registry), + BytesReference.bytes(xContentBuilder), + XContentType.JSON + ) ) { groups = parseGroupConfig(sourceParser, lenient); } catch (Exception e) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/ScriptConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/ScriptConfig.java index 7d4067ce4f665..f8bc06b61f521 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/ScriptConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/ScriptConfig.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.script.Script; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ToXContentObject; @@ -44,7 +45,7 @@ public ScriptConfig(final Map source, Script script) { } public ScriptConfig(final StreamInput in) throws IOException { - source = in.readMap(); + source = in.readGenericMap(); script = in.readOptionalWriteable(Script::new); } @@ -74,8 +75,11 @@ public static ScriptConfig fromXContent(final XContentParser parser, boolean len try ( XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(source); - XContentParser sourceParser = XContentType.JSON.xContent() - .createParser(registry, LoggingDeprecationHandler.INSTANCE, BytesReference.bytes(xContentBuilder).streamInput()) + XContentParser sourceParser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG.withRegistry(registry), + BytesReference.bytes(xContentBuilder), + XContentType.JSON 
+ ) ) { script = Script.parse(sourceParser); } catch (Exception e) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherFeatureSetUsage.java index 18ffeeb9a3206..fce9399a9bf01 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherFeatureSetUsage.java @@ -23,7 +23,7 @@ public class WatcherFeatureSetUsage extends XPackFeatureSet.Usage { public WatcherFeatureSetUsage(StreamInput in) throws IOException { super(in); - stats = in.readMap(); + stats = in.readGenericMap(); } public WatcherFeatureSetUsage(boolean available, boolean enabled, Map stats) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/XContentSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/XContentSource.java index 56e48905b9c83..fe88ccab0bc34 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/XContentSource.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/XContentSource.java @@ -13,7 +13,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ObjectPath; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -22,7 +21,6 @@ import org.elasticsearch.xcontent.XContentUtils; import java.io.IOException; -import java.io.InputStream; import java.util.List; import java.util.Map; import java.util.Objects; @@ -112,15 +110,15 @@ public T getValue(String path) { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { // EMPTY is safe here because we never use namedObject - try (InputStream stream = bytes.streamInput(); XContentParser parser = parser(NamedXContentRegistry.EMPTY, stream)) { + try (XContentParser parser = parser(bytes)) { parser.nextToken(); builder.generator().copyCurrentStructure(parser); return builder; } } - public XContentParser parser(NamedXContentRegistry xContentRegistry, InputStream stream) throws IOException { - return contentType.xContent().createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream); + public XContentParser parser(BytesReference bytes) throws IOException { + return XContentHelper.createParserNotCompressed(LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, bytes, contentType); } public static XContentSource readFrom(StreamInput in) throws IOException { @@ -134,8 +132,7 @@ public static void writeTo(XContentSource source, StreamOutput out) throws IOExc private Object data() { if (data == null) { - // EMPTY is safe here because we never use namedObject - try (InputStream stream = bytes.streamInput(); XContentParser parser = parser(NamedXContentRegistry.EMPTY, stream)) { + try (XContentParser parser = parser(bytes)) { data = XContentUtils.readValue(parser, parser.nextToken()); } catch (IOException ex) { throw new ElasticsearchException("failed to read value", ex); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java 
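The `XContentSource` change above narrows `parser(NamedXContentRegistry, InputStream)` to `parser(BytesReference)`: the registry argument was always `EMPTY` (the class never uses namedObject, per the original comment), and the stream was boilerplate both call sites had to open. A sketch of the "helper owns the stream" refactor with stand-in types:

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class OwnedStreamDemo {
    private final byte[] bytes = "{\"foo\":1}".getBytes();

    // After the refactor: callers hand over the raw bytes and the helper
    // creates the stream itself, so each call site shrinks to a single
    // try-with-resources over the returned parser-like resource.
    InputStream parser(byte[] data) {
        return new ByteArrayInputStream(data);
    }

    public static void main(String[] args) throws IOException {
        OwnedStreamDemo source = new OwnedStreamDemo();
        try (InputStream parser = source.parser(source.bytes)) {
            System.out.println(parser.available() + " bytes ready to parse");
        }
    }
}
```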
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java index f311ab51b54e8..681b004dd1d28 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java @@ -60,10 +60,10 @@ public ExecuteWatchRequest(StreamInput in) throws IOException { ignoreCondition = in.readBoolean(); recordExecution = in.readBoolean(); if (in.readBoolean()) { - alternativeInput = in.readMap(); + alternativeInput = in.readGenericMap(); } if (in.readBoolean()) { - triggerData = in.readMap(); + triggerData = in.readGenericMap(); } long actionModesCount = in.readLong(); actionModes = new HashMap<>(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchResponse.java index 7167759a8c0e5..261a31211e497 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchResponse.java @@ -10,12 +10,8 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; @@ -27,11 +23,8 @@ */ public class ExecuteWatchResponse extends ActionResponse implements ToXContentObject { - public static final ParseField ID_FIELD = new ParseField("_id"); - public static final ParseField WATCH_FIELD = new ParseField("watch_record"); - - private String recordId; - private XContentSource recordSource; + private final String recordId; + private final XContentSource recordSource; public ExecuteWatchResponse(StreamInput in) throws IOException { super(in); @@ -91,25 +84,4 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); return builder; } - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "x_pack_execute_watch_response", - false, - (fields) -> new ExecuteWatchResponse((String) fields[0], (BytesReference) fields[1], XContentType.JSON) - ); - static { - PARSER.declareString(ConstructingObjectParser.constructorArg(), ID_FIELD); - PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> readBytesReference(p), WATCH_FIELD); - } - - public static ExecuteWatchResponse fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - - private static BytesReference readBytesReference(XContentParser parser) throws IOException { - try (XContentBuilder builder = XContentFactory.jsonBuilder()) { - builder.copyCurrentStructure(parser); - return BytesReference.bytes(builder); - } - } } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java index c4e1f365ac2c1..f8d0ade06e022 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java @@ -43,7 +43,7 @@ public Request(Map settings) { } public Request(StreamInput in) throws IOException { - this.settings = in.readMap(); + this.settings = in.readGenericMap(); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java index b98a8abc019d0..df4f7828d1fed 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.threadpool.ThreadPool; @@ -248,16 +248,7 @@ public void testExecuteWithHeadersNoHeaders() { PlainActionFuture searchFuture = new PlainActionFuture<>(); searchFuture.onResponse( - new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, - null, - 0, - 0, - 0, - 0L, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ) + SearchResponseUtils.emptyWithTotalHits(null, 0, 0, 0, 0L, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY) ); when(client.search(any())).thenReturn(searchFuture); assertExecutionWithOrigin(Collections.emptyMap(), client); @@ -272,16 +263,7 @@ public void testExecuteWithHeaders() { PlainActionFuture searchFuture = new PlainActionFuture<>(); searchFuture.onResponse( - new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, - null, - 0, - 0, - 0, - 0L, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ) + SearchResponseUtils.emptyWithTotalHits(null, 0, 0, 0, 0L, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY) ); when(client.search(any())).thenReturn(searchFuture); Map headers = Map.of( @@ -307,16 +289,7 @@ public void testExecuteWithHeadersNoSecurityHeaders() { PlainActionFuture searchFuture = new PlainActionFuture<>(); searchFuture.onResponse( - new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, - null, - 0, - 0, - 0, - 0L, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ) + SearchResponseUtils.emptyWithTotalHits(null, 0, 0, 0, 0L, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY) ); when(client.search(any())).thenReturn(searchFuture); Map unrelatedHeaders = Map.of(randomAlphaOfLength(10), "anything"); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncResultsServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncResultsServiceTests.java index a2f4239b4a3f9..6849b5b859052 100644 --- 
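The `ClientHelperTests` hunks above collapse a repeated eight-argument `SearchResponse` construction into `SearchResponseUtils.emptyWithTotalHits(...)`. A stand-in sketch of why a named test factory reads better than the raw constructor:

```java
// Simplified stand-ins; the real SearchResponse takes many more arguments,
// which is exactly why the tests moved to a named factory for the all-empty case.
final class FakeSearchResponse {
    final long totalHits;
    final int totalShards;
    FakeSearchResponse(long totalHits, int totalShards) {
        this.totalHits = totalHits;
        this.totalShards = totalShards;
    }
}

final class SearchResponseTestUtils {
    // One intention-revealing factory replaces the long constructor call that
    // every test previously spelled out argument by argument.
    static FakeSearchResponse emptyWithTotalHits(int totalShards) {
        return new FakeSearchResponse(0L, totalShards);
    }
}

public class EmptyResponseDemo {
    public static void main(String[] args) {
        FakeSearchResponse resp = SearchResponseTestUtils.emptyWithTotalHits(0);
        System.out.println("hits=" + resp.totalHits + " shards=" + resp.totalShards);
    }
}
```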
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncResultsServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncResultsServiceTests.java @@ -90,12 +90,13 @@ public long getExpirationTime() { return this.expirationTimeMillis; } - public synchronized void addListener(ActionListener listener, TimeValue timeout) { + public synchronized boolean addListener(ActionListener listener, TimeValue timeout) { if (timeout.getMillis() < 0) { listener.onResponse(new TestAsyncResponse(null, expirationTimeMillis)); } else { assertThat(listeners.put(listener, timeout), nullValue()); } + return true; } private synchronized void onResponse(String response) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncTaskServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncTaskServiceTests.java index bc191349ea601..e6bf5d067741b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncTaskServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncTaskServiceTests.java @@ -216,11 +216,13 @@ public void testAutoCreateIndex() throws Exception { // To begin with, the results index should be auto-created. AsyncExecutionId id = new AsyncExecutionId("0", new TaskId("N/A", 0)); AsyncSearchResponse resp = new AsyncSearchResponse(id.getEncoded(), true, true, 0L, 0L); - { + try { PlainActionFuture future = new PlainActionFuture<>(); indexService.createResponse(id.getDocId(), Collections.emptyMap(), resp, future); future.get(); assertSettings(); + } finally { + resp.decRef(); } // Delete the index, so we can test subsequent auto-create behaviour diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditorTests.java index db6f303d98ca8..3879a8df0fbe6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditorTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditorTests.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.common.notifications; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.index.IndexRequest; @@ -262,7 +262,7 @@ private TestAuditor createTestAuditorWithoutTemplate(CountDownLatch latch) { threadPool.generic().submit(onPutTemplate); return null; - }).when(client).execute(eq(PutComposableIndexTemplateAction.INSTANCE), any(), any()); + }).when(client).execute(eq(TransportPutComposableIndexTemplateAction.TYPE), any(), any()); IndicesAdminClient indicesAdminClient = mock(IndicesAdminClient.class); AdminClient adminClient = mock(AdminClient.class); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/RolloverStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/RolloverStepTests.java index 3bcc362ecaf2a..1fcfc1fb287c4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/RolloverStepTests.java +++ 
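`AsyncTaskServiceTests.testAutoCreateIndex` above now wraps its use of the response in try/finally and calls `decRef()`, because `AsyncSearchResponse` is reference-counted. A sketch of that contract with a stand-in for the real `RefCounted` interface:

```java
import java.util.concurrent.atomic.AtomicInteger;

// Stand-in for the ref-counting contract: whoever acquires a reference must
// release it exactly once, and resources are freed when the count reaches 0.
final class RefCountedResponse {
    private final AtomicInteger refs = new AtomicInteger(1); // creator holds one ref

    boolean decRef() {
        int remaining = refs.decrementAndGet();
        if (remaining == 0) {
            System.out.println("buffers released");
            return true;
        }
        return false;
    }
}

public class DecRefDemo {
    public static void main(String[] args) {
        RefCountedResponse resp = new RefCountedResponse();
        try {
            // ... index the response, run assertions ...
        } finally {
            resp.decRef(); // release even if an assertion above throws
        }
    }
}
```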
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/RolloverStepTests.java @@ -152,7 +152,7 @@ private void mockClientRolloverCall(String rolloverTarget) { @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[1]; assertRolloverIndexRequest(request, rolloverTarget); - listener.onResponse(new RolloverResponse(null, null, Collections.emptyMap(), request.isDryRun(), true, true, true)); + listener.onResponse(new RolloverResponse(null, null, Collections.emptyMap(), request.isDryRun(), true, true, true, false)); return null; }).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any()); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java index db1f74d9de0c6..cda1b9940bdbb 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java @@ -349,7 +349,7 @@ private void mockRolloverIndexCall(String rolloverTarget, WaitForRolloverReadySt assertRolloverIndexRequest(request, rolloverTarget, expectedConditions); Map conditionResults = expectedConditions.stream() .collect(Collectors.toMap(Condition::toString, condition -> conditionResult)); - listener.onResponse(new RolloverResponse(null, null, conditionResults, request.isDryRun(), false, false, false)); + listener.onResponse(new RolloverResponse(null, null, conditionResults, request.isDryRun(), false, false, false, false)); return null; }).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any()); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleRequestTests.java index dd37e907f7a9e..ec821aa588c7b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleRequestTests.java @@ -31,14 +31,13 @@ import org.elasticsearch.xpack.core.ilm.TimeseriesLifecycleType; import org.elasticsearch.xpack.core.ilm.UnfollowAction; import org.elasticsearch.xpack.core.ilm.WaitForSnapshotAction; -import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction.Request; import org.junit.Before; import java.util.ArrayList; import java.util.Arrays; import java.util.List; -public class PutLifecycleRequestTests extends AbstractXContentSerializingTestCase { +public class PutLifecycleRequestTests extends AbstractXContentSerializingTestCase { private String lifecycleName; @@ -48,18 +47,18 @@ public void setup() { } @Override - protected Request createTestInstance() { - return new Request(LifecyclePolicyTests.randomTimeseriesLifecyclePolicy(lifecycleName)); + protected PutLifecycleRequest createTestInstance() { + return new PutLifecycleRequest(LifecyclePolicyTests.randomTimeseriesLifecyclePolicy(lifecycleName)); } @Override - protected Writeable.Reader instanceReader() { - return Request::new; + protected Writeable.Reader instanceReader() { + return PutLifecycleRequest::new; } @Override - protected Request doParseInstance(XContentParser parser) { - return PutLifecycleAction.Request.parseRequest(lifecycleName, parser); + protected PutLifecycleRequest doParseInstance(XContentParser parser) { + 
return PutLifecycleRequest.parseRequest(lifecycleName, parser); } @Override @@ -125,13 +124,13 @@ protected NamedXContentRegistry xContentRegistry() { } @Override - protected Request mutateInstance(Request request) { + protected PutLifecycleRequest mutateInstance(PutLifecycleRequest request) { String name = randomBoolean() ? lifecycleName : randomAlphaOfLength(5); LifecyclePolicy policy = randomValueOtherThan( request.getPolicy(), () -> LifecyclePolicyTests.randomTimeseriesLifecyclePolicy(name) ); - return new Request(policy); + return new PutLifecycleRequest(policy); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index 29453205b4d00..76b668a87cff5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -7,18 +7,15 @@ package org.elasticsearch.xpack.core.indexing; -import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ExecutorBuilder; @@ -116,18 +113,24 @@ protected void doNextSearch(long waitTimeInNanos, ActionListener return; } - final SearchResponseSections sections = new SearchResponseSections( - new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0), - null, - null, - false, - null, - null, - 1 - ); ActionListener.respondAndRelease( nextPhase, - new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null) + new SearchResponse( + SearchHits.EMPTY_WITH_TOTAL_HITS, + null, + null, + false, + null, + null, + 1, + null, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + null + ) ); } @@ -256,15 +259,6 @@ public boolean waitingForLatchCountDown() { @Override protected void doNextSearch(long waitTimeInNanos, ActionListener nextPhase) { ++searchOps; - final SearchResponseSections sections = new SearchResponseSections( - new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0), - null, - null, - false, - null, - null, - 1 - ); if (processOps == 3) { awaitForLatch(); @@ -272,7 +266,22 @@ protected void doNextSearch(long waitTimeInNanos, ActionListener ActionListener.respondAndRelease( nextPhase, - new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null) + new SearchResponse( + SearchHits.EMPTY_WITH_TOTAL_HITS, + null, + null, + false, + null, + null, + 1, + null, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + null + ) ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/PutInferenceModelActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/PutInferenceModelActionTests.java new file 
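`PutLifecycleRequestTests` above is retyped from the nested `PutLifecycleAction.Request` to the standalone `PutLifecycleRequest`; because `AbstractXContentSerializingTestCase` is generic over the instance type, the migration is mostly a type-parameter swap. A toy sketch of that test-base shape, with `String` standing in for the request type:

```java
// Minimal imitation of the serializing-test pattern: the base class is
// generic, so subclasses only supply factories for creating and mutating
// instances of the concrete type under test.
abstract class AbstractSerializingTestCaseSketch<T> {
    protected abstract T createTestInstance();
    protected abstract T mutateInstance(T instance);

    final void runEqualsAfterMutationCheck() {
        T original = createTestInstance();
        T mutated = mutateInstance(original);
        if (original.equals(mutated)) {
            throw new AssertionError("mutateInstance must produce a different value");
        }
    }
}

public final class PutLifecycleRequestSketchTests extends AbstractSerializingTestCaseSketch<String> {
    @Override protected String createTestInstance() { return "policy-a"; }
    @Override protected String mutateInstance(String instance) { return instance + "-mutated"; }

    public static void main(String[] args) {
        new PutLifecycleRequestSketchTests().runEqualsAfterMutationCheck();
        System.out.println("mutation check passed");
    }
}
```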
mode 100644 index 0000000000000..10f35bf33f631 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/PutInferenceModelActionTests.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.inference.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.utils.MlStringsTests; +import org.junit.Before; + +import java.util.Locale; + +public class PutInferenceModelActionTests extends ESTestCase { + public static String TASK_TYPE; + public static String MODEL_ID; + public static XContentType X_CONTENT_TYPE; + public static BytesReference BYTES; + + @Before + public void setup() throws Exception { + TASK_TYPE = TaskType.ANY.toString(); + MODEL_ID = randomAlphaOfLengthBetween(1, 10).toLowerCase(Locale.ROOT); + X_CONTENT_TYPE = randomFrom(XContentType.values()); + BYTES = new BytesArray(randomAlphaOfLengthBetween(1, 10)); + } + + public void testValidate() { + // valid model ID + var request = new PutInferenceModelAction.Request(TASK_TYPE, MODEL_ID + "_-0", BYTES, X_CONTENT_TYPE); + ActionRequestValidationException validationException = request.validate(); + assertNull(validationException); + + // invalid model IDs + + var invalidRequest = new PutInferenceModelAction.Request(TASK_TYPE, "", BYTES, X_CONTENT_TYPE); + validationException = invalidRequest.validate(); + assertNotNull(validationException); + + var invalidRequest2 = new PutInferenceModelAction.Request( + TASK_TYPE, + randomAlphaOfLengthBetween(1, 10) + randomFrom(MlStringsTests.SOME_INVALID_CHARS), + BYTES, + X_CONTENT_TYPE + ); + validationException = invalidRequest2.validate(); + assertNotNull(validationException); + + var invalidRequest3 = new PutInferenceModelAction.Request(TASK_TYPE, null, BYTES, X_CONTENT_TYPE); + validationException = invalidRequest3.validate(); + assertNotNull(validationException); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartActionRequestTests.java index 4ffa2e27fe60c..ee304f966c9b4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartActionRequestTests.java @@ -72,7 +72,7 @@ protected Writeable.Reader instanceReader() { @Override protected Request mutateInstanceForVersion(Request instance, TransportVersion version) { - if (version.before(TransportVersions.V_8_500_043)) { + if (version.before(TransportVersions.V_8_500_061)) { return new Request( instance.getModelId(), instance.getDefinition(), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/ClassificationTests.java 
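The new `PutInferenceModelActionTests` above checks that empty, null, and special-character model IDs (drawn from `MlStringsTests.SOME_INVALID_CHARS`) are rejected. A sketch of that style of identifier validation; the regex here is an illustrative assumption, not the exact `MlStrings` rule:

```java
import java.util.regex.Pattern;

public class ModelIdValidationDemo {
    // Assumed rule for illustration: lowercase alphanumerics plus '_' and '-',
    // starting and ending with an alphanumeric character.
    private static final Pattern VALID_ID = Pattern.compile("[a-z0-9](?:[a-z0-9_\\-]*[a-z0-9])?");

    static String validate(String modelId) {
        if (modelId == null || !VALID_ID.matcher(modelId).matches()) {
            return "invalid model id [" + modelId + "]";
        }
        return null; // null mirrors "no ActionRequestValidationException"
    }

    public static void main(String[] args) {
        System.out.println(validate("my-model_0")); // null -> valid
        System.out.println(validate(""));           // invalid
        System.out.println(validate(null));         // invalid
    }
}
```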
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/ClassificationTests.java index bf00f8763d929..cc101626667b2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/ClassificationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/ClassificationTests.java @@ -292,8 +292,7 @@ public void testProcess_MultipleMetricsWithDifferentNumberOfSteps() { private static SearchResponse mockSearchResponseWithNonZeroTotalHits() { SearchResponse searchResponse = mock(SearchResponse.class); - SearchHits hits = new SearchHits(SearchHits.EMPTY, new TotalHits(10, TotalHits.Relation.EQUAL_TO), 0); - when(searchResponse.getHits()).thenReturn(hits); + when(searchResponse.getHits()).thenReturn(SearchHits.empty(new TotalHits(10, TotalHits.Relation.EQUAL_TO), 0)); return searchResponse; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfoTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfoTests.java index 28ebf8b2445c5..830f7dde7c7d8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfoTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfoTests.java @@ -69,4 +69,17 @@ public void testIsRoutable_GivenStartedWithNonZeroAllocations() { RoutingInfo routingInfo = new RoutingInfo(randomIntBetween(1, 10), 1, RoutingState.STARTED, ""); assertThat(routingInfo.isRoutable(), is(true)); } + + public void testGetFailedAllocations() { + int targetAllocations = randomIntBetween(1, 10); + RoutingInfo routingInfo = new RoutingInfo( + randomIntBetween(0, targetAllocations), + targetAllocations, + randomFrom(RoutingState.STARTING, RoutingState.STARTED, RoutingState.STOPPING), + "" + ); + assertThat(routingInfo.getFailedAllocations(), is(0)); + routingInfo = new RoutingInfo(randomIntBetween(0, targetAllocations), targetAllocations, RoutingState.FAILED, ""); + assertThat(routingInfo.getFailedAllocations(), is(targetAllocations)); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java index 4e6b88d2ff054..75706f3d6a9bf 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java @@ -25,6 +25,8 @@ import java.util.List; import java.util.stream.Stream; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresentWith; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.empty; @@ -112,8 +114,8 @@ public void testGetStartedNodes() { public void testCalculateAllocationStatus_GivenNoAllocations() { assertThat( - TrainedModelAssignment.Builder.empty(randomTaskParams(5)).build().calculateAllocationStatus().get(), - equalTo(new AllocationStatus(0, 5)) + TrainedModelAssignment.Builder.empty(randomTaskParams(5)).build().calculateAllocationStatus(), + isPresentWith(new 
AllocationStatus(0, 5)) ); } @@ -121,7 +123,7 @@ public void testCalculateAllocationStatus_GivenStoppingAssignment() { TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5)); builder.addRoutingEntry("node-1", new RoutingInfo(1, 2, RoutingState.STARTED, "")); builder.addRoutingEntry("node-2", new RoutingInfo(2, 1, RoutingState.STARTED, "")); - assertThat(builder.stopAssignment("test").build().calculateAllocationStatus().isEmpty(), is(true)); + assertThat(builder.stopAssignment("test").build().calculateAllocationStatus(), isEmpty()); } public void testCalculateAllocationStatus_GivenPartiallyAllocated() { @@ -129,14 +131,14 @@ public void testCalculateAllocationStatus_GivenPartiallyAllocated() { builder.addRoutingEntry("node-1", new RoutingInfo(1, 2, RoutingState.STARTED, "")); builder.addRoutingEntry("node-2", new RoutingInfo(2, 1, RoutingState.STARTED, "")); builder.addRoutingEntry("node-3", new RoutingInfo(3, 3, RoutingState.STARTING, "")); - assertThat(builder.build().calculateAllocationStatus().get(), equalTo(new AllocationStatus(3, 5))); + assertThat(builder.build().calculateAllocationStatus(), isPresentWith(new AllocationStatus(3, 5))); } public void testCalculateAllocationStatus_GivenFullyAllocated() { TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5)); builder.addRoutingEntry("node-1", new RoutingInfo(4, 4, RoutingState.STARTED, "")); builder.addRoutingEntry("node-2", new RoutingInfo(1, 1, RoutingState.STARTED, "")); - assertThat(builder.build().calculateAllocationStatus().get(), equalTo(new AllocationStatus(5, 5))); + assertThat(builder.build().calculateAllocationStatus(), isPresentWith(new AllocationStatus(5, 5))); } public void testCalculateAssignmentState_GivenNoStartedAssignments() { @@ -179,8 +181,7 @@ public void testselectRandomStartedNodeWeighedOnAllocationsForNRequests_GivenSin var nodes = assignment.selectRandomStartedNodesWeighedOnAllocationsForNRequests(1, RoutingState.STARTED); - assertThat(nodes, hasSize(1)); - assertThat(nodes.get(0), equalTo(new Tuple<>("node-1", 1))); + assertThat(nodes, contains(new Tuple<>("node-1", 1))); } public void testselectRandomStartedNodeWeighedOnAllocationsForNRequests_GivenAShuttingDownRoute_ItReturnsNoNodes() { @@ -200,8 +201,7 @@ public void testselectRandomStartedNodeWeighedOnAllocationsForNRequests_GivenASh var nodes = assignment.selectRandomStartedNodesWeighedOnAllocationsForNRequests(1, RoutingState.STOPPING); - assertThat(nodes, hasSize(1)); - assertThat(nodes.get(0), equalTo(new Tuple<>("node-1", 1))); + assertThat(nodes, contains(new Tuple<>("node-1", 1))); } public void testSingleRequestWith2Nodes() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java index 880b62689dee2..4fff2804f9350 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java @@ -48,6 +48,7 @@ import java.util.Map; import java.util.Set; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -108,12 +109,14 @@ public void testToXContentForInternalStorage() throws IOException { ToXContent.MapParams params = new 
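The assertions above move from calling `optional.get()` inside `assertThat` to Optional-aware matchers (`isPresentWith`, `isEmpty`), which fail with a descriptive message instead of a bare `NoSuchElementException`. A dependency-free sketch of what the present-with form buys; the helper below imitates the idea and is not the real `OptionalMatchers` API:

```java
import java.util.Optional;

public class OptionalAssertDemo {
    static <T> void assertPresentWith(Optional<T> actual, T expected) {
        // One check covers both emptiness and value mismatch, and the failure
        // message shows the actual Optional rather than throwing from get().
        if (actual.isEmpty() || !actual.get().equals(expected)) {
            throw new AssertionError("expected a present Optional of " + expected + " but was " + actual);
        }
    }

    public static void main(String[] args) {
        assertPresentWith(Optional.of(42), 42);      // passes
        try {
            assertPresentWith(Optional.empty(), 42); // fails with a clear message
        } catch (AssertionError e) {
            System.out.println(e.getMessage());
        }
    }
}
```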
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java
index 880b62689dee2..4fff2804f9350 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java
@@ -48,6 +48,7 @@
 import java.util.Map;
 import java.util.Set;
 
+import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
@@ -108,12 +109,14 @@ public void testToXContentForInternalStorage() throws IOException {
         ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.FOR_INTERNAL_STORAGE, "true"));
         BytesReference serializedJob = XContentHelper.toXContent(config, XContentType.JSON, params, false);
-        XContentParser parser = XContentFactory.xContent(XContentType.JSON)
-            .createParser(XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry()), serializedJob.streamInput());
-
-        Job parsedConfig = Job.LENIENT_PARSER.apply(parser, null).build();
-        // When we are writing for internal storage, we do not include the datafeed config
-        assertThat(parsedConfig.getDatafeedConfig().isPresent(), is(false));
+        try (
+            XContentParser parser = XContentFactory.xContent(XContentType.JSON)
+                .createParser(XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry()), serializedJob.streamInput())
+        ) {
+            Job parsedConfig = Job.LENIENT_PARSER.apply(parser, null).build();
+            // When we are writing for internal storage, we do not include the datafeed config
+            assertThat(parsedConfig.getDatafeedConfig(), isEmpty());
+        }
     }
 
     public void testFutureConfigParse() throws IOException {
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java
index e1a9b20c048c4..6ba7dc6ac24cd 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java
@@ -7,8 +7,8 @@
 package org.elasticsearch.xpack.core.ml.job.persistence;
 
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction;
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
+import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction;
 import org.elasticsearch.action.support.ActionTestUtils;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.action.support.master.MasterNodeRequest;
@@ -288,7 +288,7 @@ public void testAddDocMappingIfMissing() {
             ActionListener<AcknowledgedResponse> listener = (ActionListener<AcknowledgedResponse>) invocationOnMock.getArguments()[2];
             listener.onResponse(AcknowledgedResponse.TRUE);
             return null;
-        }).when(client).execute(eq(PutMappingAction.INSTANCE), any(), any(ActionListener.class));
+        }).when(client).execute(eq(TransportPutMappingAction.TYPE), any(), any(ActionListener.class));
 
         ClusterState clusterState = getClusterStateWithMappingsWithMetadata(Collections.singletonMap("index-name", "0.0"));
         ElasticsearchMappings.addDocMappingIfMissing(
@@ -304,7 +304,7 @@ public void testAddDocMappingIfMissing() {
         ArgumentCaptor<PutMappingRequest> requestCaptor = ArgumentCaptor.forClass(PutMappingRequest.class);
         verify(client).threadPool();
-        verify(client).execute(eq(PutMappingAction.INSTANCE), requestCaptor.capture(), any(ActionListener.class));
+        verify(client).execute(eq(TransportPutMappingAction.TYPE), requestCaptor.capture(), any(ActionListener.class));
         verifyNoMoreInteractions(client);
 
         PutMappingRequest request = requestCaptor.getValue();
@@ -362,10 +362,9 @@ private Set<String> collectResultsDocFieldNames() throws IOException {
 
     private Set<String> collectFieldNames(String mapping) throws IOException {
         BufferedInputStream inputStream = new BufferedInputStream(new ByteArrayInputStream(mapping.getBytes(StandardCharsets.UTF_8)));
-        XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, inputStream);
         Set<String> fieldNames = new HashSet<>();
         boolean isAfterPropertiesStart = false;
-        try {
+        try (XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, inputStream)) {
             XContentParser.Token token = parser.nextToken();
             while (token != null) {
                 switch (token) {
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAliasTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAliasTests.java
index db18752cb91b7..e7dcc6b441a31 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAliasTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAliasTests.java
@@ -16,7 +16,7 @@
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
 import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
-import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.action.support.master.MasterNodeRequest;
 import org.elasticsearch.client.internal.AdminClient;
@@ -121,8 +121,8 @@ public void setUpMocks() {
             return null;
         }).when(client)
             .execute(
-                any(PutComposableIndexTemplateAction.class),
-                any(PutComposableIndexTemplateAction.Request.class),
+                same(TransportPutComposableIndexTemplateAction.TYPE),
+                any(TransportPutComposableIndexTemplateAction.Request.class),
                 any(ActionListener.class)
             );
 
@@ -173,7 +173,7 @@ public void testInstallIndexTemplateIfRequired_GivenLegacyTemplateExistsAndModer
         );
         InOrder inOrder = inOrder(client, listener);
         inOrder.verify(listener).delegateFailureAndWrap(any());
-        inOrder.verify(client).execute(same(PutComposableIndexTemplateAction.INSTANCE), any(), any());
+        inOrder.verify(client).execute(same(TransportPutComposableIndexTemplateAction.TYPE), any(), any());
         inOrder.verify(listener).onResponse(true);
     }
 
@@ -239,7 +239,7 @@ public void testInstallIndexTemplateIfRequired() {
         );
         InOrder inOrder = inOrder(client, listener);
         inOrder.verify(listener).delegateFailureAndWrap(any());
-        inOrder.verify(client).execute(same(PutComposableIndexTemplateAction.INSTANCE), any(), any());
+        inOrder.verify(client).execute(same(TransportPutComposableIndexTemplateAction.TYPE), any(), any());
         inOrder.verify(listener).onResponse(true);
     }
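The `JobTests` and `ElasticsearchMappingsTests` hunks above apply the same cleanup: `XContentParser` creation moves inside a try-with-resources block so the parser is closed even when parsing throws. A stripped-down sketch of the resulting shape, assuming the `createParser(XContentParserConfiguration, String)` overload; the class name and the payload argument are illustrative:

```java
import java.io.IOException;

import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentParserConfiguration;
import org.elasticsearch.xcontent.json.JsonXContent;

class ParserCleanupSketch {
    void parse(String json) throws IOException {
        // Before: the parser was created outside any try block and leaked on exception.
        // After: the parser is an AutoCloseable resource of the try block.
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, json)) {
            while (parser.nextToken() != null) {
                // consume tokens; parser.close() runs even if this loop throws
            }
        }
    }
}
```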
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/MlStringsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlStringsTests.java
similarity index 87%
rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/MlStringsTests.java
rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlStringsTests.java
index fb60ac39bdef1..04681fe6e0cd0 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/MlStringsTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlStringsTests.java
@@ -4,10 +4,9 @@
  * 2.0; you may not use this file except in compliance with the Elastic License
  * 2.0.
  */
-package org.elasticsearch.xpack.ml.utils;
+package org.elasticsearch.xpack.core.ml.utils;
 
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.xpack.core.ml.utils.MlStrings;
 
 import java.util.Arrays;
 import java.util.Collections;
@@ -22,6 +21,37 @@
 
 public class MlStringsTests extends ESTestCase {
+
+    public static final String[] SOME_INVALID_CHARS = {
+        "%",
+        " ",
+        "!",
+        "@",
+        "#",
+        "$",
+        "^",
+        "&",
+        "*",
+        "(",
+        ")",
+        "+",
+        "=",
+        "{",
+        "}",
+        "[",
+        "]",
+        "|",
+        "\\",
+        ":",
+        ";",
+        "\"",
+        "'",
+        "<",
+        ">",
+        ",",
+        "?",
+        "/",
+        "~" };
+
     public void testDoubleQuoteIfNotAlphaNumeric() {
         assertEquals("foo2", MlStrings.doubleQuoteIfNotAlphaNumeric("foo2"));
         assertEquals("\"fo o\"", MlStrings.doubleQuoteIfNotAlphaNumeric("fo o"));
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchActionTests.java
index 830f0c09506e6..91ed19cb5389c 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchActionTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchActionTests.java
@@ -11,9 +11,11 @@
 import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege;
 import org.elasticsearch.xpack.core.security.support.Automatons;
 
+import static org.elasticsearch.test.LambdaMatchers.trueWith;
+
 public class RollupSearchActionTests extends ESTestCase {
     public void testIndexReadPrivilegeCanPerformRollupSearchAction() {
-        assertTrue(Automatons.predicate(IndexPrivilege.READ.getAutomaton()).test(RollupSearchAction.NAME));
+        assertThat(Automatons.predicate(IndexPrivilege.READ.getAutomaton()), trueWith(RollupSearchAction.NAME));
     }
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java
index 9e357915186a5..02bce50ed3483 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKeyTests.java
@@ -9,6 +9,7 @@
 
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.XContentTestUtils;
 import org.elasticsearch.xcontent.ToXContent;
@@ -152,6 +153,10 @@ public static Map<String, Object> randomMetadata() {
         return randomMetadata == null ? new HashMap<>() : new HashMap<>(randomMetadata);
     }
 
+    public static TimeValue randomFutureExpirationTime() {
+        return TimeValue.parseTimeValue(randomTimeValue(10, 20, "d", "h", "s", "m"), "expiration");
+    }
+
     public static ApiKey randomApiKeyInstance() {
         final String name = randomAlphaOfLengthBetween(4, 10);
         final String id = randomAlphaOfLength(20);
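The `randomFutureExpirationTime` helper added above builds a `TimeValue` from a random duration string of 10 to 20 days, hours, seconds, or minutes. A sketch of the parsing it relies on, with a fixed `"14d"` standing in for the randomized value:

```java
import org.elasticsearch.core.TimeValue;

class ExpirationSketch {
    public static void main(String[] args) {
        // "14d" stands in for randomTimeValue(10, 20, "d", "h", "s", "m");
        // the second argument names the field for error messages.
        TimeValue expiration = TimeValue.parseTimeValue("14d", "expiration");
        System.out.println(expiration.days()); // prints 14
    }
}
```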
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestSerializationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestSerializationTests.java
new file mode 100644
index 0000000000000..08e5266181298
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestSerializationTests.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.core.security.action.apikey;
+
+import org.elasticsearch.TransportVersions;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.nullValue;
+
+public class BulkUpdateApiKeyRequestSerializationTests extends AbstractWireSerializingTestCase<BulkUpdateApiKeyRequest> {
+    public void testSerializationBackwardsCompatibility() throws IOException {
+        BulkUpdateApiKeyRequest testInstance = createTestInstance();
+        BulkUpdateApiKeyRequest deserializedInstance = copyInstance(testInstance, TransportVersions.V_8_500_064);
+        try {
+            // Transport is on a version before expiration was introduced, so should always be null
+            assertThat(deserializedInstance.getExpiration(), nullValue());
+        } finally {
+            dispose(deserializedInstance);
+        }
+    }
+
+    @Override
+    protected BulkUpdateApiKeyRequest createTestInstance() {
+        final boolean roleDescriptorsPresent = randomBoolean();
+        final List<RoleDescriptor> descriptorList;
+        if (roleDescriptorsPresent == false) {
+            descriptorList = null;
+        } else {
+            final int numDescriptors = randomIntBetween(0, 4);
+            descriptorList = new ArrayList<>();
+            for (int i = 0; i < numDescriptors; i++) {
+                descriptorList.add(new RoleDescriptor("role_" + i, new String[] { "all" }, null, null));
+            }
+        }
+
+        final var ids = randomList(randomInt(5), () -> randomAlphaOfLength(10));
+        final var metadata = ApiKeyTests.randomMetadata();
+        final TimeValue expiration = ApiKeyTests.randomFutureExpirationTime();
+        return new BulkUpdateApiKeyRequest(ids, descriptorList, metadata, expiration);
+    }
+
+    @Override
+    protected Writeable.Reader<BulkUpdateApiKeyRequest> instanceReader() {
+        return BulkUpdateApiKeyRequest::new;
+    }
+
+    @Override
+    protected BulkUpdateApiKeyRequest mutateInstance(BulkUpdateApiKeyRequest instance) throws IOException {
+        Map<String, Object> metadata = ApiKeyTests.randomMetadata();
+        long days = randomValueOtherThan(instance.getExpiration().days(), () -> ApiKeyTests.randomFutureExpirationTime().getDays());
+        return new BulkUpdateApiKeyRequest(
+            instance.getIds(),
+            instance.getRoleDescriptors(),
+            metadata,
+            TimeValue.parseTimeValue(days + "d", null, "expiration")
+        );
+    }
+}
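The backwards-compatibility test above leans on `AbstractWireSerializingTestCase.copyInstance(instance, version)`. Roughly, that helper pins the transport version on both ends of a byte-stream round trip, along these lines (a simplified sketch of the idea, not the base class's actual implementation):

```java
import java.io.IOException;

import org.elasticsearch.TransportVersion;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

class VersionedRoundTripSketch {
    static BulkUpdateApiKeyRequest roundTrip(BulkUpdateApiKeyRequest request, TransportVersion version) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.setTransportVersion(version);       // writer skips fields newer than this version
            request.writeTo(out);
            StreamInput in = out.bytes().streamInput();
            in.setTransportVersion(version);        // reader likewise expects the old wire format
            return new BulkUpdateApiKeyRequest(in); // expiration is expected to read back as null on old versions
        }
    }
}
```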
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestTests.java
index 8a0f384daedaa..583b336b3f6eb 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyRequestTests.java
@@ -8,13 +8,10 @@
 package org.elasticsearch.xpack.core.security.action.apikey;
 
 import org.elasticsearch.action.ActionRequestValidationException;
-import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;
 
-import java.io.IOException;
-import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
@@ -23,42 +20,13 @@
 import static org.hamcrest.Matchers.equalTo;
 
 public class BulkUpdateApiKeyRequestTests extends ESTestCase {
-
-    public void testSerialization() throws IOException {
-        final boolean roleDescriptorsPresent = randomBoolean();
-        final List<RoleDescriptor> descriptorList;
-        if (roleDescriptorsPresent == false) {
-            descriptorList = null;
-        } else {
-            final int numDescriptors = randomIntBetween(0, 4);
-            descriptorList = new ArrayList<>();
-            for (int i = 0; i < numDescriptors; i++) {
-                descriptorList.add(new RoleDescriptor("role_" + i, new String[] { "all" }, null, null));
-            }
-        }
-
-        final List<String> ids = randomList(1, 5, () -> randomAlphaOfLength(10));
-        final Map<String, Object> metadata = ApiKeyTests.randomMetadata();
-        final var request = new BulkUpdateApiKeyRequest(ids, descriptorList, metadata);
-
-        try (BytesStreamOutput out = new BytesStreamOutput()) {
-            request.writeTo(out);
-            try (StreamInput in = out.bytes().streamInput()) {
-                final var serialized = new BulkUpdateApiKeyRequest(in);
-                assertEquals(ids, serialized.getIds());
-                assertEquals(descriptorList, serialized.getRoleDescriptors());
-                assertEquals(metadata, request.getMetadata());
-            }
-        }
-    }
-
     public void testNullValuesValidForNonIds() {
         final var request = BulkUpdateApiKeyRequest.usingApiKeyIds("id");
         assertNull(request.validate());
     }
 
     public void testEmptyIdsNotValid() {
-        final var request = new BulkUpdateApiKeyRequest(List.of(), null, null);
+        final var request = new BulkUpdateApiKeyRequest(List.of(), null, null, null);
         final ActionRequestValidationException ve = request.validate();
         assertNotNull(ve);
         assertThat(ve.validationErrors().size(), equalTo(1));
@@ -68,10 +36,12 @@ public void testEmptyIdsNotValid() {
     public void testMetadataKeyValidation() {
         final var reservedKey = "_" + randomAlphaOfLengthBetween(0, 10);
         final var metadataValue = randomAlphaOfLengthBetween(1, 10);
+        final TimeValue expiration = ApiKeyTests.randomFutureExpirationTime();
         final var request = new BulkUpdateApiKeyRequest(
             randomList(1, 5, () -> randomAlphaOfLength(10)),
             null,
-            Map.of(reservedKey, metadataValue)
+            Map.of(reservedKey, metadataValue),
+            expiration
         );
         final ActionRequestValidationException ve = request.validate();
         assertNotNull(ve);
@@ -103,6 +73,7 @@ public void testRoleDescriptorValidation() {
                     new RoleDescriptor.Restriction(unknownWorkflows)
                 )
             ),
+            null,
             null
         );
         final ActionRequestValidationException ve = request.validate();
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestSerializationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestSerializationTests.java
new file mode 100644
index 0000000000000..be1e69d4d30e8
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestSerializationTests.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.core.security.action.apikey;
+
+import org.elasticsearch.TransportVersions;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.nullValue;
+
+public class UpdateApiKeyRequestSerializationTests extends AbstractWireSerializingTestCase<UpdateApiKeyRequest> {
+    public void testSerializationBackwardsCompatibility() throws IOException {
+        UpdateApiKeyRequest testInstance = createTestInstance();
+        UpdateApiKeyRequest deserializedInstance = copyInstance(testInstance, TransportVersions.V_8_500_064);
+        try {
+            // Transport is on a version before expiration was introduced, so should always be null
+            assertThat(deserializedInstance.getExpiration(), nullValue());
+        } finally {
+            dispose(deserializedInstance);
+        }
+    }
+
+    @Override
+    protected UpdateApiKeyRequest createTestInstance() {
+        final boolean roleDescriptorsPresent = randomBoolean();
+        final List<RoleDescriptor> descriptorList;
+        if (roleDescriptorsPresent == false) {
+            descriptorList = null;
+        } else {
+            final int numDescriptors = randomIntBetween(0, 4);
+            descriptorList = new ArrayList<>();
+            for (int i = 0; i < numDescriptors; i++) {
+                descriptorList.add(new RoleDescriptor("role_" + i, new String[] { "all" }, null, null));
+            }
+        }
+
+        final var id = randomAlphaOfLength(10);
+        final var metadata = ApiKeyTests.randomMetadata();
+        final TimeValue expiration = ApiKeyTests.randomFutureExpirationTime();
+        return new UpdateApiKeyRequest(id, descriptorList, metadata, expiration);
+    }
+
+    @Override
+    protected Writeable.Reader<UpdateApiKeyRequest> instanceReader() {
+        return UpdateApiKeyRequest::new;
+    }
+
+    @Override
+    protected UpdateApiKeyRequest mutateInstance(UpdateApiKeyRequest instance) throws IOException {
+        Map<String, Object> metadata = ApiKeyTests.randomMetadata();
+        long days = randomValueOtherThan(instance.getExpiration().days(), () -> ApiKeyTests.randomFutureExpirationTime().getDays());
+        return new UpdateApiKeyRequest(
+            instance.getId(),
+            instance.getRoleDescriptors(),
+            metadata,
+            TimeValue.parseTimeValue(days + "d", null, "expiration")
+        );
+    }
+
+}
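Both `UpdateApiKeyRequest` and `BulkUpdateApiKeyRequest` now take the expiration as an extra constructor argument, and the existing call sites in these tests pass `null` for it, presumably leaving the key's expiration out of the update. Illustrative usage only; the id is a placeholder and role descriptors and metadata are elided:

```java
import org.elasticsearch.core.TimeValue;

class UpdateRequestUsageSketch {
    void examples() {
        // Update nothing about the expiration:
        var unchanged = new UpdateApiKeyRequest("key-id", null, null, null);
        // Request a new 30-day expiration as part of the update:
        var extended = new UpdateApiKeyRequest("key-id", null, null, TimeValue.timeValueDays(30));
    }
}
```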
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTests.java
index cf4015f6fd4cc..7b85c71c7519f 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateApiKeyRequestTests.java
@@ -8,13 +8,10 @@
 package org.elasticsearch.xpack.core.security.action.apikey;
 
 import org.elasticsearch.action.ActionRequestValidationException;
-import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;
 import org.elasticsearch.xpack.core.security.authz.restriction.WorkflowResolver;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -22,49 +19,19 @@
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.containsStringIgnoringCase;
 import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.is;
 
 public class UpdateApiKeyRequestTests extends ESTestCase {
 
     public void testNullValuesValidForNonIds() {
-        final var request = new UpdateApiKeyRequest("id", null, null);
+        final var request = new UpdateApiKeyRequest("id", null, null, null);
         assertNull(request.validate());
     }
 
-    public void testSerialization() throws IOException {
-        final boolean roleDescriptorsPresent = randomBoolean();
-        final List<RoleDescriptor> descriptorList;
-        if (roleDescriptorsPresent == false) {
-            descriptorList = null;
-        } else {
-            final int numDescriptors = randomIntBetween(0, 4);
-            descriptorList = new ArrayList<>();
-            for (int i = 0; i < numDescriptors; i++) {
-                descriptorList.add(new RoleDescriptor("role_" + i, new String[] { "all" }, null, null));
-            }
-        }
-
-        final var id = randomAlphaOfLength(10);
-        final var metadata = ApiKeyTests.randomMetadata();
-        final var request = new UpdateApiKeyRequest(id, descriptorList, metadata);
-        assertThat(request.getType(), is(ApiKey.Type.REST));
-
-        try (BytesStreamOutput out = new BytesStreamOutput()) {
-            request.writeTo(out);
-            try (StreamInput in = out.bytes().streamInput()) {
-                final var serialized = new UpdateApiKeyRequest(in);
-                assertEquals(id, serialized.getId());
-                assertEquals(descriptorList, serialized.getRoleDescriptors());
-                assertEquals(metadata, serialized.getMetadata());
-                assertEquals(request.getType(), serialized.getType());
-            }
-        }
-    }
-
     public void testMetadataKeyValidation() {
         final var reservedKey = "_" + randomAlphaOfLengthBetween(0, 10);
         final var metadataValue = randomAlphaOfLengthBetween(1, 10);
-        UpdateApiKeyRequest request = new UpdateApiKeyRequest(randomAlphaOfLength(10), null, Map.of(reservedKey, metadataValue));
+
+        UpdateApiKeyRequest request = new UpdateApiKeyRequest(randomAlphaOfLength(10), null, Map.of(reservedKey, metadataValue), null);
         final ActionRequestValidationException ve = request.validate();
         assertNotNull(ve);
         assertThat(ve.validationErrors().size(), equalTo(1));
@@ -98,6 +65,7 @@ public void testRoleDescriptorValidation() {
                     new RoleDescriptor.Restriction(workflows.toArray(String[]::new))
                 )
             ),
+            null,
             null
         );
         final ActionRequestValidationException ve1 = request1.validate();
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateCrossClusterApiKeyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateCrossClusterApiKeyRequestTests.java
index 89a6f5b650b5a..f9faa2731dcc0 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateCrossClusterApiKeyRequestTests.java
+++
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/UpdateCrossClusterApiKeyRequestTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -25,15 +26,15 @@ public class UpdateCrossClusterApiKeyRequestTests extends ESTestCase { public void testSerialization() throws IOException { final var metadata = ApiKeyTests.randomMetadata(); - + final TimeValue expiration = ApiKeyTests.randomFutureExpirationTime(); final CrossClusterApiKeyRoleDescriptorBuilder roleDescriptorBuilder; - if (metadata == null || randomBoolean()) { + if (randomBoolean()) { roleDescriptorBuilder = CrossClusterApiKeyRoleDescriptorBuilder.parse(randomCrossClusterApiKeyAccessField()); } else { roleDescriptorBuilder = null; } - final var request = new UpdateCrossClusterApiKeyRequest(randomAlphaOfLength(10), roleDescriptorBuilder, metadata); + final var request = new UpdateCrossClusterApiKeyRequest(randomAlphaOfLength(10), roleDescriptorBuilder, metadata, expiration); assertThat(request.getType(), is(ApiKey.Type.CROSS_CLUSTER)); assertThat(request.validate(), nullValue()); @@ -44,13 +45,14 @@ public void testSerialization() throws IOException { assertEquals(request.getId(), serialized.getId()); assertEquals(request.getRoleDescriptors(), serialized.getRoleDescriptors()); assertEquals(metadata, serialized.getMetadata()); + assertEquals(expiration, serialized.getExpiration()); assertEquals(request.getType(), serialized.getType()); } } } public void testNotEmptyUpdateValidation() { - final var request = new UpdateCrossClusterApiKeyRequest(randomAlphaOfLength(10), null, null); + final var request = new UpdateCrossClusterApiKeyRequest(randomAlphaOfLength(10), null, null, null); final ActionRequestValidationException ve = request.validate(); assertThat(ve, notNullValue()); assertThat(ve.validationErrors(), contains("must update either [access] or [metadata] for cross-cluster API keys")); @@ -59,7 +61,7 @@ public void testNotEmptyUpdateValidation() { public void testMetadataKeyValidation() { final var reservedKey = "_" + randomAlphaOfLengthBetween(0, 10); final var metadataValue = randomAlphaOfLengthBetween(1, 10); - final var request = new UpdateCrossClusterApiKeyRequest(randomAlphaOfLength(10), null, Map.of(reservedKey, metadataValue)); + final var request = new UpdateCrossClusterApiKeyRequest(randomAlphaOfLength(10), null, Map.of(reservedKey, metadataValue), null); final ActionRequestValidationException ve = request.validate(); assertThat(ve, notNullValue()); assertThat(ve.validationErrors(), contains("API key metadata keys may not start with [_]")); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleRestrictionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleRestrictionTests.java index fe64192cb0601..244e21f3f036c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleRestrictionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleRestrictionTests.java @@ -72,9 +72,10 @@ public void testToXContent() throws Exception { final Restriction restriction = randomWorkflowsRestriction(1, 5); final XContentType xContentType = randomFrom(XContentType.values()); final 
BytesReference xContentValue = toShuffledXContent(restriction, xContentType, ToXContent.EMPTY_PARAMS, false);
-        final XContentParser parser = xContentType.xContent().createParser(XContentParserConfiguration.EMPTY, xContentValue.streamInput());
-        final Restriction parsed = Restriction.parse(randomAlphaOfLengthBetween(3, 6), parser);
-        assertThat(parsed, equalTo(restriction));
+        try (XContentParser parser = xContentType.xContent().createParser(XContentParserConfiguration.EMPTY, xContentValue.streamInput())) {
+            final Restriction parsed = Restriction.parse(randomAlphaOfLengthBetween(3, 6), parser);
+            assertThat(parsed, equalTo(restriction));
+        }
     }
 
     public void testSerialization() throws IOException {
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilegeTests.java
index 22e6a6f005919..66ed55fadb734 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilegeTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageOwnApiKeyClusterPrivilegeTests.java
@@ -11,6 +11,7 @@
 
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.xpack.core.security.action.apikey.ApiKeyTests;
 import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequest;
 import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest;
 import org.elasticsearch.xpack.core.security.action.apikey.CreateCrossClusterApiKeyAction;
@@ -88,7 +89,7 @@ public void testAuthenticationForBulkUpdateApiKeyAllowsAll() {
             .build();
         final List<String> apiKeyIds = randomList(1, 5, () -> randomAlphaOfLengthBetween(4, 7));
         final Authentication authentication = AuthenticationTestHelper.builder().build();
-        final TransportRequest bulkUpdateApiKeyRequest = new BulkUpdateApiKeyRequest(apiKeyIds, null, null);
+        final TransportRequest bulkUpdateApiKeyRequest = new BulkUpdateApiKeyRequest(apiKeyIds, null, null, null);
 
         assertTrue(clusterPermission.check("cluster:admin/xpack/security/api_key/update", bulkUpdateApiKeyRequest, authentication));
     }
@@ -315,7 +316,8 @@ public void testCheckUpdateCrossClusterApiKeyRequestDenied() {
         final UpdateCrossClusterApiKeyRequest request = new UpdateCrossClusterApiKeyRequest(
             randomAlphaOfLengthBetween(4, 7),
             null,
-            Map.of()
+            Map.of(),
+            ApiKeyTests.randomFutureExpirationTime()
         );
         assertFalse(clusterPermission.check(UpdateCrossClusterApiKeyAction.NAME, request, AuthenticationTestHelper.builder().build()));
     }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java
index 2e2368ece0612..bddc30b8d7b83 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java
@@ -14,7 +14,7 @@
 import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
 import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction;
 import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction;
-import
org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutIndexTemplateAction; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportRequest; @@ -306,7 +306,7 @@ public void testReadSecurityPrivilege() { ClusterStatsAction.NAME, NodeEnrollmentAction.NAME, KibanaEnrollmentAction.NAME, - PutIndexTemplateAction.NAME, + TransportPutIndexTemplateAction.TYPE.name(), GetIndexTemplatesAction.NAME, ClusterRerouteAction.NAME, ClusterUpdateSettingsAction.NAME, @@ -356,7 +356,7 @@ public void testManageUserProfilePrivilege() { TransportClusterHealthAction.NAME, ClusterStateAction.NAME, ClusterStatsAction.NAME, - PutIndexTemplateAction.NAME, + TransportPutIndexTemplateAction.TYPE.name(), GetIndexTemplatesAction.NAME, ClusterRerouteAction.NAME, ClusterUpdateSettingsAction.NAME diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 831dc58e14003..5e190f72c596c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -27,18 +27,18 @@ import org.elasticsearch.action.admin.indices.get.GetIndexAction; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsAction; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; import org.elasticsearch.action.admin.indices.resolve.ResolveIndexAction; import org.elasticsearch.action.admin.indices.rollover.RolloverAction; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsAction; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; +import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction; +import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; -import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutIndexTemplateAction; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.datastreams.CreateDataStreamAction; @@ -48,9 +48,9 @@ import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesAction; import org.elasticsearch.action.get.TransportGetAction; import org.elasticsearch.action.index.TransportIndexAction; -import 
org.elasticsearch.action.ingest.DeletePipelineAction; +import org.elasticsearch.action.ingest.DeletePipelineTransportAction; import org.elasticsearch.action.ingest.GetPipelineAction; -import org.elasticsearch.action.ingest.PutPipelineAction; +import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.action.ingest.SimulatePipelineAction; import org.elasticsearch.action.search.TransportMultiSearchAction; import org.elasticsearch.action.search.TransportSearchAction; @@ -74,11 +74,8 @@ import org.elasticsearch.xpack.core.ilm.action.ExplainLifecycleAction; import org.elasticsearch.xpack.core.ilm.action.GetLifecycleAction; import org.elasticsearch.xpack.core.ilm.action.GetStatusAction; -import org.elasticsearch.xpack.core.ilm.action.MoveToStepAction; -import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; import org.elasticsearch.xpack.core.ilm.action.RemoveIndexLifecyclePolicyAction; -import org.elasticsearch.xpack.core.ilm.action.StartILMAction; -import org.elasticsearch.xpack.core.ilm.action.StopILMAction; import org.elasticsearch.xpack.core.ml.MlConfigIndex; import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; @@ -310,10 +307,10 @@ public void testSnapshotUserRole() { assertThat(snapshotUserRole.cluster().check(TransportPutRepositoryAction.TYPE.name(), request, authentication), is(false)); assertThat(snapshotUserRole.cluster().check(GetIndexTemplatesAction.NAME, request, authentication), is(false)); - assertThat(snapshotUserRole.cluster().check(DeleteIndexTemplateAction.NAME, request, authentication), is(false)); - assertThat(snapshotUserRole.cluster().check(PutPipelineAction.NAME, request, authentication), is(false)); + assertThat(snapshotUserRole.cluster().check(TransportDeleteIndexTemplateAction.TYPE.name(), request, authentication), is(false)); + assertThat(snapshotUserRole.cluster().check(PutPipelineTransportAction.TYPE.name(), request, authentication), is(false)); assertThat(snapshotUserRole.cluster().check(GetPipelineAction.NAME, request, authentication), is(false)); - assertThat(snapshotUserRole.cluster().check(DeletePipelineAction.NAME, request, authentication), is(false)); + assertThat(snapshotUserRole.cluster().check(DeletePipelineTransportAction.TYPE.name(), request, authentication), is(false)); assertThat(snapshotUserRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); assertThat(snapshotUserRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); assertThat(snapshotUserRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); @@ -381,12 +378,12 @@ public void testIngestAdminRole() { assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role ingestAdminRole = Role.buildFromRoleDescriptor(roleDescriptor, new FieldPermissionsCache(Settings.EMPTY), RESTRICTED_INDICES); - assertThat(ingestAdminRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(true)); + assertThat(ingestAdminRole.cluster().check(TransportPutIndexTemplateAction.TYPE.name(), request, authentication), is(true)); assertThat(ingestAdminRole.cluster().check(GetIndexTemplatesAction.NAME, request, authentication), is(true)); - assertThat(ingestAdminRole.cluster().check(DeleteIndexTemplateAction.NAME, request, authentication), is(true)); - assertThat(ingestAdminRole.cluster().check(PutPipelineAction.NAME, request, 
authentication), is(true)); + assertThat(ingestAdminRole.cluster().check(TransportDeleteIndexTemplateAction.TYPE.name(), request, authentication), is(true)); + assertThat(ingestAdminRole.cluster().check(PutPipelineTransportAction.TYPE.name(), request, authentication), is(true)); assertThat(ingestAdminRole.cluster().check(GetPipelineAction.NAME, request, authentication), is(true)); - assertThat(ingestAdminRole.cluster().check(DeletePipelineAction.NAME, request, authentication), is(true)); + assertThat(ingestAdminRole.cluster().check(DeletePipelineTransportAction.TYPE.name(), request, authentication), is(true)); assertThat(ingestAdminRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); assertThat(ingestAdminRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); assertThat(ingestAdminRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); @@ -428,7 +425,7 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.cluster().check(TransportClusterHealthAction.NAME, request, authentication), is(true)); assertThat(kibanaRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(true)); assertThat(kibanaRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(true)); - assertThat(kibanaRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(true)); + assertThat(kibanaRole.cluster().check(TransportPutIndexTemplateAction.TYPE.name(), request, authentication), is(true)); assertThat(kibanaRole.cluster().check(GetIndexTemplatesAction.NAME, request, authentication), is(true)); assertThat(kibanaRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); assertThat(kibanaRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); @@ -626,7 +623,10 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(mockIndexAbstraction(index)), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(false)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(false) + ); assertThat( kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), is(true) @@ -657,7 +657,10 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(mockIndexAbstraction(index)), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(false)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(false) + ); assertThat( 
kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), is(true) @@ -688,7 +691,10 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(mockIndexAbstraction(index)), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(mockIndexAbstraction(index)), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(false)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(false) + ); assertThat( kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), is(true) @@ -716,7 +722,10 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(mockIndexAbstraction(index)), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(false)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(false) + ); assertThat( kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), is(true) @@ -783,8 +792,14 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(mockIndexAbstraction(index)), is(false)); // Privileges needed for Fleet package upgrades - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(PutMappingAction.NAME).test(mockIndexAbstraction(index)), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(true) + ); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportPutMappingAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(true) + ); assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(mockIndexAbstraction(index)), is(true)); // Privileges needed for installing current ILM policy with delete action assertThat( @@ -810,8 +825,11 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(PutMappingAction.NAME).test(indexAbstraction), is(true)); + assertThat( + 
kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportPutMappingAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true)); }); @@ -832,8 +850,11 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(PutMappingAction.NAME).test(indexAbstraction), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportPutMappingAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true)); }); @@ -860,7 +881,10 @@ public void testKibanaSystemRole() { is(false) ); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(dotFleetSecretsIndex), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(dotFleetSecretsIndex), is(false)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(dotFleetSecretsIndex), + is(false) + ); assertThat(kibanaRole.cluster().check("cluster:admin/fleet/secrets/get", request, authentication), is(false)); assertThat(kibanaRole.cluster().check("cluster:admin/fleet/secrets/post", request, authentication), is(true)); @@ -899,14 +923,17 @@ public void testKibanaSystemRole() { ); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(indexAbstraction), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(indexAbstraction), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(indexAbstraction), is(false)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(indexAbstraction), is(true)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true)); - 
assertThat(kibanaRole.indices().allowedIndicesMatcher(PutMappingAction.NAME).test(indexAbstraction), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportPutMappingAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true)); }); @@ -926,8 +953,11 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(PutMappingAction.NAME).test(indexAbstraction), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportPutMappingAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true)); }); @@ -948,8 +978,11 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(PutMappingAction.NAME).test(indexAbstraction), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportPutMappingAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true)); }); @@ -970,8 +1003,11 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(PutMappingAction.NAME).test(indexAbstraction), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportPutMappingAction.TYPE.name()).test(indexAbstraction), is(true)); 
assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true)); }); @@ -990,7 +1026,10 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(mockIndexAbstraction(index)), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(false)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(false) + ); assertThat( kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), is(false) @@ -1045,8 +1084,11 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(PutMappingAction.NAME).test(indexAbstraction), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportPutMappingAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true)); }); @@ -1067,8 +1109,11 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(PutMappingAction.NAME).test(indexAbstraction), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportPutMappingAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true)); }); @@ -1083,7 +1128,10 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(mockIndexAbstraction(index)), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(mockIndexAbstraction(index)), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(mockIndexAbstraction(index)), is(true)); - 
assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(false)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(false) + ); assertThat( kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), is(true) @@ -1102,21 +1150,21 @@ public void testKibanaSystemRole() { // 1. Pipeline Arrays.asList( GetPipelineAction.NAME, - PutPipelineAction.NAME, - DeletePipelineAction.NAME, + PutPipelineTransportAction.TYPE.name(), + DeletePipelineTransportAction.TYPE.name(), SimulatePipelineAction.NAME, "cluster:admin/ingest/pipeline/" + randomAlphaOfLengthBetween(3, 8) ).forEach(action -> assertThat(kibanaRole.cluster().check(action, request, authentication), is(true))); // 2. ILM Arrays.asList( - StartILMAction.NAME, + ILMActions.START.name(), DeleteLifecycleAction.NAME, GetLifecycleAction.NAME, GetStatusAction.NAME, - MoveToStepAction.NAME, - PutLifecycleAction.NAME, - StopILMAction.NAME, + ILMActions.MOVE_TO_STEP.name(), + ILMActions.PUT.name(), + ILMActions.STOP.name(), "cluster:admin/ilm/" + randomAlphaOfLengthBetween(3, 8) ).forEach(action -> assertThat(kibanaRole.cluster().check(action, request, authentication), is(true))); @@ -1135,8 +1183,11 @@ public void testKibanaSystemRole() { ).forEach(indexName -> { logger.info("index name [{}]", indexName); final IndexAbstraction indexAbstraction = mockIndexAbstraction(indexName); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(PutMappingAction.NAME).test(indexAbstraction), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportPutMappingAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true)); assertThat( kibanaRole.indices().allowedIndicesMatcher("indices:admin/data_stream/lifecycle/put").test(indexAbstraction), @@ -1150,7 +1201,8 @@ public void testKibanaSystemRole() { is(true) ); - final boolean isAlsoAutoCreateIndex = indexName.startsWith(".logs-endpoint.actions-"); + final boolean isAlsoAutoCreateIndex = indexName.startsWith(".logs-endpoint.actions-") + || indexName.startsWith(".logs-endpoint.action.responses-"); assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(indexAbstraction), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(AutoCreateAction.NAME).test(indexAbstraction), is(isAlsoAutoCreateIndex)); assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateDataStreamAction.NAME).test(indexAbstraction), is(false)); @@ -1244,7 +1296,10 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(DeleteDataStreamAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAliasesAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndicesAliasesAction.NAME).test(indexAbstraction), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true)); + assertThat( + 
kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); // Implied by the overall view_index_metadata and monitor privilege assertViewIndexMetadata(kibanaRole, indexName); @@ -1255,7 +1310,7 @@ public void testKibanaSystemRole() { // Granted by bwc for index privilege assertThat( - kibanaRole.indices().allowedIndicesMatcher(PutMappingAction.NAME).test(indexAbstraction), + kibanaRole.indices().allowedIndicesMatcher(TransportPutMappingAction.TYPE.name()).test(indexAbstraction), is(indexAbstraction.getType() != IndexAbstraction.Type.DATA_STREAM) ); @@ -1279,7 +1334,10 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteIndexAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAliasesAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndicesAliasesAction.NAME).test(indexAbstraction), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); // Allow deleting documents assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(indexAbstraction), is(true)); @@ -1307,8 +1365,11 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(indexAbstraction), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(indexAbstraction), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(indexAbstraction), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(PutMappingAction.NAME).test(indexAbstraction), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportPutMappingAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteIndexAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true)); @@ -1365,8 +1426,11 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(PutMappingAction.NAME).test(indexAbstraction), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); + 
assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportPutMappingAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true)); }); @@ -1386,8 +1450,11 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(PutMappingAction.NAME).test(indexAbstraction), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportPutMappingAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true)); }); @@ -1413,7 +1480,10 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateDataStreamAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAliasesAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndicesAliasesAction.NAME).test(indexAbstraction), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); // Implied by the overall view_index_metadata and monitor privilege assertViewIndexMetadata(kibanaRole, indexName); @@ -1446,8 +1516,11 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(PutMappingAction.NAME).test(indexAbstraction), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportPutMappingAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true)); assertViewIndexMetadata(kibanaRole, indexName); }); @@ -1473,7 +1546,10 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteIndexAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true)); - 
assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); }); // Example transform package @@ -1490,7 +1566,10 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteIndexAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAliasesAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndicesAliasesAction.NAME).test(indexAbstraction), is(true)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(indexAbstraction), is(true)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); // Implied by the overall view_index_metadata and monitor privilege assertViewIndexMetadata(kibanaRole, indexName); @@ -1530,7 +1609,7 @@ public void testKibanaAdminRole() { assertThat(kibanaAdminRole.cluster().check(TransportClusterHealthAction.NAME, request, authentication), is(false)); assertThat(kibanaAdminRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(false)); assertThat(kibanaAdminRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(false)); - assertThat(kibanaAdminRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(kibanaAdminRole.cluster().check(TransportPutIndexTemplateAction.TYPE.name(), request, authentication), is(false)); assertThat(kibanaAdminRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); assertThat(kibanaAdminRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); assertThat(kibanaAdminRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); @@ -1594,7 +1673,7 @@ public void testKibanaUserRole() { assertThat(kibanaUserRole.cluster().check(TransportClusterHealthAction.NAME, request, authentication), is(false)); assertThat(kibanaUserRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(false)); assertThat(kibanaUserRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(false)); - assertThat(kibanaUserRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(kibanaUserRole.cluster().check(TransportPutIndexTemplateAction.TYPE.name(), request, authentication), is(false)); assertThat(kibanaUserRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); assertThat(kibanaUserRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); assertThat(kibanaUserRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); @@ -1677,7 +1756,7 @@ public void testMonitoringUserRole() { assertThat(monitoringUserRole.cluster().check(TransportClusterHealthAction.NAME, request, authentication), is(false)); assertThat(monitoringUserRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(false)); assertThat(monitoringUserRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(false)); - assertThat(monitoringUserRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + 
assertThat(monitoringUserRole.cluster().check(TransportPutIndexTemplateAction.TYPE.name(), request, authentication), is(false)); assertThat(monitoringUserRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); assertThat(monitoringUserRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); assertThat(monitoringUserRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); @@ -1736,7 +1815,7 @@ public void testMonitoringUserRole() { is(false) ); assertThat( - monitoringUserRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), + monitoringUserRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(mockIndexAbstraction(index)), is(false) ); assertThat( @@ -1813,7 +1892,10 @@ public void testRemoteMonitoringAgentRole() { assertThat(remoteMonitoringAgentRole.cluster().check(TransportClusterHealthAction.NAME, request, authentication), is(true)); assertThat(remoteMonitoringAgentRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(true)); assertThat(remoteMonitoringAgentRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(true)); - assertThat(remoteMonitoringAgentRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(true)); + assertThat( + remoteMonitoringAgentRole.cluster().check(TransportPutIndexTemplateAction.TYPE.name(), request, authentication), + is(true) + ); assertThat(remoteMonitoringAgentRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); assertThat(remoteMonitoringAgentRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); assertThat(remoteMonitoringAgentRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); @@ -1832,7 +1914,7 @@ public void testRemoteMonitoringAgentRole() { assertThat(remoteMonitoringAgentRole.cluster().check(ProfileHasPrivilegesAction.NAME, request, authentication), is(false)); // ILM assertThat(remoteMonitoringAgentRole.cluster().check(GetLifecycleAction.NAME, request, authentication), is(true)); - assertThat(remoteMonitoringAgentRole.cluster().check(PutLifecycleAction.NAME, request, authentication), is(true)); + assertThat(remoteMonitoringAgentRole.cluster().check(ILMActions.PUT.name(), request, authentication), is(true)); // we get this from the cluster:monitor privilege assertThat(remoteMonitoringAgentRole.cluster().check(WatcherStatsAction.NAME, request, authentication), is(true)); @@ -1895,7 +1977,7 @@ public void testRemoteMonitoringAgentRole() { ); assertThat( remoteMonitoringAgentRole.indices() - .allowedIndicesMatcher(UpdateSettingsAction.NAME) + .allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()) .test(mockIndexAbstraction(monitoringIndex)), is(true) ); @@ -1979,7 +2061,7 @@ public void testRemoteMonitoringAgentRole() { ); assertThat( remoteMonitoringAgentRole.indices() - .allowedIndicesMatcher(UpdateSettingsAction.NAME) + .allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()) .test(mockIndexAbstraction(metricbeatIndex)), is(false) ); @@ -2017,8 +2099,14 @@ public void testRemoteMonitoringCollectorRole() { assertThat(remoteMonitoringCollectorRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(true)); assertThat(remoteMonitoringCollectorRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(true)); 
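The substitution repeated throughout these hunks is mechanical but worth naming: standalone action classes that carried a static `NAME` string (`PutIndexTemplateAction.NAME`, `UpdateSettingsAction.NAME`, `PutMappingAction.NAME`) give way to shared `ActionType` constants on the corresponding transport action, and call sites read the name via `TYPE.name()`. The registered action string itself is unchanged, which is why every expected `is(true)`/`is(false)` value in these assertions stays the same. A minimal sketch of the call-site change (the printed value is the long-standing name for this action, assumed unchanged by the refactor):

```java
import org.elasticsearch.action.admin.indices.template.put.TransportPutIndexTemplateAction;

public class ActionNameMigration {
    public static void main(String[] args) {
        // Before this refactor, call sites used PutIndexTemplateAction.NAME;
        // that class is gone, and the shared ActionType constant now carries
        // the registered action name.
        String actionName = TransportPutIndexTemplateAction.TYPE.name();
        System.out.println(actionName); // indices:admin/template/put
    }
}
```

A side effect visible in the registry tests further down: because each `ActionType` is a singleton constant, verifiers can compare actions by identity (`action == PutPipelineTransportAction.TYPE`) rather than by `instanceof` on a dedicated action class.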
assertThat(remoteMonitoringCollectorRole.cluster().check(GetIndexTemplatesAction.NAME, request, authentication), is(true)); - assertThat(remoteMonitoringCollectorRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); - assertThat(remoteMonitoringCollectorRole.cluster().check(DeleteIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat( + remoteMonitoringCollectorRole.cluster().check(TransportPutIndexTemplateAction.TYPE.name(), request, authentication), + is(false) + ); + assertThat( + remoteMonitoringCollectorRole.cluster().check(TransportDeleteIndexTemplateAction.TYPE.name(), request, authentication), + is(false) + ); assertThat(remoteMonitoringCollectorRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); assertThat(remoteMonitoringCollectorRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); assertThat(remoteMonitoringCollectorRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); @@ -2107,7 +2195,9 @@ public void testRemoteMonitoringCollectorRole() { is(false) ); assertThat( - remoteMonitoringCollectorRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), + remoteMonitoringCollectorRole.indices() + .allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()) + .test(mockIndexAbstraction(index)), is(false) ); assertThat( @@ -2144,13 +2234,13 @@ public void testRemoteMonitoringCollectorRole() { ); assertThat( remoteMonitoringCollectorRole.indices() - .allowedIndicesMatcher(IndicesShardStoresAction.NAME) + .allowedIndicesMatcher(TransportIndicesShardStoresAction.TYPE.name()) .test(mockIndexAbstraction(randomFrom(TestRestrictedIndices.SAMPLE_RESTRICTED_NAMES))), is(true) ); assertThat( remoteMonitoringCollectorRole.indices() - .allowedIndicesMatcher(IndicesShardStoresAction.NAME) + .allowedIndicesMatcher(TransportIndicesShardStoresAction.TYPE.name()) .test(mockIndexAbstraction(XPackPlugin.ASYNC_RESULTS_INDEX + randomAlphaOfLengthBetween(0, 2))), is(true) ); @@ -2265,7 +2355,7 @@ private void assertMonitoringOnRestrictedIndices(Role role) { IndicesStatsAction.NAME, IndicesSegmentsAction.NAME, GetSettingsAction.NAME, - IndicesShardStoresAction.NAME, + TransportIndicesShardStoresAction.TYPE.name(), RecoveryAction.NAME ); for (final String indexMonitoringActionName : indexMonitoringActionNamesList) { @@ -2300,7 +2390,7 @@ public void testReportingUserRole() { assertThat(reportingUserRole.cluster().check(TransportClusterHealthAction.NAME, request, authentication), is(false)); assertThat(reportingUserRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(false)); assertThat(reportingUserRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(false)); - assertThat(reportingUserRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(reportingUserRole.cluster().check(TransportPutIndexTemplateAction.TYPE.name(), request, authentication), is(false)); assertThat(reportingUserRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); assertThat(reportingUserRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); assertThat(reportingUserRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); @@ -2334,7 +2424,7 @@ public void testReportingUserRole() { ); 
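All of the index-level assertions in this file funnel through `role.indices().allowedIndicesMatcher(actionName)`, which returns a predicate that the test probes index by index (through its own `mockIndexAbstraction` helper). A toy model of that shape, deliberately not the Elasticsearch API, with made-up grants:

```java
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;

// Toy model of the pattern exercised above: a role resolves an action name to
// a per-index predicate, and the tests assert the predicate's verdict per index.
public class AllowedIndicesMatcherSketch {
    // Hypothetical grants: action name -> indices on which it is allowed.
    static final Map<String, Set<String>> GRANTS = Map.of(
        "indices:admin/settings/update", Set.of(".monitoring-es-7", ".fleet-agents")
    );

    static Predicate<String> allowedIndicesMatcher(String action) {
        Set<String> allowed = GRANTS.getOrDefault(action, Set.of());
        return allowed::contains;
    }

    public static void main(String[] args) {
        Predicate<String> matcher = allowedIndicesMatcher("indices:admin/settings/update");
        System.out.println(matcher.test(".monitoring-es-7")); // true
        System.out.println(matcher.test("some-user-index"));  // false
    }
}
```

The real matcher operates on `IndexAbstraction` instances and handles wildcards and restricted indices, which is why the tests pin down both the allowed and the denied cases for every role and action name.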
assertThat(reportingUserRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat( - reportingUserRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), + reportingUserRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(mockIndexAbstraction(index)), is(false) ); assertThat( @@ -2376,7 +2466,7 @@ public void testSuperuserRole() { assertThat(superuserRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(true)); assertThat(superuserRole.cluster().check(PutUserAction.NAME, request, authentication), is(true)); assertThat(superuserRole.cluster().check(PutRoleAction.NAME, request, authentication), is(true)); - assertThat(superuserRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(true)); + assertThat(superuserRole.cluster().check(TransportPutIndexTemplateAction.TYPE.name(), request, authentication), is(true)); assertThat(superuserRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(true)); assertThat(superuserRole.cluster().check("internal:admin/foo", request, authentication), is(false)); assertThat( @@ -2431,7 +2521,8 @@ public void testSuperuserRole() { iac = superuserRole.indices().authorize(TransportIndexAction.NAME, Sets.newHashSet("a2", "ba"), lookup, fieldPermissionsCache); assertThat(iac.hasIndexPermissions("a2"), is(true)); assertThat(iac.hasIndexPermissions("b"), is(true)); - iac = superuserRole.indices().authorize(UpdateSettingsAction.NAME, Sets.newHashSet("aaaaaa", "ba"), lookup, fieldPermissionsCache); + iac = superuserRole.indices() + .authorize(TransportUpdateSettingsAction.TYPE.name(), Sets.newHashSet("aaaaaa", "ba"), lookup, fieldPermissionsCache); assertThat(iac.hasIndexPermissions("aaaaaa"), is(true)); assertThat(iac.hasIndexPermissions("b"), is(true)); @@ -2507,7 +2598,7 @@ public void testLogstashSystemRole() { assertThat(logstashSystemRole.cluster().check(TransportClusterHealthAction.NAME, request, authentication), is(true)); assertThat(logstashSystemRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(true)); assertThat(logstashSystemRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(true)); - assertThat(logstashSystemRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(logstashSystemRole.cluster().check(TransportPutIndexTemplateAction.TYPE.name(), request, authentication), is(false)); assertThat(logstashSystemRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); assertThat(logstashSystemRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); assertThat(logstashSystemRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); @@ -2548,7 +2639,7 @@ public void testBeatsAdminRole() { assertThat(beatsAdminRole.cluster().check(TransportClusterHealthAction.NAME, request, authentication), is(false)); assertThat(beatsAdminRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(false)); assertThat(beatsAdminRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(false)); - assertThat(beatsAdminRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(beatsAdminRole.cluster().check(TransportPutIndexTemplateAction.TYPE.name(), request, authentication), is(false)); 
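The ILM hunks follow the same consolidation: the one-off `StartILMAction`, `StopILMAction`, `MoveToStepAction`, and `PutLifecycleAction` classes collapse into constants grouped on a single `ILMActions` holder, again without touching the action strings the role checks depend on. A small sketch of reading those names; the values in the comments are the historical ILM action names and are noted here as assumptions rather than guarantees:

```java
import org.elasticsearch.xpack.core.ilm.action.ILMActions;

public class IlmActionNames {
    public static void main(String[] args) {
        // One holder class now exposes the ILM ActionType constants; the
        // expected wire names below are assumptions based on the pre-refactor
        // *ILMAction.NAME values.
        System.out.println(ILMActions.START.name());        // expected: cluster:admin/ilm/start
        System.out.println(ILMActions.STOP.name());         // expected: cluster:admin/ilm/stop
        System.out.println(ILMActions.MOVE_TO_STEP.name()); // expected: cluster:admin/ilm/_move/post
        System.out.println(ILMActions.PUT.name());          // expected: cluster:admin/ilm/put
    }
}
```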
assertThat(beatsAdminRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); assertThat(beatsAdminRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); assertThat(beatsAdminRole.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); @@ -2577,7 +2668,10 @@ public void testBeatsAdminRole() { assertThat(beatsAdminRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(mockIndexAbstraction(index)), is(true)); assertThat(beatsAdminRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(mockIndexAbstraction(index)), is(true)); assertThat(beatsAdminRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(mockIndexAbstraction(index)), is(true)); - assertThat(beatsAdminRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(true)); + assertThat( + beatsAdminRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(true) + ); assertThat( beatsAdminRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), is(true) @@ -2607,7 +2701,7 @@ public void testBeatsSystemRole() { assertThat(beatsSystemRole.cluster().check(TransportClusterHealthAction.NAME, request, authentication), is(true)); assertThat(beatsSystemRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(true)); assertThat(beatsSystemRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(true)); - assertThat(beatsSystemRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(beatsSystemRole.cluster().check(TransportPutIndexTemplateAction.TYPE.name(), request, authentication), is(false)); assertThat(beatsSystemRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); assertThat(beatsSystemRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); assertThat(beatsSystemRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); @@ -2655,7 +2749,7 @@ public void testAPMSystemRole() { assertThat(APMSystemRole.cluster().check(TransportClusterHealthAction.NAME, request, authentication), is(true)); assertThat(APMSystemRole.cluster().check(ClusterStateAction.NAME, request, authentication), is(true)); assertThat(APMSystemRole.cluster().check(ClusterStatsAction.NAME, request, authentication), is(true)); - assertThat(APMSystemRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(APMSystemRole.cluster().check(TransportPutIndexTemplateAction.TYPE.name(), request, authentication), is(false)); assertThat(APMSystemRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); assertThat(APMSystemRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); assertThat(APMSystemRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); @@ -3314,7 +3408,7 @@ public void testPredefinedViewerRole() { assertThat(role.cluster().check(TransportClusterHealthAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(ClusterStateAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(ClusterStatsAction.NAME, request, authentication), is(false)); - assertThat(role.cluster().check(PutIndexTemplateAction.NAME, request, 
authentication), is(false)); + assertThat(role.cluster().check(TransportPutIndexTemplateAction.TYPE.name(), request, authentication), is(false)); assertThat(role.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); @@ -3388,7 +3482,7 @@ public void testPredefinedEditorRole() { assertThat(role.cluster().check(TransportClusterHealthAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(ClusterStateAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(ClusterStatsAction.NAME, request, authentication), is(false)); - assertThat(role.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(TransportPutIndexTemplateAction.TYPE.name(), request, authentication), is(false)); assertThat(role.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(false)); @@ -3506,7 +3600,10 @@ private void assertAllIndicesAccessAllowed(Role role, String index) { assertThat(role.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(mockIndexAbstraction(index)), is(true)); assertThat(role.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(mockIndexAbstraction(index)), is(true)); assertThat(role.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(mockIndexAbstraction(index)), is(true)); - assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(true)); + assertThat( + role.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(true) + ); assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), is(true)); assertThat( role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), @@ -3553,7 +3650,10 @@ private void assertOnlyReadAllowed(Role role, String index) { is(false) ); assertThat(role.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); - assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(false)); + assertThat( + role.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(false) + ); assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), is(true)); assertThat(role.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(mockIndexAbstraction(index)), is(true)); assertThat(role.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); @@ -3595,7 +3695,10 @@ private void assertNoAccessAllowed(Role role, String index) { is(false) ); assertThat(role.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); - assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), is(false)); + assertThat( + 
role.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(mockIndexAbstraction(index)), + is(false) + ); assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(mockIndexAbstraction(index)), is(false)); assertThat(role.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(mockIndexAbstraction(index)), is(false)); assertThat(role.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); @@ -3618,7 +3721,7 @@ public void testLogstashAdminRole() { RESTRICTED_INDICES ); assertThat(logstashAdminRole.cluster().check(TransportClusterHealthAction.NAME, request, authentication), is(false)); - assertThat(logstashAdminRole.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(false)); + assertThat(logstashAdminRole.cluster().check(TransportPutIndexTemplateAction.TYPE.name(), request, authentication), is(false)); assertThat(logstashAdminRole.cluster().check(ClusterRerouteAction.NAME, request, authentication), is(false)); assertThat(logstashAdminRole.cluster().check(ClusterUpdateSettingsAction.NAME, request, authentication), is(false)); assertThat(logstashAdminRole.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); @@ -3674,7 +3777,7 @@ public void testLogstashAdminRole() { is(true) ); assertThat( - logstashAdminRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(mockIndexAbstraction(index)), + logstashAdminRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(mockIndexAbstraction(index)), is(true) ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java index 8d1bcfb500e30..31642cbf5e34f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java @@ -17,12 +17,12 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockAction; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.TransportUnpromotableShardRefreshAction; import org.elasticsearch.action.admin.indices.rollover.RolloverAction; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; +import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; import org.elasticsearch.action.bulk.BulkAction; @@ -91,7 +91,7 @@ public void testXPackUser() { BulkAction.NAME, RefreshAction.NAME, CreateIndexAction.NAME, - PutMappingAction.NAME, + TransportPutMappingAction.TYPE.name(), TransportDeleteIndexAction.TYPE.name() ); checkIndexAccess(role, randomFrom(sampleIndexActions), randomAlphaOfLengthBetween(3, 12), true); @@ -126,7 +126,7 @@ 
public void testXPackSecurityUser() { BulkAction.NAME, RefreshAction.NAME, CreateIndexAction.NAME, - PutMappingAction.NAME, + TransportPutMappingAction.TYPE.name(), TransportDeleteIndexAction.TYPE.name() ); checkIndexAccess( @@ -153,7 +153,7 @@ public void testSecurityProfileUser() { BulkAction.NAME, RefreshAction.NAME, CreateIndexAction.NAME, - PutMappingAction.NAME, + TransportPutMappingAction.TYPE.name(), TransportDeleteIndexAction.TYPE.name() ); checkIndexAccess(role, randomFrom(sampleAllowedActions), ".security-profile", true); @@ -184,7 +184,7 @@ public void testAsyncSearchUser() { BulkAction.NAME, RefreshAction.NAME, CreateIndexAction.NAME, - PutMappingAction.NAME, + TransportPutMappingAction.TYPE.name(), TransportDeleteIndexAction.TYPE.name() ); checkIndexAccess(role, randomFrom(sampleAllowedActions), XPackPlugin.ASYNC_RESULTS_INDEX, true); @@ -215,7 +215,7 @@ public void testStorageUser() { final List<String> sampleDeniedActions = List.of( TransportGetAction.TYPE.name(), BulkAction.NAME, - PutMappingAction.NAME, + TransportPutMappingAction.TYPE.name(), TransportDeleteIndexAction.TYPE.name() ); checkIndexAccess(role, randomFrom(sampleDeniedActions), randomAlphaOfLengthBetween(4, 8), false); @@ -249,7 +249,7 @@ public void testDataStreamLifecycleUser() { TransportDeleteIndexAction.TYPE.name(), ForceMergeAction.NAME, IndicesStatsAction.NAME, - UpdateSettingsAction.NAME, + TransportUpdateSettingsAction.TYPE.name(), DownsampleAction.NAME, AddIndexBlockAction.NAME ); @@ -259,7 +259,7 @@ public void testDataStreamLifecycleUser() { TransportDeleteIndexAction.TYPE.name(), ForceMergeAction.NAME, IndicesStatsAction.NAME, - UpdateSettingsAction.NAME, + TransportUpdateSettingsAction.TYPE.name(), DownsampleAction.NAME, AddIndexBlockAction.NAME ); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistryTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistryTests.java index db9cf91681199..2b845affde63b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistryTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistryTests.java @@ -15,9 +15,9 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; -import org.elasticsearch.action.ingest.PutPipelineAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; @@ -54,7 +54,8 @@ import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; import org.elasticsearch.xpack.core.ilm.LifecyclePolicyMetadata; import org.elasticsearch.xpack.core.ilm.OperationMode; -import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; +import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; import org.junit.After; import org.junit.Before; @@ -108,10 +109,10 @@ public void 
testThatIndependentPipelinesAreAddedImmediately() throws Exception { AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, request, listener) -> { - if (action instanceof PutPipelineAction) { + if (action == PutPipelineTransportAction.TYPE) { assertPutPipelineAction(calledTimes, action, request, listener, "custom-plugin-final_pipeline"); return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // ignore lifecycle policies in this case return AcknowledgedResponse.TRUE; } else { @@ -133,10 +134,10 @@ public void testThatDependentPipelinesAreAddedIfDependenciesExist() throws Excep AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, request, listener) -> { - if (action instanceof PutPipelineAction) { + if (action == PutPipelineTransportAction.TYPE) { assertPutPipelineAction(calledTimes, action, request, listener, "custom-plugin-default_pipeline"); return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // ignore lifecycle policies in this case return AcknowledgedResponse.TRUE; } else { @@ -166,7 +167,7 @@ public void testThatTemplateIsAddedIfAllDependenciesExist() throws Exception { if (action instanceof PutComponentTemplateAction) { assertPutComponentTemplate(calledTimes, action, request, listener); return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // ignore lifecycle policies in this case return AcknowledgedResponse.TRUE; } else { @@ -192,10 +193,10 @@ public void testThatTemplateIsNotAddedIfNotAllDependenciesExist() throws Excepti AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, request, listener) -> { - if (action instanceof PutPipelineAction) { + if (action == PutPipelineTransportAction.TYPE) { assertPutPipelineAction(calledTimes, action, request, listener, "custom-plugin-default_pipeline"); return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // ignore lifecycle policies in this case return AcknowledgedResponse.TRUE; } else { @@ -221,13 +222,13 @@ public void testThatComposableTemplateIsAddedIfDependenciesExist() throws Except AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, request, listener) -> { - if (action instanceof PutComposableIndexTemplateAction) { + if (action == TransportPutComposableIndexTemplateAction.TYPE) { assertPutComposableIndexTemplateAction(calledTimes, action, request, listener); return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // ignore lifecycle policies in this case return AcknowledgedResponse.TRUE; - } else if (action instanceof PutPipelineAction) { + } else if (action == PutPipelineTransportAction.TYPE) { // ignore pipelines in this case return AcknowledgedResponse.TRUE; } else { @@ -248,16 +249,16 @@ public void testThatComposableTemplateIsAddedIfDependenciesHaveRightVersion() th AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, request, listener) -> { - if (action instanceof PutComposableIndexTemplateAction) { + if (action == TransportPutComposableIndexTemplateAction.TYPE) { assertPutComposableIndexTemplateAction(calledTimes, action, request, listener); return AcknowledgedResponse.TRUE; } else if (action instanceof PutComponentTemplateAction) { // ignore the 
component template upgrade return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // ignore lifecycle policies in this case return AcknowledgedResponse.TRUE; - } else if (action instanceof PutPipelineAction) { + } else if (action == PutPipelineTransportAction.TYPE) { // ignore pipelines in this case return AcknowledgedResponse.TRUE; } else { @@ -291,7 +292,7 @@ public void testThatTemplatesAreUpgradedWhenNeeded() throws Exception { AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, request, listener) -> { - if (action instanceof PutPipelineAction) { + if (action == PutPipelineTransportAction.TYPE) { assertPutPipelineAction( calledTimes, action, @@ -301,13 +302,13 @@ public void testThatTemplatesAreUpgradedWhenNeeded() throws Exception { "custom-plugin-final_pipeline" ); return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // ignore lifecycle policies in this case return AcknowledgedResponse.TRUE; } else if (action instanceof PutComponentTemplateAction) { assertPutComponentTemplate(calledTimes, action, request, listener); return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { assertPutComposableIndexTemplateAction(calledTimes, action, request, listener); return AcknowledgedResponse.TRUE; } else { @@ -376,7 +377,7 @@ public void testAutomaticRollover() throws Exception { rolloverCounter.incrementAndGet(); RolloverRequest rolloverRequest = ((RolloverRequest) request); assertThat(rolloverRequest.getRolloverTarget(), startsWith("logs-my_app-")); - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { putIndexTemplateCounter.incrementAndGet(); } return AcknowledgedResponse.TRUE; @@ -396,15 +397,16 @@ public void testAutomaticRollover() throws Exception { assertBusy(() -> assertThat(rolloverCounter.get(), equalTo(2))); AtomicReference<Collection<RolloverResponse>> rolloverResponsesRef = registry.getRolloverResponses(); assertBusy(() -> assertNotNull(rolloverResponsesRef.get())); - Collection<RolloverResponse> rolloverResponses = rolloverResponsesRef.get(); - assertThat(rolloverResponses, hasSize(2)); + assertThat(rolloverResponsesRef.get(), hasSize(2)); // test again, to verify that the per-index-template creation lock gets released for reuse putIndexTemplateCounter.set(0); rolloverCounter.set(0); + rolloverResponsesRef.set(Collections.emptySet()); registry.clusterChanged(event); assertBusy(() -> assertThat(putIndexTemplateCounter.get(), equalTo(1))); assertBusy(() -> assertThat(rolloverCounter.get(), equalTo(2))); + assertBusy(() -> assertThat(rolloverResponsesRef.get(), hasSize(2))); // test rollover failures putIndexTemplateCounter.set(0); @@ -415,7 +417,7 @@ public void testAutomaticRollover() throws Exception { RolloverRequest rolloverRequest = ((RolloverRequest) request); assertThat(rolloverRequest.getRolloverTarget(), startsWith("logs-my_app-")); throw new RuntimeException("Failed to rollover " + rolloverRequest.getRolloverTarget()); - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { putIndexTemplateCounter.incrementAndGet(); } return AcknowledgedResponse.TRUE; @@ -461,7 +463,7 @@ public void testNoRolloverForFreshInstalledIndexTemplate() throws Exception { 
rolloverCounter.incrementAndGet(); RolloverRequest rolloverRequest = ((RolloverRequest) request); assertThat(rolloverRequest.getRolloverTarget(), startsWith("logs-my_app-")); - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { putIndexTemplateCounter.incrementAndGet(); } return AcknowledgedResponse.TRUE; @@ -481,10 +483,10 @@ public void testThatTemplatesAreNotUpgradedWhenNotNeeded() throws Exception { AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, request, listener) -> { - if (action instanceof PutComposableIndexTemplateAction) { + if (action == TransportPutComposableIndexTemplateAction.TYPE) { // ignore this return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // ignore lifecycle policies in this case return AcknowledgedResponse.TRUE; } else { @@ -509,10 +511,10 @@ public void testThatNonExistingPoliciesAreAddedImmediately() throws Exception { AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, request, listener) -> { - if (action instanceof PutComposableIndexTemplateAction) { + if (action == TransportPutComposableIndexTemplateAction.TYPE) { // ignore this return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { assertPutLifecycleAction(calledTimes, action, request, listener); return AcknowledgedResponse.TRUE; } else { @@ -541,10 +543,10 @@ public void testPolicyAlreadyExists() { policies.forEach(p -> policyMap.put(p.getName(), p)); client.setVerifier((action, request, listener) -> { - if (action instanceof PutComposableIndexTemplateAction) { + if (action == TransportPutComposableIndexTemplateAction.TYPE) { // ignore this return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { fail("if the policy already exists it should not be re-put"); } else { fail("client called with unexpected request: " + request.toString()); @@ -573,10 +575,10 @@ public void testPolicyAlreadyExistsButDiffers() throws IOException { policies.forEach(p -> policyMap.put(p.getName(), p)); client.setVerifier((action, request, listener) -> { - if (action instanceof PutComposableIndexTemplateAction) { + if (action == TransportPutComposableIndexTemplateAction.TYPE) { // ignore this return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { fail("if the policy already exists it should not be re-put"); } else { fail("client called with unexpected request: " + request.toString()); @@ -626,10 +628,10 @@ public void testPolicyUpgraded() throws Exception { AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, request, listener) -> { - if (action instanceof PutComposableIndexTemplateAction) { + if (action == TransportPutComposableIndexTemplateAction.TYPE) { // ignore this return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { assertPutLifecycleAction(calledTimes, action, request, listener); return AcknowledgedResponse.TRUE; @@ -693,8 +695,9 @@ private static void assertPutComposableIndexTemplateAction( ActionRequest request, ActionListener listener ) { - assertThat(request, instanceOf(PutComposableIndexTemplateAction.Request.class)); - PutComposableIndexTemplateAction.Request putComposableTemplateRequest = 
(PutComposableIndexTemplateAction.Request) request; + assertThat(request, instanceOf(TransportPutComposableIndexTemplateAction.Request.class)); + TransportPutComposableIndexTemplateAction.Request putComposableTemplateRequest = + (TransportPutComposableIndexTemplateAction.Request) request; assertThat(putComposableTemplateRequest.name(), equalTo("custom-plugin-template")); ComposableIndexTemplate composableIndexTemplate = putComposableTemplateRequest.indexTemplate(); assertThat(composableIndexTemplate.composedOf(), hasSize(2)); @@ -713,7 +716,7 @@ private static void assertPutPipelineAction( ActionListener listener, String... pipelineIds ) { - assertThat(action, instanceOf(PutPipelineAction.class)); + assertSame(PutPipelineTransportAction.TYPE, action); assertThat(request, instanceOf(PutPipelineRequest.class)); final PutPipelineRequest putRequest = (PutPipelineRequest) request; assertThat(putRequest.getId(), oneOf(pipelineIds)); @@ -737,9 +740,9 @@ private static void assertPutLifecycleAction( ActionRequest request, ActionListener listener ) { - assertThat(action, instanceOf(PutLifecycleAction.class)); - assertThat(request, instanceOf(PutLifecycleAction.Request.class)); - final PutLifecycleAction.Request putRequest = (PutLifecycleAction.Request) request; + assertSame(ILMActions.PUT, action); + assertThat(request, instanceOf(PutLifecycleRequest.class)); + final PutLifecycleRequest putRequest = (PutLifecycleRequest) request; assertThat(putRequest.getPolicy().getName(), equalTo("custom-plugin-policy")); assertNotNull(listener); calledTimes.incrementAndGet(); diff --git a/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json b/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json index 5de8ce4bef402..5facc229bf503 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json @@ -270,6 +270,9 @@ "download_percent": { "type": "double" }, + "download_rate": { + "type": "double" + }, "failed_state": { "type": "keyword" }, @@ -281,6 +284,18 @@ "ignore_above": 1024 } } + }, + "retry_error_msg": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 1024 + } + } + }, + "retry_until": { + "type": "date" } } } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es-mb.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es-mb.json index 861fffbed0532..b3d6dc3936d59 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es-mb.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es-mb.json @@ -8,6 +8,21 @@ "@timestamp": { "type": "date" }, + "cluster_settings": { + "properties": { + "cluster": { + "properties": { + "metadata": { + "properties": { + "display_name": { + "type": "keyword" + } + } + } + } + } + } + }, "elasticsearch": { "properties": { "cluster": { @@ -1470,9 +1485,6 @@ "hidden": { "type": "boolean" }, - "created": { - "type": "long" - }, "name": { "ignore_above": 1024, "type": "keyword" diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json index c809d920bdf23..d8a27813734e4 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json +++ 
b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json @@ -26,7 +26,8 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.events.version} + "index-version": ${xpack.profiling.index.events.version}, + "managed": true }, "dynamic": false, "properties": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json index ef5fc7159d090..0f1c24d96c092 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json @@ -15,7 +15,8 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.executables.version} + "index-version": ${xpack.profiling.index.executables.version}, + "managed": true }, "dynamic": false, "properties": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json index 752b57d933f19..f452682c620c4 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json @@ -21,7 +21,8 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.hosts.version} + "index-version": ${xpack.profiling.index.hosts.version}, + "managed": true }, "dynamic": false, "properties": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hot-tier.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hot-tier.json index fcd6667773ea3..68f0fd09f18d3 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hot-tier.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hot-tier.json @@ -6,5 +6,9 @@ } } }, + "_meta": { + "index-template-version": ${xpack.profiling.template.version}, + "managed": true + }, "version": ${xpack.profiling.template.version} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-ilm.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-ilm.json index bf3dd67893564..3847e1775442a 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-ilm.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-ilm.json @@ -8,5 +8,9 @@ } } }, + "_meta": { + "index-template-version": ${xpack.profiling.template.version}, + "managed": true + }, "version": ${xpack.profiling.template.version} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json index 6bc69c8ddc5f2..ac4a6def2a70b 
100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json @@ -21,7 +21,8 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.metrics.version} + "index-version": ${xpack.profiling.index.metrics.version}, + "managed": true }, /* We intentionally allow dynamic mappings for metrics. Which metrics are added is guarded by diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stackframes.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stackframes.json index 84c31d11589a4..c28a548f95418 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stackframes.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stackframes.json @@ -23,7 +23,8 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.stackframes.version} + "index-version": ${xpack.profiling.index.stackframes.version}, + "managed": true }, "dynamic": false, "properties": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json index 7999ad1b8f062..470edd710136d 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json @@ -21,7 +21,8 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.stacktraces.version} + "index-version": ${xpack.profiling.index.stacktraces.version}, + "managed": true }, "dynamic": false, "properties": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json index b8c9f9db93db8..48b88492a777d 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json @@ -19,7 +19,8 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.symbols.version} + "index-version": ${xpack.profiling.index.symbols.version}, + "managed": true }, "dynamic": false, "properties": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json index 795e2d318ddb9..e2d17c8327704 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json @@ -13,7 +13,8 @@ "ignore_missing_component_templates": ["profiling-ilm@custom"], "priority": 100, "_meta": { - 
"description": "Index template for profiling-events" + "description": "Index template for profiling-events", + "managed": true }, "version": ${xpack.profiling.template.version} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json index 41df385022fdf..57fd114c57e27 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json @@ -11,7 +11,8 @@ "ignore_missing_component_templates": ["profiling-ilm@custom"], "priority": 100, "_meta": { - "description": "Index template for .profiling-executables" + "description": "Index template for .profiling-executables", + "managed": true }, "version": ${xpack.profiling.template.version} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json index 6189c86bb2999..526d8090b0ac6 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json @@ -11,7 +11,8 @@ "ignore_missing_component_templates": ["profiling-ilm@custom"], "priority": 100, "_meta": { - "description": "Template for profiling-hosts" + "description": "Index template for profiling-hosts", + "managed": true }, "version": ${xpack.profiling.template.version} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json index b9154bfa54334..d09de006d025d 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json @@ -11,7 +11,8 @@ "ignore_missing_component_templates": ["profiling-ilm@custom"], "priority": 100, "_meta": { - "description": "Template for profiling-metrics" + "description": "Index template for profiling-metrics", + "managed": true }, "version": ${xpack.profiling.template.version} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-returnpads-private.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-returnpads-private.json index 7fbb2f8d903a5..72d8cf6e1dfc2 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-returnpads-private.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-returnpads-private.json @@ -21,7 +21,9 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.returnpads.private.version} + "index-version": ${xpack.profiling.index.returnpads.private.version}, + "description": "Index template for .profiling-returnpads-private", + "managed": true }, "dynamic": false, "properties": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json 
b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json index f164af84e07ae..6f32af12c84bf 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json @@ -17,7 +17,9 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.sq.executables.version} + "index-version": ${xpack.profiling.index.sq.executables.version}, + "description": "Index template for .profiling-sq-executables", + "managed": true }, "dynamic": false, "properties": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json index 78a3296093c52..d3c5b0af215e6 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json @@ -17,7 +17,9 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.sq.leafframes.version} + "index-version": ${xpack.profiling.index.sq.leafframes.version}, + "description": "Index template for .profiling-sq-leafframes", + "managed": true }, "dynamic": false, "properties": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json index eed570de0a608..694ae6ba92a57 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json @@ -11,7 +11,8 @@ "ignore_missing_component_templates": ["profiling-ilm@custom"], "priority": 100, "_meta": { - "description": "Index template for .profiling-stackframes" + "description": "Index template for .profiling-stackframes", + "managed": true }, "version": ${xpack.profiling.template.version} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json index 3797b87b17f10..c4c920a76c375 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json @@ -11,7 +11,8 @@ "ignore_missing_component_templates": ["profiling-ilm@custom"], "priority": 100, "_meta": { - "description": "Index template for .profiling-stacktraces" + "description": "Index template for .profiling-stacktraces", + "managed": true }, "version": ${xpack.profiling.template.version} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json index cd6c597f19689..a7bae1adbb548 100644 --- 
a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json @@ -18,7 +18,8 @@ }, "priority": 100, "_meta": { - "description": "Index template for .profiling-symbols-global" + "description": "Index template for .profiling-symbols-global", + "managed": true }, "version": ${xpack.profiling.template.version} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json index 58a3b7196ac61..999bf7721b897 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json @@ -7,7 +7,8 @@ ], "priority": 100, "_meta": { - "description": "Index template for .profiling-symbols-private" + "description": "Index template for .profiling-symbols-private", + "managed": true }, "version": ${xpack.profiling.template.version} } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingTemplateRegistry.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingTemplateRegistry.java index 6b4882bae9fd8..065053f117de0 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingTemplateRegistry.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingTemplateRegistry.java @@ -70,11 +70,8 @@ public DeprecationIndexingTemplateRegistry( DEPRECATION_INDEXING_TEMPLATE_VERSION_VARIABLE ) )) { - try { - componentTemplates.put( - config.getTemplateName(), - ComponentTemplate.parse(JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, config.loadBytes())) - ); + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, config.loadBytes())) { + componentTemplates.put(config.getTemplateName(), ComponentTemplate.parse(parser)); } catch (IOException e) { throw new AssertionError(e); } diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/MlDeprecationCheckerTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/MlDeprecationCheckerTests.java index 84d39d9a02070..c90b578db8f09 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/MlDeprecationCheckerTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/MlDeprecationCheckerTests.java @@ -17,6 +17,7 @@ import java.util.Collections; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty; import static org.hamcrest.Matchers.is; public class MlDeprecationCheckerTests extends ESTestCase { @@ -47,7 +48,7 @@ public void testCheckDataFeedQuery() { DatafeedConfig.Builder goodDatafeed = new DatafeedConfig.Builder("good-df", "job-id"); goodDatafeed.setIndices(Collections.singletonList("some-index")); goodDatafeed.setParsedQuery(QueryBuilders.termQuery("foo", "bar")); - assertThat(MlDeprecationChecker.checkDataFeedQuery(goodDatafeed.build(), xContentRegistry()).isPresent(), is(false)); + 
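The `DeprecationIndexingTemplateRegistry` hunk above (and the matching hunks in `AnalyticsTemplateRegistry` and `ConnectorTemplateRegistry` further down) all make the same fix: the `XContentParser` returned by `createParser` was created inline and never closed. A minimal sketch of the corrected pattern, assuming an `IndexTemplateConfig`-style object exposing the `getTemplateName()` and `loadBytes()` accessors seen in the diff:

```java
import java.io.IOException;
import java.util.Map;

import org.elasticsearch.cluster.metadata.ComponentTemplate;
import org.elasticsearch.xcontent.XContentParserConfiguration;
import org.elasticsearch.xcontent.json.JsonXContent;

static void loadComponentTemplate(IndexTemplateConfig config, Map<String, ComponentTemplate> componentTemplates) {
    // try-with-resources guarantees parser.close() runs even when parse() throws,
    // instead of leaking the parser as the old inline createParser() call did
    try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, config.loadBytes())) {
        componentTemplates.put(config.getTemplateName(), ComponentTemplate.parse(parser));
    } catch (IOException e) {
        // bundled templates must always parse; treat a failure here as a build bug
        throw new AssertionError(e);
    }
}
```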
assertThat(MlDeprecationChecker.checkDataFeedQuery(goodDatafeed.build(), xContentRegistry()), isEmpty()); DatafeedConfig.Builder deprecatedDatafeed = new DatafeedConfig.Builder("df-with-deprecated-query", "job-id"); deprecatedDatafeed.setIndices(Collections.singletonList("some-index")); diff --git a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml index 70f66f38d39b9..0eb93c59c5b1d 100644 --- a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml +++ b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml @@ -46,9 +46,17 @@ setup: multi-counter: type: long time_series_metric: counter + scaled-counter: + type: scaled_float + scaling_factor: 100 + time_series_metric: counter multi-gauge: type: integer time_series_metric: gauge + scaled-gauge: + type: scaled_float + scaling_factor: 100 + time_series_metric: gauge network: properties: tx: @@ -63,21 +71,21 @@ setup: index: test body: - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "multi-counter" : [10, 11, 12], "multi-gauge": [100, 200, 150], "network": {"tx": 2001818691, "rx": 802133794}, "created_at": "2021-04-28T19:34:00.000Z", "running": false, "number_of_containers": 2, "tags": ["backend", "prod"], "values": [2, 3, 6]}}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "multi-counter" : [10, 11, 12], "scaled-counter": 10.0, "multi-gauge": [100, 200, 150], "scaled-gauge": 100.0, "network": {"tx": 2001818691, "rx": 802133794}, "created_at": "2021-04-28T19:34:00.000Z", "running": false, "number_of_containers": 2, "tags": ["backend", "prod"], "values": [2, 3, 6]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.26", "multi-counter" : [21, 22, 23], "multi-gauge": [90, 91, 95], "network": {"tx": 2005177954, "rx": 801479970}, "created_at": "2021-04-28T19:35:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod", "us-west1"], "values": [1, 1, 3]}}}' + - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.26", "multi-counter" : [21, 22, 23], "scaled-counter": 20.0, "multi-gauge": [90, 91, 95], "scaled-gauge": 90.0, "network": {"tx": 2005177954, "rx": 801479970}, "created_at": "2021-04-28T19:35:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod", "us-west1"], "values": [1, 1, 3]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T20:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.41", "multi-counter" : [1, 5, 10], "multi-gauge": [103, 110, 109], "network": {"tx": 2006223737, "rx": 802337279}, "created_at": "2021-04-28T19:36:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod", "us-west2"], "values": [4, 1, 2]}}}' + - '{"@timestamp": "2021-04-28T20:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", 
"ip": "10.10.55.41", "multi-counter" : [1, 5, 10], "scaled-counter": 1.0, "multi-gauge": [103, 110, 109], "scaled-gauge": 104.0, "network": {"tx": 2006223737, "rx": 802337279}, "created_at": "2021-04-28T19:36:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod", "us-west2"], "values": [4, 1, 2]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T20:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.22", "multi-counter" : [101, 102, 105], "multi-gauge": [100, 100, 100], "network": {"tx": 2012916202, "rx": 803685721}, "created_at": "2021-04-28T19:37:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod"], "values": [2, 3, 1]}}}' + - '{"@timestamp": "2021-04-28T20:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.22", "multi-counter" : [101, 102, 105], "scaled-counter": 100.0, "multi-gauge": [100, 100, 100], "scaled-gauge": 102.0, "network": {"tx": 2012916202, "rx": 803685721}, "created_at": "2021-04-28T19:37:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod"], "values": [2, 3, 1]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.33", "multi-counter" : [7, 11, 44], "multi-gauge": [100, 100, 102], "network": {"tx": 1434521831, "rx": 530575198}, "created_at": "2021-04-28T19:42:00.000Z", "running": false, "number_of_containers": 1, "tags": ["backend", "test"], "values": [2, 3, 4]}}}' + - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.33", "multi-counter" : [7, 11, 44], "scaled-counter": 7.0, "multi-gauge": [100, 100, 102], "scaled-gauge": 100.0, "network": {"tx": 1434521831, "rx": 530575198}, "created_at": "2021-04-28T19:42:00.000Z", "running": false, "number_of_containers": 1, "tags": ["backend", "test"], "values": [2, 3, 4]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.56", "multi-counter" : [0, 0, 1], "multi-gauge": [101, 102, 102], "network": {"tx": 1434577921, "rx": 530600088}, "created_at": "2021-04-28T19:43:00.000Z", "running": false, "number_of_containers": 1, "tags": ["backend", "test", "us-west2"], "values": [2, 1, 1]}}}' + - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.56", "multi-counter" : [0, 0, 1], "scaled-counter": 0.0, "multi-gauge": [101, 102, 102], "scaled-gauge": 101.0, "network": {"tx": 1434577921, "rx": 530600088}, "created_at": "2021-04-28T19:43:00.000Z", "running": false, "number_of_containers": 1, "tags": ["backend", "test", "us-west2"], "values": [2, 1, 1]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T19:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.37", "multi-counter" : [1000, 1001, 1002], "multi-gauge": [99, 100, 110], "network": {"tx": 1434587694, "rx": 530604797}, "created_at": "2021-04-28T19:44:00.000Z", "running": true, "number_of_containers": 1, "tags": ["backend", "test", "us-west1"], "values": [4, 5, 2]}}}' + - '{"@timestamp": "2021-04-28T19:50:53.142Z", 
"metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.37", "multi-counter" : [1000, 1001, 1002], "scaled-counter": 1000.0, "multi-gauge": [99, 100, 110], "scaled-gauge": 99.0, "network": {"tx": 1434587694, "rx": 530604797}, "created_at": "2021-04-28T19:44:00.000Z", "running": true, "number_of_containers": 1, "tags": ["backend", "test", "us-west1"], "values": [4, 5, 2]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T19:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.120", "multi-counter" : [76, 77, 78], "multi-gauge": [95, 98, 100], "network": {"tx": 1434595272, "rx": 530605511}, "created_at": "2021-04-28T19:45:00.000Z", "running": true, "number_of_containers": 1, "tags": ["backend", "test", "us-west1"], "values": [3, 2, 1]}}}' + - '{"@timestamp": "2021-04-28T19:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.120", "multi-counter" : [76, 77, 78], "scaled-counter": 70.0, "multi-gauge": [95, 98, 100], "scaled-gauge": 95.0, "network": {"tx": 1434595272, "rx": 530605511}, "created_at": "2021-04-28T19:45:00.000Z", "running": true, "number_of_containers": 1, "tags": ["backend", "test", "us-west1"], "values": [3, 2, 1]}}}' - do: indices.put_settings: @@ -314,10 +322,15 @@ setup: - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } - match: { hits.hits.0._source.k8s.pod.multi-counter: 21 } + - match: { hits.hits.0._source.k8s.pod.scaled-counter: 20.0 } - match: { hits.hits.0._source.k8s.pod.multi-gauge.min: 90 } - match: { hits.hits.0._source.k8s.pod.multi-gauge.max: 200 } - match: { hits.hits.0._source.k8s.pod.multi-gauge.sum: 726 } - match: { hits.hits.0._source.k8s.pod.multi-gauge.value_count: 6 } + - match: { hits.hits.0._source.k8s.pod.scaled-gauge.min: 90.0 } + - match: { hits.hits.0._source.k8s.pod.scaled-gauge.max: 100.0 } + - match: { hits.hits.0._source.k8s.pod.scaled-gauge.sum: 190.0 } + - match: { hits.hits.0._source.k8s.pod.scaled-gauge.value_count: 2 } - match: { hits.hits.0._source.k8s.pod.network.tx.min: 2001818691 } - match: { hits.hits.0._source.k8s.pod.network.tx.max: 2005177954 } - match: { hits.hits.0._source.k8s.pod.network.tx.value_count: 2 } @@ -354,6 +367,13 @@ setup: - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-gauge.time_series_metric: gauge } - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-counter.type: long } - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-counter.time_series_metric: counter } + - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-counter.type: scaled_float } + - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-counter.scaling_factor: 100 } + - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-counter.time_series_metric: counter } + - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.type: aggregate_metric_double } + - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.metrics: [ "min", "max", "sum", "value_count" ] } + - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.default_metric: max } + - match: { 
test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.time_series_metric: gauge } - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.uid.type: keyword } - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.uid.time_series_dimension: true } @@ -387,6 +407,38 @@ setup: "fixed_interval": "1h" } +--- +"Downsample failure": + - skip: + version: " - 8.12.99" + reason: "#103615 merged to 8.13.0 and later" + features: allowed_warnings + + - do: + allowed_warnings: + - "index template [my-template1] has index patterns [failed-downsample-test] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation" + indices.put_index_template: + name: my-template1 + body: + index_patterns: [failed-downsample-test] + template: + settings: + index: + routing: + allocation: + include: + does-not-exist: "yes" + + - do: + catch: /downsample task \[downsample-failed-downsample-test-0-1h\] failed/ + indices.downsample: + index: test + target_index: failed-downsample-test + body: > + { + "fixed_interval": "1h" + } + --- "Downsample to existing index": - skip: diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java index 826c958de4c18..f248da8a7842a 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java @@ -126,11 +126,7 @@ public boolean validateClusterForming() { } })).start(); - waitUntil( - () -> cluster.client().admin().cluster().preparePendingClusterTasks().get().pendingTasks().isEmpty(), - 60, - TimeUnit.SECONDS - ); + waitUntil(() -> getClusterPendingTasks(cluster.client()).pendingTasks().isEmpty(), 60, TimeUnit.SECONDS); ensureStableCluster(cluster.numDataAndMasterNodes()); final String targetIndex = "downsample-5m-" + sourceIndex; diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDriver.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDriver.java index db6ab6d01613d..8f1c32e56d057 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDriver.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDriver.java @@ -11,7 +11,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; @@ -136,7 +136,7 @@ private static void putComposableIndexTemplate( @Nullable Map metadata, @Nullable DataStreamLifecycle lifecycle ) throws IOException { - PutComposableIndexTemplateAction.Request 
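The expected values in the assertions above follow directly from the documents indexed in the setup: the 2021-04-28T18:00 hour bucket for pod `cat` holds two raw documents, with `scaled-gauge` values 100.0 and 90.0 and `scaled-counter` values 10.0 then 20.0. Gauges are downsampled into min/max/sum/value_count aggregates, while counters keep only the last observed value. A quick sketch of that arithmetic:

```java
import java.util.Arrays;

// the two raw samples that land in the 2021-04-28T18:00 bucket for pod "cat"
double[] gaugeSamples = { 100.0, 90.0 };

double min = Arrays.stream(gaugeSamples).min().orElseThrow(); // 90.0
double max = Arrays.stream(gaugeSamples).max().orElseThrow(); // 100.0
double sum = Arrays.stream(gaugeSamples).sum();               // 190.0
long count = gaugeSamples.length;                             // 2

double lastCounter = 20.0; // counters retain only the latest value in the bucket
```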
request = new PutComposableIndexTemplateAction.Request(id); + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(id); request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(patterns) @@ -145,7 +145,7 @@ private static void putComposableIndexTemplate( .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build() ); - client.execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); + client.execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); } private static int indexDocuments(Client client, String dataStreamName, int docCount, String firstDocTimestamp) { diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java index d6549a9618d36..30066e21e4960 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java @@ -203,7 +203,7 @@ public boolean validateClusterForming() { } })).start(); startDownsampleTaskDuringDisruption(sourceIndex, targetIndex, config, disruptionStart, disruptionEnd); - waitUntil(() -> cluster.client().admin().cluster().preparePendingClusterTasks().get().pendingTasks().isEmpty()); + waitUntil(() -> getClusterPendingTasks(cluster.client()).pendingTasks().isEmpty()); ensureStableCluster(cluster.numDataAndMasterNodes()); assertTargetIndex(cluster, sourceIndex, targetIndex, indexedDocs); } @@ -265,7 +265,7 @@ public boolean validateClusterForming() { })).start(); startDownsampleTaskDuringDisruption(sourceIndex, targetIndex, config, disruptionStart, disruptionEnd); - waitUntil(() -> cluster.client().admin().cluster().preparePendingClusterTasks().get().pendingTasks().isEmpty()); + waitUntil(() -> getClusterPendingTasks(cluster.client()).pendingTasks().isEmpty()); ensureStableCluster(cluster.numDataAndMasterNodes()); assertTargetIndex(cluster, sourceIndex, targetIndex, indexedDocs); } @@ -354,7 +354,7 @@ public boolean validateClusterForming() { })).start(); startDownsampleTaskDuringDisruption(sourceIndex, downsampleIndex, config, disruptionStart, disruptionEnd); - waitUntil(() -> cluster.client().admin().cluster().preparePendingClusterTasks().get().pendingTasks().isEmpty()); + waitUntil(() -> getClusterPendingTasks(cluster.client()).pendingTasks().isEmpty()); ensureStableCluster(cluster.numDataAndMasterNodes()); assertTargetIndex(cluster, sourceIndex, downsampleIndex, indexedDocs); } @@ -429,7 +429,7 @@ private void downsample(final String sourceIndex, final String downsampleIndex, assertAcked( internalCluster().client() .execute(DownsampleAction.INSTANCE, new DownsampleAction.Request(sourceIndex, downsampleIndex, TIMEOUT, config)) - .actionGet(TIMEOUT.millis()) + .actionGet(TIMEOUT) ); } diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java index a023f171ad209..e4091af0fedf2 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java +++ 
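The test change above (and the matching ones in the later downsample and enrich test hunks) track a server-side rename: the request type for putting composable index templates now lives on the transport action class, and the action is executed via its `TYPE` constant instead of the old `INSTANCE`. A hedged sketch of the updated call site, where `"my-template"` is a hypothetical id and `template` and `client` are assumed to be in scope:

```java
import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;

// the request builder is fluent, as in the test hunks in this diff
var request = new TransportPutComposableIndexTemplateAction.Request("my-template").indexTemplate(template);
client.execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet();
```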
b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java @@ -41,7 +41,8 @@ import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; import org.elasticsearch.xpack.core.ilm.LifecycleSettings; import org.elasticsearch.xpack.core.ilm.Phase; -import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; +import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; import org.elasticsearch.xpack.ilm.IndexLifecycle; import java.io.IOException; @@ -135,8 +136,8 @@ public void setup(final String sourceIndex, int numOfShards, int numOfReplicas, ) ); LifecyclePolicy policy = new LifecyclePolicy(POLICY_NAME, phases); - PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(policy); - assertAcked(client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).actionGet()); + PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(policy); + assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).actionGet()); } public void testILMDownsampleRollingRestart() throws Exception { @@ -194,7 +195,7 @@ public boolean validateClusterForming() { final String targetIndex = "downsample-1h-" + sourceIndex; startDownsampleTaskViaIlm(sourceIndex, targetIndex, disruptionStart, disruptionEnd); - waitUntil(() -> cluster.client().admin().cluster().preparePendingClusterTasks().get().pendingTasks().isEmpty()); + waitUntil(() -> getClusterPendingTasks(cluster.client()).pendingTasks().isEmpty(), 60, TimeUnit.SECONDS); ensureStableCluster(cluster.numDataAndMasterNodes()); assertTargetIndex(cluster, targetIndex, indexedDocs); } diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java index 06e69ab4702c1..ebf31bd32b48f 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java @@ -159,12 +159,13 @@ private void delegate( final DownsampleShardTaskParams params, final SearchHit[] lastDownsampledTsidHits ) { + DownsampleShardTask downsampleShardTask = (DownsampleShardTask) task; client.execute( DelegatingAction.INSTANCE, - new DelegatingAction.Request((DownsampleShardTask) task, lastDownsampledTsidHits, params), + new DelegatingAction.Request(downsampleShardTask, lastDownsampledTsidHits, params), ActionListener.wrap(empty -> {}, e -> { LOGGER.error("error while delegating", e); - markAsFailed(task, e); + markAsFailed(downsampleShardTask, e); }) ); } @@ -222,7 +223,8 @@ protected void doRun() throws Exception { }); } - private static void markAsFailed(AllocatedPersistentTask task, Exception e) { + private static void markAsFailed(DownsampleShardTask task, Exception e) { + task.setDownsampleShardIndexerStatus(DownsampleShardIndexerStatus.FAILED); task.updatePersistentTaskState( new DownsampleShardPersistentTaskState(DownsampleShardIndexerStatus.FAILED, null), ActionListener.running(() -> task.markAsFailed(e)) diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java index 6a4ee88a0cdef..34b7d3c90b267 100644 
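The `markAsFailed` change above narrows the parameter from `AllocatedPersistentTask` to `DownsampleShardTask` so the method can also flip the in-memory indexer status to `FAILED` before persisting the failed state; presumably this keeps the task's reported status consistent with the persistent state that the coordinating action now inspects (see the `performShardDownsampling` hunk below). A sketch of the resulting flow, using only types visible in the diff:

```java
private static void markAsFailed(DownsampleShardTask task, Exception e) {
    // reflect the failure on the live task object first...
    task.setDownsampleShardIndexerStatus(DownsampleShardIndexerStatus.FAILED);
    // ...then persist it, and only mark the allocated task failed once the state is stored
    task.updatePersistentTaskState(
        new DownsampleShardPersistentTaskState(DownsampleShardIndexerStatus.FAILED, null),
        ActionListener.running(() -> task.markAsFailed(e))
    );
}
```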
--- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java @@ -91,7 +91,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_8_500_054; + return TransportVersions.V_8_500_061; } @Override diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java index 76f19388e7ee7..8324265c3a786 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java @@ -35,7 +35,10 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient String sourceIndex = restRequest.param("index"); String targetIndex = restRequest.param("target_index"); String timeout = restRequest.param("timeout"); - DownsampleConfig config = DownsampleConfig.fromXContent(restRequest.contentParser()); + DownsampleConfig config; + try (var parser = restRequest.contentParser()) { + config = DownsampleConfig.fromXContent(parser); + } DownsampleAction.Request request = new DownsampleAction.Request( sourceIndex, targetIndex, diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java index 4dc5195f8345a..e7bd2f0c0fb27 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java @@ -400,6 +400,19 @@ private void performShardDownsampling( @Override public void onResponse(PersistentTasksCustomMetadata.PersistentTask persistentTask) { + if (persistentTask != null) { + var runningPersistentTaskState = (DownsampleShardPersistentTaskState) persistentTask.getState(); + if (runningPersistentTaskState != null) { + if (runningPersistentTaskState.failed()) { + onFailure(new ElasticsearchException("downsample task [" + persistentTaskId + "] failed")); + return; + } else if (runningPersistentTaskState.cancelled()) { + onFailure(new ElasticsearchException("downsample task [" + persistentTaskId + "] cancelled")); + return; + } + } + } + logger.info("Downsampling task [" + persistentTaskId + "] completed for shard " + params.shardId()); if (countDown.decrementAndGet() == 0) { logger.info("All downsampling tasks completed [" + numberOfShards + "]"); @@ -598,21 +611,23 @@ private static void addMetricFieldMapping(final XContentBuilder builder, final S final TimeSeriesParams.MetricType metricType = TimeSeriesParams.MetricType.fromString( fieldProperties.get(TIME_SERIES_METRIC_PARAM).toString() ); + builder.startObject(field); if (metricType == TimeSeriesParams.MetricType.COUNTER) { // For counters, we keep the same field type, because they store // only one value (the last value of the counter) - builder.startObject(field).field("type", fieldProperties.get("type")).field(TIME_SERIES_METRIC_PARAM, metricType).endObject(); + for (String fieldProperty : fieldProperties.keySet()) { + builder.field(fieldProperty, fieldProperties.get(fieldProperty)); + } } else { final
String[] supportedAggsArray = metricType.supportedAggs(); // We choose max as the default metric final String defaultMetric = List.of(supportedAggsArray).contains("max") ? "max" : supportedAggsArray[0]; - builder.startObject(field) - .field("type", AggregateDoubleMetricFieldMapper.CONTENT_TYPE) + builder.field("type", AggregateDoubleMetricFieldMapper.CONTENT_TYPE) .array(AggregateDoubleMetricFieldMapper.Names.METRICS, supportedAggsArray) .field(AggregateDoubleMetricFieldMapper.Names.DEFAULT_METRIC, defaultMetric) - .field(TIME_SERIES_METRIC_PARAM, metricType) - .endObject(); + .field(TIME_SERIES_METRIC_PARAM, metricType); } + builder.endObject(); } private static void validateDownsamplingInterval(MapperService mapperService, DownsampleConfig config) { diff --git a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java index c0abab1234133..95de6e3ab2027 100644 --- a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java +++ b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java @@ -19,7 +19,7 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; @@ -59,6 +59,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.Aggregations; @@ -558,7 +559,7 @@ public void testDownsampleDatastream() throws Exception { final Instant now = Instant.now(); SourceSupplier sourceSupplier = () -> { - String ts = randomDateForRange(now.minusSeconds(60 * 60).toEpochMilli(), now.plusSeconds(60 * 60).toEpochMilli()); + String ts = randomDateForRange(now.minusSeconds(60 * 60).toEpochMilli(), now.plusSeconds(60 * 29).toEpochMilli()); return XContentFactory.jsonBuilder() .startObject() .field(FIELD_TIMESTAMP, ts) @@ -951,12 +952,9 @@ public void testResumeDownsamplePartial() throws IOException { ); final DownsampleIndexerAction.ShardDownsampleResponse response2 = indexer.execute(); - int dim2DocCount = client().prepareSearch(sourceIndex) - .setQuery(new TermQueryBuilder(FIELD_DIMENSION_1, "dim2")) - .setSize(10_000) - .get() - .getHits() - .getHits().length; + long dim2DocCount = SearchResponseUtils.getTotalHitsValue( + client().prepareSearch(sourceIndex).setQuery(new TermQueryBuilder(FIELD_DIMENSION_1, "dim2")).setSize(10_000) + ); assertDownsampleIndexer(indexService, shardNum, task, response2, dim2DocCount); } @@ -1062,7 +1060,12 @@ private RolloverResponse rollover(String dataStreamName) throws ExecutionExcepti } private Aggregations aggregate(final String index, AggregationBuilder aggregationBuilder) { - return 
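The `addMetricFieldMapping` rewrite above is what makes the new `scaled_float` YAML cases pass: the counter branch used to copy only `type` and `time_series_metric`, which dropped parameters such as `scaling_factor` for `scaled_float` counters; it now copies every property of the source field, while gauges are still rewritten to `aggregate_metric_double`. A sketch of the counter branch (using `entrySet`, equivalent to the `keySet` loop in the diff, and assuming `fieldProperties` is the field's `Map<String, Object>` mapping from the source index):

```java
import java.util.Map;

builder.startObject(field);
if (metricType == TimeSeriesParams.MetricType.COUNTER) {
    // copy the source mapping verbatim so parameters such as scaling_factor survive
    for (Map.Entry<String, Object> property : fieldProperties.entrySet()) {
        builder.field(property.getKey(), property.getValue());
    }
} else {
    // gauges become aggregate_metric_double with min/max/sum/value_count (default metric: max)
}
builder.endObject();
```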
client().prepareSearch(index).addAggregation(aggregationBuilder).get().getAggregations(); + var resp = client().prepareSearch(index).addAggregation(aggregationBuilder).get(); + try { + return resp.getAggregations(); + } finally { + resp.decRef(); + } } @SuppressWarnings("unchecked") @@ -1439,9 +1442,10 @@ private String createDataStream() throws Exception { .template(indexTemplate) .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) .build(); - PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(dataStreamName + "_template") - .indexTemplate(template); - assertAcked(client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet()); + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request( + dataStreamName + "_template" + ).indexTemplate(template); + assertAcked(client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet()); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, new CreateDataStreamAction.Request(dataStreamName)).get()); return dataStreamName; } diff --git a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java index 95640f4625849..251c778260188 100644 --- a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java +++ b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; @@ -181,7 +181,7 @@ public void testDataStreamDownsample() throws ExecutionException, InterruptedExc } private void putComposableIndexTemplate(final String id, final List patterns) throws IOException { - final PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); + final TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(id); final Template template = new Template( indexSettings(1, 0).put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of("routing_field")) @@ -212,7 +212,7 @@ private void putComposableIndexTemplate(final String id, final List patt .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build() ); - client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); } private void indexDocs(final String dataStream, int numDocs, long startTime) { diff --git a/x-pack/plugin/enrich/src/internalClusterTest/java/org/elasticsearch/xpack/enrich/EnrichRestartIT.java b/x-pack/plugin/enrich/src/internalClusterTest/java/org/elasticsearch/xpack/enrich/EnrichRestartIT.java index 
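Several test changes in this diff deal with `SearchResponse` now being ref-counted: responses obtained directly must be released with `decRef()` once consumed, and helpers such as `SearchResponseUtils.getTotalHitsValue(...)` (used earlier in `testResumeDownsamplePartial`) take the unexecuted request so they can manage the release internally. The `aggregate(...)` change just above shows the manual form:

```java
// manual handling of a ref-counted SearchResponse: release it in a finally
// block once the needed data has been extracted
var resp = client().prepareSearch(index).addAggregation(aggregationBuilder).get();
try {
    return resp.getAggregations();
} finally {
    resp.decRef();
}
```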
9e03f7469d71e..86d18bcbbbbc4 100644 --- a/x-pack/plugin/enrich/src/internalClusterTest/java/org/elasticsearch/xpack/enrich/EnrichRestartIT.java +++ b/x-pack/plugin/enrich/src/internalClusterTest/java/org/elasticsearch/xpack/enrich/EnrichRestartIT.java @@ -19,13 +19,14 @@ import java.util.List; import java.util.Optional; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresent; import static org.elasticsearch.xpack.enrich.AbstractEnrichTestCase.createSourceIndices; import static org.elasticsearch.xpack.enrich.EnrichMultiNodeIT.DECORATE_FIELDS; import static org.elasticsearch.xpack.enrich.EnrichMultiNodeIT.MATCH_FIELD; import static org.elasticsearch.xpack.enrich.EnrichMultiNodeIT.POLICY_NAME; import static org.elasticsearch.xpack.enrich.EnrichMultiNodeIT.SOURCE_INDEX_NAME; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.hasSize; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) public class EnrichRestartIT extends ESIntegTestCase { @@ -72,14 +73,14 @@ public void testRestart() throws Exception { private static void verifyPolicies(int numPolicies, EnrichPolicy enrichPolicy) { GetEnrichPolicyAction.Response response = client().execute(GetEnrichPolicyAction.INSTANCE, new GetEnrichPolicyAction.Request()) .actionGet(); - assertThat(response.getPolicies().size(), equalTo(numPolicies)); + assertThat(response.getPolicies(), hasSize(numPolicies)); for (int i = 0; i < numPolicies; i++) { String policyName = POLICY_NAME + i; Optional result = response.getPolicies() .stream() .filter(namedPolicy -> namedPolicy.getName().equals(policyName)) .findFirst(); - assertThat(result.isPresent(), is(true)); + assertThat(result, isPresent()); assertThat(result.get().getPolicy(), equalTo(enrichPolicy)); } } diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchAction.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchAction.java index df8ea5344708d..94e9033dcca4f 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchAction.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchAction.java @@ -55,7 +55,6 @@ import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.DeprecationHandler; @@ -305,7 +304,13 @@ private static BytesReference filterSource(FetchSourceContext fetchSourceContext private static SearchResponse createSearchResponse(TopDocs topDocs, SearchHit[] hits) { SearchHits searchHits = new SearchHits(hits, topDocs.totalHits, 0); return new SearchResponse( - new InternalSearchResponse(searchHits, null, null, null, false, null, 0), + searchHits, + null, + null, + false, + null, + null, + 0, null, 1, 1, diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/BasicEnrichTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/BasicEnrichTests.java index 10ec1f8fb6f72..ac5f7f2baf43e 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/BasicEnrichTests.java +++ 
b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/BasicEnrichTests.java @@ -402,7 +402,7 @@ public void testFailureAfterEnrich() throws Exception { IndexRequest indexRequest = new IndexRequest("my-index").id("1") .setPipeline(pipelineName) .source(Map.of(MATCH_FIELD, "non_existing")); - Exception e = expectThrows(IllegalArgumentException.class, () -> client().index(indexRequest).actionGet()); + Exception e = expectThrows(IllegalArgumentException.class, client().index(indexRequest)); assertThat(e.getMessage(), equalTo("field [users] not present as part of path [users]")); } } diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java index 6d46d8bf8db94..13e1df133f00b 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java @@ -30,7 +30,7 @@ import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsAction; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.elasticsearch.action.admin.indices.segments.ShardSegments; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; +import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.PlainActionFuture; @@ -2077,7 +2077,7 @@ public void testRunnerCancel() throws Exception { ForceMergeAction.INSTANCE, RefreshAction.INSTANCE, IndicesSegmentsAction.INSTANCE, - UpdateSettingsAction.INSTANCE, + TransportUpdateSettingsAction.TYPE, TransportClusterHealthAction.TYPE ); logger.info("Selected [{}] to perform cancel", randomActionType.name()); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyUpdateTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyUpdateTests.java index 73c68b92ff30c..1e4426661e06c 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyUpdateTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyUpdateTests.java @@ -74,7 +74,7 @@ public void testUpdatePolicyOnly() { createSourceIndices(client(), instance2); ResourceAlreadyExistsException exc = expectThrows( ResourceAlreadyExistsException.class, - () -> client().execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request("my_policy", instance2)).actionGet() + client().execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request("my_policy", instance2)) ); assertTrue(exc.getMessage().contains("policy [my_policy] already exists")); } diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactoryTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactoryTests.java index a26cab231f52c..9d63c56ecf721 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactoryTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactoryTests.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.enrich; -import org.apache.lucene.search.TotalHits; import org.elasticsearch.ElasticsearchParseException; import 
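The enrich test changes in this area all switch from wrapping `actionGet()` in a lambda to the `expectThrows` overload that accepts the `ActionFuture` (or request builder) directly, which presumably also lets the test framework release any ref-counted response. A sketch of the updated form, using the policy-already-exists case from `EnrichPolicyUpdateTests` above:

```java
ResourceAlreadyExistsException exc = expectThrows(
    ResourceAlreadyExistsException.class,
    // pass the future itself; no "() -> ... .actionGet()" wrapper needed
    client().execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request("my_policy", instance2))
);
assertTrue(exc.getMessage().contains("policy [my_policy] already exists"));
```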
org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; @@ -25,10 +24,8 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.test.ESTestCase; @@ -257,26 +254,26 @@ protected void ActionListener listener ) { assert EnrichCoordinatorProxyAction.NAME.equals(action.name()); - var emptyResponse = new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), 0.0f), + requestCounter[0]++; + ActionListener.respondAndRelease( + listener, + (Response) new SearchResponse( + SearchHits.EMPTY_WITH_TOTAL_HITS, InternalAggregations.EMPTY, new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), false, false, - 1 - ), - "", - 1, - 1, - 0, - 0, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY + new SearchProfileResults(Collections.emptyMap()), + 1, + "", + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ) ); - requestCounter[0]++; - listener.onResponse((Response) emptyResponse); } }; EnrichProcessorFactory factory = new EnrichProcessorFactory(client, scriptService, enrichCache); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichResiliencyTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichResiliencyTests.java index 049684a2c778d..6c62d7a315872 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichResiliencyTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichResiliencyTests.java @@ -12,8 +12,8 @@ import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.ingest.PutPipelineAction; import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; @@ -34,6 +34,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -122,7 +123,7 @@ public void testWriteThreadLivenessBackToBack() throws Exception { pipe1.endObject(); client().execute( - PutPipelineAction.INSTANCE, + PutPipelineTransportAction.TYPE, new PutPipelineRequest(enrichPipelineName, BytesReference.bytes(pipe1), XContentType.JSON) ).actionGet(); @@ -153,7 +154,7 @@ public void testWriteThreadLivenessBackToBack() throws Exception { assertThat(firstFailure.getMessage(), containsString("Could not perform enrichment, enrich coordination queue at capacity")); client().admin().indices().refresh(new RefreshRequest(enrichedIndexName)).actionGet(); - assertEquals(successfulItems, client().search(new 
SearchRequest(enrichedIndexName)).actionGet().getHits().getTotalHits().value); + assertHitCount(client().search(new SearchRequest(enrichedIndexName)), successfulItems); } public void testWriteThreadLivenessWithPipeline() throws Exception { @@ -240,12 +241,12 @@ public void testWriteThreadLivenessWithPipeline() throws Exception { pipe2.endObject(); client().execute( - PutPipelineAction.INSTANCE, + PutPipelineTransportAction.TYPE, new PutPipelineRequest(enrichPipelineName1, BytesReference.bytes(pipe1), XContentType.JSON) ).actionGet(); client().execute( - PutPipelineAction.INSTANCE, + PutPipelineTransportAction.TYPE, new PutPipelineRequest(enrichPipelineName2, BytesReference.bytes(pipe2), XContentType.JSON) ).actionGet(); @@ -276,6 +277,6 @@ public void testWriteThreadLivenessWithPipeline() throws Exception { assertThat(firstFailure.getMessage(), containsString("Could not perform enrichment, enrich coordination queue at capacity")); client().admin().indices().refresh(new RefreshRequest(enrichedIndexName)).actionGet(); - assertEquals(successfulItems, client().search(new SearchRequest(enrichedIndexName)).actionGet().getHits().getTotalHits().value); + assertHitCount(client().search(new SearchRequest(enrichedIndexName)), successfulItems); } } diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/CoordinatorTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/CoordinatorTests.java index 079af561e00c9..db523546e13bf 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/CoordinatorTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/CoordinatorTests.java @@ -22,10 +22,8 @@ import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.query.MatchQueryBuilder; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -373,16 +371,22 @@ public void testReduce() { } private static SearchResponse emptySearchResponse() { - InternalSearchResponse response = new InternalSearchResponse( - new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), + return new SearchResponse( + SearchHits.empty(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), InternalAggregations.EMPTY, null, - null, false, null, - 1 + null, + 1, + null, + 1, + 1, + 0, + 100, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY ); - return new SearchResponse(response, null, 1, 1, 0, 100, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); } private class MockLookupFunction implements BiConsumer> { diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchActionTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchActionTests.java index c3ee4a19dd543..00f22aca2cb92 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchActionTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchActionTests.java @@ -82,7 +82,7 @@ public void testNonEnrichIndex() throws Exception { 
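The same ref-counting theme runs through this stretch of the diff: a mock transport must not call `listener.onResponse(...)` with a `SearchResponse` it still owns, so `EnrichProcessorFactoryTests` uses `ActionListener.respondAndRelease(...)`, and `EnrichResiliencyTests` counts hits through `assertHitCount(...)`, which takes the unexecuted search so the caller never touches the response's ref count. A sketch of both, reusing the `emptySearchResponse()` helper shown in `CoordinatorTests` above:

```java
// hand the response to the listener and drop our own reference in one step
ActionListener.respondAndRelease(listener, (Response) emptySearchResponse());

// assertHitCount executes the search and releases the response internally
assertHitCount(client().search(new SearchRequest(enrichedIndexName)), successfulItems);
```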
request.add(new SearchRequest("index")); Exception e = expectThrows( ActionRequestValidationException.class, - () -> client().execute(EnrichShardMultiSearchAction.INSTANCE, new EnrichShardMultiSearchAction.Request(request)).actionGet() + client().execute(EnrichShardMultiSearchAction.INSTANCE, new EnrichShardMultiSearchAction.Request(request)) ); assertThat(e.getMessage(), equalTo("Validation Failed: 1: index [index] is not an enrich index;")); } @@ -94,7 +94,7 @@ public void testMultipleShards() throws Exception { request.add(new SearchRequest(indexName)); Exception e = expectThrows( IllegalStateException.class, - () -> client().execute(EnrichShardMultiSearchAction.INSTANCE, new EnrichShardMultiSearchAction.Request(request)).actionGet() + client().execute(EnrichShardMultiSearchAction.INSTANCE, new EnrichShardMultiSearchAction.Request(request)) ); assertThat(e.getMessage(), equalTo("index [.enrich-1] should have 1 shard, but has 2 shards")); } diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java index d34fcf48aa68d..84700308662b9 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java @@ -154,9 +154,7 @@ public void onFailure(final Exception e) { expectThrows( IndexNotFoundException.class, - () -> indicesAdmin().prepareGetIndex() - .setIndices(EnrichPolicy.getIndexName(name, 1001), EnrichPolicy.getIndexName(name, 1001)) - .get() + indicesAdmin().prepareGetIndex().setIndices(EnrichPolicy.getIndexName(name, 1001), EnrichPolicy.getIndexName(name, 1001)) ); if (destructiveRequiresName) { diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/335_connector_update_configuration.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/335_connector_update_configuration.yml index 260e1784d29e2..5a012853b4bf9 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/335_connector_update_configuration.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/335_connector_update_configuration.yml @@ -85,6 +85,41 @@ setup: - match: { configuration.some_field.value: 456 } - match: { status: configured } +--- +"Update Connector Configuration with null tooltip": + - do: + connector.update_configuration: + connector_id: test-connector + body: + configuration: + some_field: + default_value: null + depends_on: + - field: some_field + value: 31 + display: numeric + label: Very important field + options: [ ] + order: 4 + required: true + sensitive: false + tooltip: null + type: str + ui_restrictions: [ ] + validations: + - constraint: 0 + type: greater_than + value: 123 + + + - match: { result: updated } + + - do: + connector.get: + connector_id: test-connector + + - match: { configuration.some_field.tooltip: null } + --- "Update Connector Configuration - Connector doesn't exist": - do: diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/400_connector_sync_job_post.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/400_connector_sync_job_post.yml index 
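The "null tooltip" YAML test above exercises an edge case in the connector configuration parser: an explicit `tooltip: null` must round-trip rather than fail validation. That behavior comes from the `ConnectorConfiguration` change further down in this diff, which marks the field `@Nullable` and declares it as an optional, null-accepting constructor argument:

```java
// accepts a missing field, an explicit JSON null, and a string value alike
PARSER.declareStringOrNull(optionalConstructorArg(), TOOLTIP_FIELD);
```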
0403842cb0728..582a523605663 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/400_connector_sync_job_post.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/400_connector_sync_job_post.yml @@ -41,8 +41,68 @@ setup: - exists: created_at - exists: last_seen +--- +'Create connector sync job with complex connector document': + + - do: + connector.update_pipeline: + connector_id: test-connector + body: + pipeline: + extract_binary_content: true + name: test-pipeline + reduce_whitespace: true + run_ml_inference: false + + - match: { result: updated } + + - do: + connector.update_configuration: + connector_id: test-connector + body: + configuration: + some_field: + default_value: null + depends_on: + - field: some_field + value: 31 + display: numeric + label: Very important field + options: [ ] + order: 4 + required: true + sensitive: false + tooltip: Wow, this tooltip is useful. + type: str + ui_restrictions: [ ] + validations: + - constraint: 0 + type: greater_than + value: 456 + + - match: { result: updated } + + - do: + connector_sync_job.post: + body: + id: test-connector + job_type: full + trigger_method: on_demand + + - set: { id: id } + + - match: { id: $id } + + - do: + connector_sync_job.get: + connector_sync_job_id: $id + + - match: { connector.id: test-connector } + - match: { connector.configuration.some_field.value: 456 } + - match: { connector.pipeline.name: test-pipeline } --- + 'Create connector sync job with missing job type - expect job type full as default': - do: connector_sync_job.post: diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java index a1446606a21af..d9f433b8052bf 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java @@ -65,11 +65,8 @@ public class AnalyticsTemplateRegistry extends IndexTemplateRegistry { TEMPLATE_VERSION_VARIABLE ) )) { - try { - componentTemplates.put( - config.getTemplateName(), - ComponentTemplate.parse(JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, config.loadBytes())) - ); + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, config.loadBytes())) { + componentTemplates.put(config.getTemplateName(), ComponentTemplate.parse(parser)); } catch (IOException e) { throw new AssertionError(e); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/event/AnalyticsEventFactory.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/event/AnalyticsEventFactory.java index 5bd84e31de1e4..aa10b143f69e7 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/event/AnalyticsEventFactory.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/event/AnalyticsEventFactory.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.ContextParser; import org.elasticsearch.xcontent.XContentParser; import 
org.elasticsearch.xcontent.XContentParserConfiguration; @@ -66,7 +67,7 @@ public AnalyticsEvent fromRequest(PostAnalyticsEventAction.Request request) thro */ public AnalyticsEvent fromPayload(AnalyticsEvent.Context context, XContentType xContentType, BytesReference payload) throws IOException { - try (XContentParser parser = xContentType.xContent().createParser(XContentParserConfiguration.EMPTY, payload.streamInput())) { + try (XContentParser parser = XContentHelper.createParserNotCompressed(XContentParserConfiguration.EMPTY, payload, xContentType)) { AnalyticsEvent.Type eventType = context.eventType(); if (EVENT_PARSERS.containsKey(eventType)) { diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorConfiguration.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorConfiguration.java index 103c647f180b4..8ed7c417a1af1 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorConfiguration.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorConfiguration.java @@ -54,6 +54,7 @@ public class ConnectorConfiguration implements Writeable, ToXContentObject { private final String placeholder; private final boolean required; private final boolean sensitive; + @Nullable private final String tooltip; private final ConfigurationFieldType type; private final List uiRestrictions; @@ -199,7 +200,7 @@ public ConnectorConfiguration(StreamInput in) throws IOException { PARSER.declareString(optionalConstructorArg(), PLACEHOLDER_FIELD); PARSER.declareBoolean(constructorArg(), REQUIRED_FIELD); PARSER.declareBoolean(constructorArg(), SENSITIVE_FIELD); - PARSER.declareStringOrNull(constructorArg(), TOOLTIP_FIELD); + PARSER.declareStringOrNull(optionalConstructorArg(), TOOLTIP_FIELD); PARSER.declareField( constructorArg(), (p, c) -> ConfigurationFieldType.fieldType(p.text()), diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java index 642295061d17a..c57650541b416 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java @@ -98,11 +98,8 @@ public class ConnectorTemplateRegistry extends IndexTemplateRegistry { ) )) { - try { - componentTemplates.put( - config.getTemplateName(), - ComponentTemplate.parse(JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, config.loadBytes())) - ); + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, config.loadBytes())) { + componentTemplates.put(config.getTemplateName(), ComponentTemplate.parse(parser)); } catch (IOException e) { throw new AssertionError(e); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestDeleteConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestDeleteConnectorAction.java index 02153710a99a0..8030b9922eaa2 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestDeleteConnectorAction.java +++ 
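In the `AnalyticsEventFactory.fromPayload(...)` hunk above, parsing the payload now goes through `XContentHelper.createParserNotCompressed(...)` instead of manually opening a stream over the `BytesReference`; the helper's name makes explicit the assumption that the payload is not compressed. A sketch of the pattern, with `payload` and `xContentType` assumed in scope as in the method above:

```java
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentParserConfiguration;

try (XContentParser parser = XContentHelper.createParserNotCompressed(XContentParserConfiguration.EMPTY, payload, xContentType)) {
    // dispatch on the event type, as fromPayload() does above
}
```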
b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestDeleteConnectorAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; @@ -18,6 +20,7 @@ import static org.elasticsearch.rest.RestRequest.Method.DELETE; +@ServerlessScope(Scope.PUBLIC) public class RestDeleteConnectorAction extends BaseRestHandler { @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestGetConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestGetConnectorAction.java index 50691bf4d5ea8..79922755e67ef 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestGetConnectorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestGetConnectorAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; @@ -17,6 +19,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; +@ServerlessScope(Scope.PUBLIC) public class RestGetConnectorAction extends BaseRestHandler { @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestListConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestListConnectorAction.java index 59d984438ebf6..9c37e31944ac8 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestListConnectorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestListConnectorAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; import org.elasticsearch.xpack.core.action.util.PageParams; @@ -19,6 +21,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; +@ServerlessScope(Scope.PUBLIC) public class RestListConnectorAction extends BaseRestHandler { @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestPostConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestPostConnectorAction.java index 9bfa3fd629567..2c5f1dda4e554 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestPostConnectorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestPostConnectorAction.java @@ -11,6 +11,8 @@ import 
org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; @@ -18,6 +20,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; +@ServerlessScope(Scope.PUBLIC) public class RestPostConnectorAction extends BaseRestHandler { @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestPutConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestPutConnectorAction.java index e87719943fc29..1d1254bfda3ce 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestPutConnectorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestPutConnectorAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; @@ -17,6 +19,7 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT; +@ServerlessScope(Scope.PUBLIC) public class RestPutConnectorAction extends BaseRestHandler { @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorConfigurationAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorConfigurationAction.java index 12c96d212f77a..f4cc47da2f109 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorConfigurationAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorConfigurationAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; @@ -17,6 +19,7 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT; +@ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorConfigurationAction extends BaseRestHandler { @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorErrorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorErrorAction.java index 8b4b70b994ec1..df56f5825f84e 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorErrorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorErrorAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import 
org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; @@ -17,6 +19,7 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT; +@ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorErrorAction extends BaseRestHandler { @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFilteringAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFilteringAction.java index 4908e9e09d73f..ae294dfebd111 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFilteringAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorFilteringAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; @@ -17,6 +19,7 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT; +@ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorFilteringAction extends BaseRestHandler { @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorLastSeenAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorLastSeenAction.java index c2c6ee12a7767..bef6c357fdda3 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorLastSeenAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorLastSeenAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; @@ -17,6 +19,7 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT; +@ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorLastSeenAction extends BaseRestHandler { @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorLastSyncStatsAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorLastSyncStatsAction.java index ff3ba53e34a9d..6275e84a28952 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorLastSyncStatsAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorLastSyncStatsAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import 
org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; @@ -17,6 +19,7 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT; +@ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorLastSyncStatsAction extends BaseRestHandler { @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorNameAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorNameAction.java index c51744e57b1df..7fbd42cbff272 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorNameAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorNameAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; @@ -17,6 +19,7 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT; +@ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorNameAction extends BaseRestHandler { @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorPipelineAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorPipelineAction.java index 8192099b832dd..465414491bb95 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorPipelineAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorPipelineAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; @@ -17,6 +19,7 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT; +@ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorPipelineAction extends BaseRestHandler { @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorSchedulingAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorSchedulingAction.java index fda9fa03af913..dfc12659d394b 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorSchedulingAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/RestUpdateConnectorSchedulingAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; 
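The `ConnectorConfiguration` change above makes `tooltip` `@Nullable` and switches its parser declaration from `constructorArg()` to `optionalConstructorArg()`, so both an explicit JSON `null` and a missing key are accepted. A minimal sketch of that pattern, using a hypothetical `FieldConfig` record and `FieldConfigParser` class rather than the real types:

```java
import org.elasticsearch.xcontent.ConstructingObjectParser;
import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.XContentParser;

import java.io.IOException;

import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg;

// Hypothetical stand-in for a configuration entry: "label" is required,
// "tooltip" may be null or missing entirely.
record FieldConfig(String label, String tooltip) {}

class FieldConfigParser {
    static final ConstructingObjectParser<FieldConfig, Void> PARSER = new ConstructingObjectParser<>(
        "field_config",
        true,
        args -> new FieldConfig((String) args[0], (String) args[1]) // args[1] is null when tooltip is null or absent
    );

    static {
        PARSER.declareString(constructorArg(), new ParseField("label"));
        // declareStringOrNull accepts an explicit JSON null;
        // optionalConstructorArg() additionally tolerates the key being absent.
        PARSER.declareStringOrNull(optionalConstructorArg(), new ParseField("tooltip"));
    }

    static FieldConfig parse(XContentParser parser) throws IOException {
        return PARSER.parse(parser, null);
    }
}
```

With `constructorArg()` the parser would reject any document that omits `tooltip`; the switch to `optionalConstructorArg()` is what makes the field genuinely optional, matching the `@Nullable` annotation on the Java field.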
import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; @@ -17,6 +19,7 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT; +@ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorSchedulingAction extends BaseRestHandler { @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJob.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJob.java index d623d8dab3834..84d91b7fe0f08 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJob.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJob.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.application.connector.syncjob; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -282,7 +283,7 @@ public ConnectorSyncJob(StreamInput in) throws IOException { ); PARSER.declareField( constructorArg(), - (p, c) -> ConnectorSyncJob.syncJobConnectorFromXContent(p), + (p, c) -> ConnectorSyncJob.syncJobConnectorFromXContent(p, null), CONNECTOR_FIELD, ObjectParser.ValueType.OBJECT ); @@ -327,12 +328,21 @@ private static Instant parseNullableInstant(XContentParser p) throws IOException } @SuppressWarnings("unchecked") - private static final ConstructingObjectParser SYNC_JOB_CONNECTOR_PARSER = new ConstructingObjectParser<>( + private static final ConstructingObjectParser SYNC_JOB_CONNECTOR_PARSER = new ConstructingObjectParser<>( "sync_job_connector", true, - (args) -> { + (args, connectorId) -> { int i = 0; - return new Connector.Builder().setConnectorId((String) args[i++]) + + // The connector ID parsed from the connector document itself. + String parsedConnectorId = (String) args[i++]; + + // A non-empty connector ID passed in as parse context takes precedence over the ID parsed + // from the document, so callers that already know the backing document's ID can enforce it. + String syncJobConnectorId = Strings.isNullOrEmpty(connectorId) ? 
parsedConnectorId : connectorId; + + return new Connector.Builder().setConnectorId(syncJobConnectorId) .setFiltering((List) args[i++]) .setIndexName((String) args[i++]) .setLanguage((String) args[i++]) @@ -344,7 +354,7 @@ private static Instant parseNullableInstant(XContentParser p) throws IOException ); static { - SYNC_JOB_CONNECTOR_PARSER.declareString(constructorArg(), Connector.ID_FIELD); + SYNC_JOB_CONNECTOR_PARSER.declareString(optionalConstructorArg(), Connector.ID_FIELD); SYNC_JOB_CONNECTOR_PARSER.declareObjectArray( optionalConstructorArg(), (p, c) -> ConnectorFiltering.fromXContent(p), @@ -378,16 +388,16 @@ public static ConnectorSyncJob fromXContent(XContentParser parser) throws IOExce return PARSER.parse(parser, null); } - public static Connector syncJobConnectorFromXContentBytes(BytesReference source, XContentType xContentType) { + public static Connector syncJobConnectorFromXContentBytes(BytesReference source, String connectorId, XContentType xContentType) { try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return ConnectorSyncJob.syncJobConnectorFromXContent(parser); + return ConnectorSyncJob.syncJobConnectorFromXContent(parser, connectorId); } catch (IOException e) { throw new ElasticsearchParseException("Failed to parse a connector document.", e); } } - public static Connector syncJobConnectorFromXContent(XContentParser parser) throws IOException { - return SYNC_JOB_CONNECTOR_PARSER.parse(parser, null); + public static Connector syncJobConnectorFromXContent(XContentParser parser, String connectorId) throws IOException { + return SYNC_JOB_CONNECTOR_PARSER.parse(parser, connectorId); } public String getId() { diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java index a7d20414d4631..ee35d8fb6372c 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java @@ -38,10 +38,7 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.connector.Connector; -import org.elasticsearch.xpack.application.connector.ConnectorConfiguration; -import org.elasticsearch.xpack.application.connector.ConnectorFiltering; import org.elasticsearch.xpack.application.connector.ConnectorIndexService; -import org.elasticsearch.xpack.application.connector.ConnectorIngestPipeline; import org.elasticsearch.xpack.application.connector.ConnectorSyncStatus; import org.elasticsearch.xpack.application.connector.ConnectorTemplateRegistry; import org.elasticsearch.xpack.application.connector.syncjob.action.PostConnectorSyncJobAction; @@ -429,22 +426,16 @@ public void onResponse(GetResponse response) { onFailure(new ResourceNotFoundException("Connector with id '" + connectorId + "' does not exist.")); return; } - - Map source = response.getSource(); - - @SuppressWarnings("unchecked") - final Connector syncJobConnectorInfo = new Connector.Builder().setConnectorId(connectorId) - .setFiltering((List) source.get(Connector.FILTERING_FIELD.getPreferredName())) - .setIndexName((String) source.get(Connector.INDEX_NAME_FIELD.getPreferredName())) - 
.setLanguage((String) source.get(Connector.LANGUAGE_FIELD.getPreferredName())) - .setPipeline((ConnectorIngestPipeline) source.get(Connector.PIPELINE_FIELD.getPreferredName())) - .setServiceType((String) source.get(Connector.SERVICE_TYPE_FIELD.getPreferredName())) - .setConfiguration( - (Map) source.get(Connector.CONFIGURATION_FIELD.getPreferredName()) - ) - .build(); - - listener.onResponse(syncJobConnectorInfo); + try { + final Connector syncJobConnectorInfo = ConnectorSyncJob.syncJobConnectorFromXContentBytes( + response.getSourceAsBytesRef(), + connectorId, + XContentType.JSON + ); + listener.onResponse(syncJobConnectorInfo); + } catch (Exception e) { + listener.onFailure(e); + } } @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/GetConnectorSyncJobAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/GetConnectorSyncJobAction.java index 483914e6a7a19..70be6a5a6ffa1 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/GetConnectorSyncJobAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/GetConnectorSyncJobAction.java @@ -133,10 +133,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return connectorSyncJob.toXContent(builder, params); } - public static GetConnectorSyncJobAction.Response fromXContent(XContentParser parser) throws IOException { - return new GetConnectorSyncJobAction.Response(ConnectorSyncJob.fromXContent(parser)); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestCancelConnectorSyncJobAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestCancelConnectorSyncJobAction.java index 7cfce07aca48d..f55449ad33b86 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestCancelConnectorSyncJobAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestCancelConnectorSyncJobAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; import org.elasticsearch.xpack.application.connector.action.ConnectorUpdateActionResponse; @@ -19,6 +21,7 @@ import static org.elasticsearch.xpack.application.connector.syncjob.action.DeleteConnectorSyncJobAction.Request.CONNECTOR_SYNC_JOB_ID_FIELD; +@ServerlessScope(Scope.PUBLIC) public class RestCancelConnectorSyncJobAction extends BaseRestHandler { private static final String CONNECTOR_SYNC_JOB_ID_PARAM = CONNECTOR_SYNC_JOB_ID_FIELD.getPreferredName(); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestCheckInConnectorSyncJobAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestCheckInConnectorSyncJobAction.java index 882227e45169a..2c25252daf734 100644 --- 
a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestCheckInConnectorSyncJobAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestCheckInConnectorSyncJobAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; import org.elasticsearch.xpack.application.connector.action.ConnectorUpdateActionResponse; @@ -19,6 +21,7 @@ import static org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJobConstants.CONNECTOR_SYNC_JOB_ID_PARAM; +@ServerlessScope(Scope.PUBLIC) public class RestCheckInConnectorSyncJobAction extends BaseRestHandler { @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestDeleteConnectorSyncJobAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestDeleteConnectorSyncJobAction.java index c1f352a341cc3..07cbe6e3aac43 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestDeleteConnectorSyncJobAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestDeleteConnectorSyncJobAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; @@ -18,6 +20,7 @@ import static org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJobConstants.CONNECTOR_SYNC_JOB_ID_PARAM; +@ServerlessScope(Scope.PUBLIC) public class RestDeleteConnectorSyncJobAction extends BaseRestHandler { @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestGetConnectorSyncJobAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestGetConnectorSyncJobAction.java index 1f5606810757e..d1021281ff53d 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestGetConnectorSyncJobAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestGetConnectorSyncJobAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; @@ -18,6 +20,7 @@ import static org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJobConstants.CONNECTOR_SYNC_JOB_ID_PARAM; +@ServerlessScope(Scope.PUBLIC) public class RestGetConnectorSyncJobAction extends BaseRestHandler { @Override public String getName() { diff --git 
a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestListConnectorSyncJobsAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestListConnectorSyncJobsAction.java index ef8851636be1b..bb3f55e603905 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestListConnectorSyncJobsAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestListConnectorSyncJobsAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; import org.elasticsearch.xpack.application.connector.ConnectorSyncStatus; @@ -19,6 +21,7 @@ import java.io.IOException; import java.util.List; +@ServerlessScope(Scope.PUBLIC) public class RestListConnectorSyncJobsAction extends BaseRestHandler { @Override public String getName() { diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestPostConnectorSyncJobAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestPostConnectorSyncJobAction.java index 51cc890418dcb..eac645ab3dc77 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestPostConnectorSyncJobAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestPostConnectorSyncJobAction.java @@ -11,6 +11,8 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; @@ -19,6 +21,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; +@ServerlessScope(Scope.PUBLIC) public class RestPostConnectorSyncJobAction extends BaseRestHandler { @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobErrorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobErrorAction.java index a05be4a92e6e3..720bfdf416827 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobErrorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobErrorAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; import org.elasticsearch.xpack.application.connector.action.ConnectorUpdateActionResponse; @@ -19,6 +21,7 @@ 
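The `SYNC_JOB_CONNECTOR_PARSER` rework above turns the parser's context type into a caller-supplied connector ID, so a connector document whose source lacks an embedded `id` can still be attributed to the right connector. A compact sketch of the same context-takes-precedence idea, with hypothetical `Doc` and `DocParser` names standing in for the real classes:

```java
import org.elasticsearch.common.Strings;
import org.elasticsearch.xcontent.ConstructingObjectParser;
import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.XContentParser;

import java.io.IOException;

import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg;

// Hypothetical document type: the id may come from the parsed source
// or be imposed by the caller through the parse context.
record Doc(String id) {}

class DocParser {
    private static final ConstructingObjectParser<Doc, String> PARSER = new ConstructingObjectParser<>(
        "doc",
        true,
        (args, contextId) -> {
            String parsedId = (String) args[0]; // may be null, since the field is declared optional
            // A non-empty context id wins over whatever the source carried.
            return new Doc(Strings.isNullOrEmpty(contextId) ? parsedId : contextId);
        }
    );

    static {
        PARSER.declareString(optionalConstructorArg(), new ParseField("id"));
    }

    static Doc parse(XContentParser parser, String contextId) throws IOException {
        return PARSER.parse(parser, contextId);
    }
}
```

In the diff, `ConnectorSyncJobIndexService` is the caller that already knows the ID (it is the `_id` of the fetched document), which is why `syncJobConnectorFromXContentBytes` now threads `connectorId` through to the parser.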
import static org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJobConstants.CONNECTOR_SYNC_JOB_ID_PARAM; +@ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorSyncJobErrorAction extends BaseRestHandler { @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobIngestionStatsAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobIngestionStatsAction.java index 57a362b55ee9b..d55d3ba87d1df 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobIngestionStatsAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/action/RestUpdateConnectorSyncJobIngestionStatsAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; import org.elasticsearch.xpack.application.connector.action.ConnectorUpdateActionResponse; @@ -19,6 +21,7 @@ import static org.elasticsearch.xpack.application.connector.syncjob.ConnectorSyncJobConstants.CONNECTOR_SYNC_JOB_ID_PARAM; +@ServerlessScope(Scope.PUBLIC) public class RestUpdateConnectorSyncJobIngestionStatsAction extends BaseRestHandler { @Override public String getName() { diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRule.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRule.java index 9cca42b0402bf..b9093a2597d7d 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRule.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRule.java @@ -108,7 +108,7 @@ public QueryRule(StreamInput in) throws IOException { this.id = in.readString(); this.type = QueryRuleType.queryRuleType(in.readString()); this.criteria = in.readCollectionAsList(QueryRuleCriteria::new); - this.actions = in.readMap(); + this.actions = in.readGenericMap(); validate(); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleCriteria.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleCriteria.java index 39a2b1c6ab6d2..ef42a7d7c64f2 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleCriteria.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleCriteria.java @@ -37,7 +37,7 @@ public class QueryRuleCriteria implements Writeable, ToXContentObject { - public static final TransportVersion CRITERIA_METADATA_VALUES_TRANSPORT_VERSION = TransportVersions.V_8_500_046; + public static final TransportVersion CRITERIA_METADATA_VALUES_TRANSPORT_VERSION = TransportVersions.V_8_500_061; private final QueryRuleCriteriaType criteriaType; private final String criteriaMetadata; private final List criteriaValues; diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java 
b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java index 0a1ff919493c3..fcd0f6be8fbcb 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java @@ -27,8 +27,7 @@ */ public class QueryRulesetListItem implements Writeable, ToXContentObject { - // TODO we need to actually bump transport version, but there's no point until main is merged. Placeholder for now. - public static final TransportVersion EXPANDED_RULESET_COUNT_TRANSPORT_VERSION = TransportVersions.V_8_500_052; + public static final TransportVersion EXPANDED_RULESET_COUNT_TRANSPORT_VERSION = TransportVersions.V_8_500_061; public static final ParseField RULESET_ID_FIELD = new ParseField("ruleset_id"); public static final ParseField RULE_TOTAL_COUNT_FIELD = new ParseField("rule_total_count"); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java index ebd78119ab7d5..b23ed92a5d9b8 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java @@ -73,7 +73,7 @@ public class RuleQueryBuilder extends AbstractQueryBuilder { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_8_500_040; + return TransportVersions.V_8_500_061; } public RuleQueryBuilder(QueryBuilder organicQuery, Map matchCriteria, String rulesetId) { @@ -83,7 +83,7 @@ public RuleQueryBuilder(QueryBuilder organicQuery, Map matchCrit public RuleQueryBuilder(StreamInput in) throws IOException { super(in); organicQuery = in.readNamedWriteable(QueryBuilder.class); - matchCriteria = in.readMap(); + matchCriteria = in.readGenericMap(); rulesetId = in.readString(); pinnedIds = in.readOptionalStringCollectionAsList(); pinnedIdsSupplier = null; diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationListItem.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationListItem.java index 7d4377d71079e..76964a7dbb3e8 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationListItem.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationListItem.java @@ -11,17 +11,13 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; -import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; - /** * This class is used for returning information for lists of search applications, to avoid including all * {@link SearchApplication} information which can be retrieved using subsequent Get Search Application requests. 
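`QueryRule` and `RuleQueryBuilder` above replace `in.readMap()` with `in.readGenericMap()`, the explicitly named overload for deserializing an arbitrary `Map<String, Object>` from the transport stream. A minimal sketch of a `Writeable` using the generic-map helpers; `ActionsHolder` is a hypothetical name, and the sketch assumes the matching `writeGenericMap` on `StreamOutput`:

```java
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;
import java.util.Map;

// Hypothetical Writeable wrapping an arbitrary map of values.
public class ActionsHolder implements Writeable {
    private final Map<String, Object> actions;

    public ActionsHolder(Map<String, Object> actions) {
        this.actions = actions;
    }

    public ActionsHolder(StreamInput in) throws IOException {
        // readGenericMap() replaces the old no-arg readMap() used in these classes.
        this.actions = in.readGenericMap();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeGenericMap(actions);
    }
}
```

The rename keeps call sites unambiguous, presumably because `readMap` also has overloads that take explicit key and value readers for strongly typed maps.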
@@ -70,28 +66,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "search_application_list_item`", - false, - (params) -> { - final String name = (String) params[0]; - @SuppressWarnings("unchecked") - final String analyticsCollectionName = (String) params[2]; - final Long updatedAtMillis = (Long) params[3]; - return new SearchApplicationListItem(name, analyticsCollectionName, updatedAtMillis); - } - ); - - static { - PARSER.declareStringOrNull(optionalConstructorArg(), NAME_FIELD); - PARSER.declareStringOrNull(optionalConstructorArg(), ANALYTICS_COLLECTION_NAME_FIELD); - PARSER.declareLong(optionalConstructorArg(), UPDATED_AT_MILLIS_FIELD); - } - - public SearchApplicationListItem fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestQuerySearchApplicationAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestQuerySearchApplicationAction.java index a7341c972156f..de0bb837acef8 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestQuerySearchApplicationAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestQuerySearchApplicationAction.java @@ -13,7 +13,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; import org.elasticsearch.xpack.application.EnterpriseSearchBaseRestHandler; import org.elasticsearch.xpack.application.utils.LicenseUtils; @@ -53,7 +53,7 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest restRequest, NodeC } return channel -> { RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, restRequest.getHttpChannel()); - cancelClient.execute(QuerySearchApplicationAction.INSTANCE, request, new RestChunkedToXContentListener<>(channel)); + cancelClient.execute(QuerySearchApplicationAction.INSTANCE, request, new RestRefCountedChunkedToXContentListener<>(channel)); }; } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestRenderSearchApplicationQueryAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestRenderSearchApplicationQueryAction.java index a4ce64181c48e..97f30d2ca8722 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestRenderSearchApplicationQueryAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestRenderSearchApplicationQueryAction.java @@ -42,7 +42,9 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest restRequest, NodeC final String searchAppName = restRequest.param("name"); SearchApplicationSearchRequest request; if (restRequest.hasContent()) { - request = SearchApplicationSearchRequest.fromXContent(searchAppName, restRequest.contentParser()); + try (var parser = 
restRequest.contentParser()) { + request = SearchApplicationSearchRequest.fromXContent(searchAppName, parser); + } } else { request = new SearchApplicationSearchRequest(searchAppName); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/SearchApplicationSearchRequest.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/SearchApplicationSearchRequest.java index b741221890429..105b58308217b 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/SearchApplicationSearchRequest.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/SearchApplicationSearchRequest.java @@ -42,7 +42,7 @@ public class SearchApplicationSearchRequest extends ActionRequest implements Ind public SearchApplicationSearchRequest(StreamInput in) throws IOException { super(in); this.name = in.readString(); - this.queryParams = in.readMap(); + this.queryParams = in.readGenericMap(); } public SearchApplicationSearchRequest(String name) { diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java index 8bf06b8954080..50102b8cfcf53 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java @@ -13,8 +13,8 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; -import org.elasticsearch.action.ingest.PutPipelineAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; @@ -47,7 +47,7 @@ import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; import org.elasticsearch.xpack.core.ilm.LifecyclePolicyMetadata; import org.elasticsearch.xpack.core.ilm.OperationMode; -import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; import org.junit.After; import org.junit.Before; @@ -63,6 +63,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; @@ -202,17 +203,17 @@ public void testSameOrHigherVersionComponentTemplateNotUpgraded() { versions.put(AnalyticsTemplateRegistry.EVENT_DATA_STREAM_SETTINGS_COMPONENT_NAME, AnalyticsTemplateRegistry.REGISTRY_VERSION); ClusterChangedEvent sameVersionEvent = createClusterChangedEvent(Collections.emptyMap(), versions, nodes); client.setVerifier((action, request, listener) -> { - if (action instanceof PutPipelineAction) { + if (action == PutPipelineTransportAction.TYPE) { // Ignore this, it's verified in 
another test return AcknowledgedResponse.TRUE; } if (action instanceof PutComponentTemplateAction) { fail("template should not have been re-installed"); return null; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { fail("As of 8.12.0 we no longer put an ILM lifecycle and instead rely on DSL for analytics datastreams."); return null; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; } else { @@ -258,16 +259,16 @@ public void testThatNonExistingPipelinesAreAddedImmediately() throws Exception { AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, request, listener) -> { - if (action instanceof PutPipelineAction) { + if (action == PutPipelineTransportAction.TYPE) { calledTimes.incrementAndGet(); return AcknowledgedResponse.TRUE; } if (action instanceof PutComponentTemplateAction) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { fail("As of 8.12.0 we no longer put an ILM lifecycle and instead rely on DSL for analytics datastreams."); - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; } else { @@ -342,21 +343,22 @@ private ActionResponse verifyComposableTemplateInstalled( ActionRequest request, ActionListener listener ) { - if (action instanceof PutPipelineAction) { + if (action == PutPipelineTransportAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; } if (action instanceof PutComponentTemplateAction) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { fail("As of 8.12.0 we no longer put an ILM lifecycle and instead rely on DSL for analytics datastreams."); return null; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { calledTimes.incrementAndGet(); - assertThat(action, instanceOf(PutComposableIndexTemplateAction.class)); - assertThat(request, instanceOf(PutComposableIndexTemplateAction.Request.class)); - final PutComposableIndexTemplateAction.Request putRequest = (PutComposableIndexTemplateAction.Request) request; + assertThat(action, sameInstance(TransportPutComposableIndexTemplateAction.TYPE)); + assertThat(request, instanceOf(TransportPutComposableIndexTemplateAction.Request.class)); + final TransportPutComposableIndexTemplateAction.Request putRequest = + (TransportPutComposableIndexTemplateAction.Request) request; assertThat(putRequest.indexTemplate().version(), equalTo((long) AnalyticsTemplateRegistry.REGISTRY_VERSION)); final List indexPatterns = putRequest.indexTemplate().indexPatterns(); assertThat(indexPatterns, hasSize(1)); @@ -375,7 +377,7 @@ private ActionResponse verifyComponentTemplateInstalled( ActionRequest request, ActionListener listener ) { - if (action instanceof PutPipelineAction) { + if (action == PutPipelineTransportAction.TYPE) { return AcknowledgedResponse.TRUE; } if (action instanceof PutComponentTemplateAction) { @@ -386,10 +388,10 @@ private ActionResponse 
verifyComponentTemplateInstalled( assertThat(putRequest.componentTemplate().version(), equalTo((long) AnalyticsTemplateRegistry.REGISTRY_VERSION)); assertNotNull(listener); return new TestPutIndexTemplateResponse(true); - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { fail("As of 8.12.0 we no longer put an ILM lifecycle and instead rely on DSL for analytics datastreams."); return null; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; } else { diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java index d6f2c2532acca..3fbc5cd749cb2 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java @@ -13,8 +13,8 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; -import org.elasticsearch.action.ingest.PutPipelineAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; @@ -46,7 +46,7 @@ import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; import org.elasticsearch.xpack.core.ilm.LifecyclePolicyMetadata; import org.elasticsearch.xpack.core.ilm.OperationMode; -import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; import org.junit.After; import org.junit.Before; @@ -66,6 +66,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.oneOf; +import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; @@ -217,17 +218,17 @@ public void testSameOrHigherVersionComponentTemplateNotUpgraded() { versions.put(ConnectorTemplateRegistry.ACCESS_CONTROL_TEMPLATE_NAME, ConnectorTemplateRegistry.REGISTRY_VERSION); ClusterChangedEvent sameVersionEvent = createClusterChangedEvent(Collections.emptyMap(), versions, nodes); client.setVerifier((action, request, listener) -> { - if (action instanceof PutPipelineAction) { + if (action == PutPipelineTransportAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; } if (action instanceof PutComponentTemplateAction) { fail("template should not have been re-installed"); return null; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == 
TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; } else { @@ -285,17 +286,17 @@ public void testThatNonExistingPipelinesAreAddedImmediately() throws Exception { AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, request, listener) -> { - if (action instanceof PutPipelineAction) { + if (action == PutPipelineTransportAction.TYPE) { calledTimes.incrementAndGet(); return AcknowledgedResponse.TRUE; } if (action instanceof PutComponentTemplateAction) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; } else { @@ -369,21 +370,22 @@ private ActionResponse verifyComposableTemplateInstalled( ActionRequest request, ActionListener listener ) { - if (action instanceof PutPipelineAction) { + if (action == PutPipelineTransportAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; } if (action instanceof PutComponentTemplateAction) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { calledTimes.incrementAndGet(); - assertThat(action, instanceOf(PutComposableIndexTemplateAction.class)); - assertThat(request, instanceOf(PutComposableIndexTemplateAction.Request.class)); - final PutComposableIndexTemplateAction.Request putRequest = (PutComposableIndexTemplateAction.Request) request; + assertThat(action, sameInstance(TransportPutComposableIndexTemplateAction.TYPE)); + assertThat(request, instanceOf(TransportPutComposableIndexTemplateAction.Request.class)); + final TransportPutComposableIndexTemplateAction.Request putRequest = + (TransportPutComposableIndexTemplateAction.Request) request; assertThat(putRequest.indexTemplate().version(), equalTo((long) ConnectorTemplateRegistry.REGISTRY_VERSION)); final List indexPatterns = putRequest.indexTemplate().indexPatterns(); assertThat(indexPatterns, hasSize(1)); @@ -405,7 +407,7 @@ private ActionResponse verifyComponentTemplateInstalled( ActionRequest request, ActionListener listener ) { - if (action instanceof PutPipelineAction) { + if (action == PutPipelineTransportAction.TYPE) { return AcknowledgedResponse.TRUE; } if (action instanceof PutComponentTemplateAction) { @@ -416,10 +418,10 @@ private ActionResponse verifyComponentTemplateInstalled( assertThat(putRequest.componentTemplate().version(), equalTo((long) ConnectorTemplateRegistry.REGISTRY_VERSION)); assertNotNull(listener); return new TestPutIndexTemplateResponse(true); - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in 
another test return AcknowledgedResponse.TRUE; } else { diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java index 9401a2a58403e..cdfa3dea8a6fa 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java @@ -98,6 +98,31 @@ public void testToXContent() throws IOException { } ], "value":"" + }, + "field_with_null_tooltip":{ + "default_value":null, + "depends_on":[ + { + "field":"some_field", + "value":true + } + ], + "display":"textbox", + "label":"Very important field", + "options":[], + "order":4, + "required":true, + "sensitive":false, + "tooltip":null, + "type":"str", + "ui_restrictions":[], + "validations":[ + { + "constraint":0, + "type":"greater_than" + } + ], + "value":"" } }, "description":"test-connector", diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java index 5eed1e5d1b58a..b82db8d04d3a9 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java @@ -400,7 +400,7 @@ public void testSyncJobConnectorFromXContent_WithAllFieldsSet() throws IOExcepti } """); - Connector connector = ConnectorSyncJob.syncJobConnectorFromXContentBytes(new BytesArray(content), XContentType.JSON); + Connector connector = ConnectorSyncJob.syncJobConnectorFromXContentBytes(new BytesArray(content), null, XContentType.JSON); assertThat(connector.getConnectorId(), equalTo("connector-id")); assertThat(connector.getFiltering().size(), equalTo(1)); @@ -474,7 +474,7 @@ public void testSyncJobConnectorFromXContent_WithAllNonOptionalFieldsSet_DoesNot } """); - ConnectorSyncJob.syncJobConnectorFromXContentBytes(new BytesArray(content), XContentType.JSON); + ConnectorSyncJob.syncJobConnectorFromXContentBytes(new BytesArray(content), null, XContentType.JSON); } private void assertTransportSerialization(ConnectorSyncJob testInstance) throws IOException { diff --git a/x-pack/plugin/eql/qa/common/src/main/resources/test_missing_events.toml b/x-pack/plugin/eql/qa/common/src/main/resources/test_missing_events.toml index bfe5465adebcf..0d546940c72a1 100644 --- a/x-pack/plugin/eql/qa/common/src/main/resources/test_missing_events.toml +++ b/x-pack/plugin/eql/qa/common/src/main/resources/test_missing_events.toml @@ -385,3 +385,16 @@ join_keys = ["foo", "foo", "foo", "foo", "baz", "baz"] +[[queries]] +name = "interleaved_3_missing" +query = ''' + sequence with maxspan=1h + ![ test1 where tag == "foobar" ] + [ test1 where tag == "normal" ] + ![ test1 where tag == "foobar" ] + [ test1 where tag == "normal" ] + ![ test1 where tag == "foobar" ] +''' +expected_event_ids = [-1, 1, -1, 2, -1, + -1, 2, -1, 4, -1] + diff --git a/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/50_samples.yml b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/50_samples.yml new file mode 100644 index 0000000000000..0c413e809689a --- /dev/null +++ 
b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/50_samples.yml @@ -0,0 +1,80 @@ +--- +setup: + - do: + indices.create: + index: sample1 + body: + mappings: + properties: + ip: + type: ip + version: + type: version + missing_keyword: + type: keyword + type_test: + type: keyword + "@timestamp_pretty": + type: date + format: dd-MM-yyyy + event_type: + type: keyword + event: + properties: + category: + type: alias + path: event_type + host: + type: keyword + os: + type: keyword + bool: + type: boolean + uptime: + type: long + port: + type: long + - do: + bulk: + refresh: true + body: + - '{"index" : { "_index" : "sample1" }}' + - '{"@timestamp_pretty":"12-12-2022","type_test":"abc","event_type":"alert","os":"win10","port":1234,"missing_keyword":"test","ip":"10.0.0.1","host":"doom","id":11,"version":"1.0.0","uptime":0}' + - '{"index" : { "_index" : "sample1" }}' + - '{"@timestamp_pretty":"13-12-2022","event_type":"alert","type_test":"abc","os":"win10","port":1,"host":"CS","id":12,"version":"1.2.0","uptime":5}' + - '{"index" : { "_index" : "sample1" }}' + - '{"@timestamp_pretty":"12-12-2022","event_type":"alert","type_test":"abc","bool":false,"os":"win10","port":1234,"host":"farcry","id":13,"version":"2.0.0","uptime":1}' + - '{"index" : { "_index" : "sample1" }}' + - '{"@timestamp_pretty":"13-12-2022","event_type":"alert","type_test":"abc","os":"slack","port":12,"host":"GTA","id":14,"version":"10.0.0","uptime":3}' + - '{"index" : { "_index" : "sample1" }}' + - '{"@timestamp_pretty":"17-12-2022","event_type":"alert","os":"fedora","port":1234,"host":"sniper 3d","id":15,"version":"20.1.0","uptime":6}' + - '{"index" : { "_index" : "sample1" }}' + - '{"@timestamp_pretty":"17-12-2022","event_type":"alert","bool":true,"os":"redhat","port":65123,"host":"doom","id":16,"version":"20.10.0"}' + - '{"index" : { "_index" : "sample1" }}' + - '{"@timestamp_pretty":"17-12-2022","event_type":"failure","bool":true,"os":"redhat","port":1234,"missing_keyword":"yyy","host":"doom","id":17,"version":"20.2.0","uptime":15}' + - '{"index" : { "_index" : "sample1" }}' + - '{"@timestamp_pretty":"12-12-2022","event_type":"success","os":"win10","port":512,"missing_keyword":"test","host":"doom","id":18,"version":"1.2.3","uptime":16}' + - '{"index" : { "_index" : "sample1" }}' + - '{"@timestamp_pretty":"15-12-2022","event_type":"success","bool":true,"os":"win10","port":12,"missing_keyword":"test","host":"GTA","id":19,"version":"1.2.3"}' + - '{"index" : { "_index" : "sample1" }}' + - '{"event_type":"alert","bool":true,"os":"win10","port":1234,"missing_keyword":null,"ip":"10.0.0.5","host":"farcry","id":110,"version":"1.2.3","uptime":1}' + +--- +# Test an empty reply due to query filtering +"Execute some EQL.": + - do: + eql.search: + index: sample1 + body: + query: 'sample by host [any where uptime > 0] by os [any where port > 100] by os [any where bool == true] by os' + filter: + range: + "@timestamp_pretty": + gte: now-5m + lte: now + + - match: {timed_out: false} + - match: {hits.total.value: 0} + - match: {hits.total.relation: "eq"} + - match: {hits.sequences: []} + diff --git a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AsyncEqlSearchActionIT.java b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AsyncEqlSearchActionIT.java index 646a1e896c473..5387e5b08c2fd 100644 --- a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AsyncEqlSearchActionIT.java +++ 
b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AsyncEqlSearchActionIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.io.stream.ByteBufferStreamInput; -import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; @@ -32,16 +31,15 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.async.AsyncExecutionId; -import org.elasticsearch.xpack.core.async.DeleteAsyncResultAction; import org.elasticsearch.xpack.core.async.DeleteAsyncResultRequest; import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; import org.elasticsearch.xpack.core.async.StoredAsyncResponse; +import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction; import org.elasticsearch.xpack.eql.plugin.EqlAsyncGetResultAction; import org.hamcrest.BaseMatcher; import org.hamcrest.Description; import org.junit.After; -import java.io.InputStream; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Base64; @@ -150,11 +148,11 @@ public void testBasicAsyncExecution() throws Exception { assertThat(response, notNullValue()); assertThat(response.hits().events().size(), equalTo(1)); } else { - Exception ex = expectThrows(Exception.class, future::actionGet); + Exception ex = expectThrows(Exception.class, future); assertThat(ex.getCause().getMessage(), containsString("by zero")); } AcknowledgedResponse deleteResponse = client().execute( - DeleteAsyncResultAction.INSTANCE, + TransportDeleteAsyncResultAction.TYPE, new DeleteAsyncResultRequest(response.id()) ).actionGet(); assertThat(deleteResponse.isAcknowledged(), equalTo(true)); @@ -251,13 +249,13 @@ public void testAsyncCancellation() throws Exception { logger.trace("Block is established"); ActionFuture deleteResponse = client().execute( - DeleteAsyncResultAction.INSTANCE, + TransportDeleteAsyncResultAction.TYPE, new DeleteAsyncResultRequest(response.id()) ); disableBlocks(plugins); assertThat(deleteResponse.actionGet().isAcknowledged(), equalTo(true)); - deleteResponse = client().execute(DeleteAsyncResultAction.INSTANCE, new DeleteAsyncResultRequest(response.id())); + deleteResponse = client().execute(TransportDeleteAsyncResultAction.TYPE, new DeleteAsyncResultRequest(response.id())); assertFutureThrows(deleteResponse, ResourceNotFoundException.class); } @@ -294,7 +292,7 @@ public void testFinishingBeforeTimeout() throws Exception { assertThat(storedResponse, equalTo(response)); AcknowledgedResponse deleteResponse = client().execute( - DeleteAsyncResultAction.INSTANCE, + TransportDeleteAsyncResultAction.TYPE, new DeleteAsyncResultRequest(response.id()) ).actionGet(); assertThat(deleteResponse.isAcknowledged(), equalTo(true)); @@ -312,8 +310,12 @@ public StoredAsyncResponse getStoredRecord(String id) throws String value = doc.getSource().get("result").toString(); try (ByteBufferStreamInput buf = new ByteBufferStreamInput(ByteBuffer.wrap(Base64.getDecoder().decode(value)))) { TransportVersion version = TransportVersion.readVersion(buf); - final InputStream compressedIn = CompressorFactory.COMPRESSOR.threadLocalInputStream(buf); - try (StreamInput in = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(compressedIn), registry)) { + 
try ( + StreamInput in = new NamedWriteableAwareStreamInput( + CompressorFactory.COMPRESSOR.threadLocalStreamInput(buf), + registry + ) + ) { in.setTransportVersion(version); return new StoredAsyncResponse<>(EqlSearchResponse::new, in); } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java index 5811d328ae7dd..0aeddd525e317 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java @@ -128,7 +128,7 @@ public EqlSearchRequest(StreamInput in) throws IOException { if (in.readBoolean()) { fetchFields = in.readCollectionAsList(FieldAndFormat::new); } - runtimeMappings = in.readMap(); + runtimeMappings = in.readGenericMap(); } else { runtimeMappings = emptyMap(); } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java index c22cf7d390628..2d7a330560fcc 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java @@ -281,7 +281,7 @@ private Event(StreamInput in) throws IOException { } else { fetchFields = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { missing = in.readBoolean(); } else { missing = index.isEmpty(); @@ -304,7 +304,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(fetchFields, StreamOutput::writeWriteable); } } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { // for BWC, 8.9.1+ does not have "missing" attribute, but it considers events with an empty index "" as missing events // see https://github.com/elastic/elasticsearch/pull/98130 out.writeBoolean(missing); @@ -442,10 +442,6 @@ public Sequence(StreamInput in) throws IOException { this.events = in.readCollectionAsList(Event::readFrom); } - public static Sequence fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeGenericValue(joinKeys); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SampleIterator.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SampleIterator.java index a96102dad6cfb..f4b933300dcd7 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SampleIterator.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SampleIterator.java @@ -147,6 +147,12 @@ private void advance(ActionListener listener) { private void queryForCompositeAggPage(ActionListener listener, final SampleQueryRequest request) { client.query(request, listener.delegateFailureAndWrap((delegate, r) -> { + // either the fields values or the fields themselves are missing + // or the filter applied on the eql query matches no documents + if (r.hasAggregations() == false) { + payload(delegate); + return; + } Aggregation a = r.getAggregations().get(COMPOSITE_AGG_NAME); if (a instanceof InternalComposite == false) { throw new 
EqlIllegalArgumentException("Unexpected aggregation result type returned [{}]", a.getClass()); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java index 4e4817d4c041d..befb2c7503515 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java @@ -100,6 +100,7 @@ private void searchWithPIT(MultiSearchRequest search, ActionListener, Accountable { private final SequenceKey key; private final Match[] matches; + private int firstStage; private int currentStage = 0; @SuppressWarnings({ "rawtypes", "unchecked" }) - public Sequence(SequenceKey key, int stages, Ordinal ordinal, HitReference firstHit) { + public Sequence(SequenceKey key, int stages, int firstStage, Ordinal ordinal, HitReference firstHit) { Check.isTrue(stages >= 2, "A sequence requires at least 2 criteria, given [{}]", stages); this.key = key; this.matches = new Match[stages]; - this.matches[0] = new Match(ordinal, firstHit); + this.matches[firstStage] = new Match(ordinal, firstHit); + this.firstStage = firstStage; + this.currentStage = firstStage; } public void putMatch(int stage, Ordinal ordinal, HitReference hit) { @@ -56,7 +59,7 @@ public Ordinal ordinal() { } public Ordinal startOrdinal() { - return matches[0].ordinal(); + return matches[firstStage].ordinal(); } public List hits() { diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceMatcher.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceMatcher.java index adb8ee1b43c02..1ad9002f88999 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceMatcher.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceMatcher.java @@ -168,7 +168,7 @@ boolean match(int stage, Iterable> hits) { if (isFirstPositiveStage(stage)) { log.trace("Matching hit {} - track sequence", ko.ordinal); - Sequence seq = new Sequence(ko.key, numberOfStages, ko.ordinal, hit); + Sequence seq = new Sequence(ko.key, numberOfStages, stage, ko.ordinal, hit); if (lastPositiveStage == stage) { tryComplete(seq); } else { diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/RestEqlDeleteAsyncResultAction.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/RestEqlDeleteAsyncResultAction.java index e329c83afc20f..12724cd1f0563 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/RestEqlDeleteAsyncResultAction.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/RestEqlDeleteAsyncResultAction.java @@ -12,8 +12,8 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.async.DeleteAsyncResultAction; import org.elasticsearch.xpack.core.async.DeleteAsyncResultRequest; +import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction; import java.util.List; @@ -34,6 +34,6 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { DeleteAsyncResultRequest delete = new DeleteAsyncResultRequest(request.param("id")); - return channel 
-> client.execute(DeleteAsyncResultAction.INSTANCE, delete, new RestToXContentListener<>(channel)); + return channel -> client.execute(TransportDeleteAsyncResultAction.TYPE, delete, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java index 080cc26d81eb2..edbeb3d0a0d8c 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java @@ -289,7 +289,7 @@ private List mutateEvents(List original, TransportVersion version) e.id(), e.source(), version.onOrAfter(TransportVersions.V_7_13_0) ? e.fetchFields() : null, - version.onOrAfter(TransportVersions.V_8_500_040) ? e.missing() : e.index().isEmpty() + version.onOrAfter(TransportVersions.V_8_500_061) ? e.missing() : e.index().isEmpty() ) ); } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java index 85a34d7b6a943..f391e9bdae84b 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchResponse.Clusters; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.DocValueFormat; @@ -83,8 +82,10 @@ public void query(QueryRequest r, ActionListener l) { ) ); SearchHits searchHits = new SearchHits(new SearchHit[] { searchHit }, new TotalHits(1, Relation.EQUAL_TO), 0.0f); - SearchResponseSections internal = new SearchResponseSections(searchHits, null, null, false, false, null, 0); - ActionListener.respondAndRelease(l, new SearchResponse(internal, null, 0, 1, 0, 0, null, Clusters.EMPTY)); + ActionListener.respondAndRelease( + l, + new SearchResponse(searchHits, null, null, false, false, null, 0, null, 0, 1, 0, 0, null, Clusters.EMPTY) + ); } @Override diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java index 336526a1153a5..eb417570cb4a7 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchResponse.Clusters; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.document.DocumentField; @@ -217,12 +216,14 @@ public void query(QueryRequest r, ActionListener l) { EventsAsHits eah = new EventsAsHits(evs); SearchHits 
searchHits = new SearchHits( - eah.hits.toArray(new SearchHit[0]), + eah.hits.toArray(SearchHits.EMPTY), new TotalHits(eah.hits.size(), Relation.EQUAL_TO), 0.0f ); - SearchResponseSections internal = new SearchResponseSections(searchHits, null, null, false, false, null, 0); - ActionListener.respondAndRelease(l, new SearchResponse(internal, null, 0, 1, 0, 0, null, Clusters.EMPTY)); + ActionListener.respondAndRelease( + l, + new SearchResponse(searchHits, null, null, false, false, null, 0, null, 0, 1, 0, 0, null, Clusters.EMPTY) + ); } @Override diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java index e787505f7dfe3..9141555fcd613 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java @@ -18,7 +18,6 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchResponse.Clusters; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; @@ -222,12 +221,16 @@ protected void @SuppressWarnings("unchecked") void handleSearchRequest(ActionListener listener, SearchRequest searchRequest) { Aggregations aggs = new Aggregations(List.of(newInternalComposite())); - - SearchResponseSections internal = new SearchResponseSections(null, aggs, null, false, false, null, 0); ActionListener.respondAndRelease( listener, (Response) new SearchResponse( - internal, + null, + aggs, + null, + false, + false, + null, + 0, null, 2, 0, @@ -290,7 +293,7 @@ public List readStringCollectionAsList() throws IOException { } @Override - public Map readMap() throws IOException { + public Map readGenericMap() throws IOException { return emptyMap(); } }); diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java index e12eec4833199..9c9bbfcdc5127 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java @@ -18,7 +18,6 @@ import org.elasticsearch.action.search.OpenPointInTimeResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.NoopCircuitBreaker; @@ -243,9 +242,14 @@ void handleSearchRequest(ActionListener l) { new SearchSortValues(new Long[] { (long) ordinal, 1L }, new DocValueFormat[] { DocValueFormat.RAW, DocValueFormat.RAW }) ); SearchHits searchHits = new SearchHits(new SearchHit[] { searchHit }, new TotalHits(1, Relation.EQUAL_TO), 0.0f); - SearchResponseSections internal = new SearchResponseSections(searchHits, null, null, false, false, null, 0); - ActionListener.respondAndRelease(l, new 
SearchResponse(internal, null, 0, 1, 0, 0, null, Clusters.EMPTY)); + ActionListener.respondAndRelease( + l, + new SearchResponse(searchHits, null, null, false, false, null, 0, null, 0, 1, 0, 0, null, Clusters.EMPTY) + ); } @Override @@ -431,9 +431,14 @@ void handleSearchRequest(ActionListener void handleSearchRequest(ActionListener void handleSearchRequest(ActionListener *
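A pattern worth calling out in the test diffs above: with `SearchResponseSections` gone, responses are now built through the single widened `SearchResponse` constructor. A minimal sketch of the new shape, assuming the same test dependencies as these files; the helper class and method are hypothetical, and the argument names in the comment are my reading of the order visible in this diff:

```java
import org.apache.lucene.search.TotalHits;
import org.apache.lucene.search.TotalHits.Relation;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchResponse.Clusters;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;

// Hypothetical helper mirroring the construction pattern used by the tests above.
final class TestSearchResponses {
    static SearchResponse singleHitResponse(SearchHit hit) {
        SearchHits hits = new SearchHits(new SearchHit[] { hit }, new TotalHits(1, Relation.EQUAL_TO), 0.0f);
        // Arguments, in order: hits, aggregations, suggest, timedOut, terminatedEarly,
        // profileResults, numReducePhases, scrollId, totalShards, successfulShards,
        // skippedShards, tookInMillis, shardFailures, clusters.
        return new SearchResponse(hits, null, null, false, false, null, 0, null, 0, 1, 0, 0, null, Clusters.EMPTY);
    }
}
```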

- * The generation code also looks for the optional methods {@code combineStates}
+ * The generation code also looks for the optional methods {@code combineIntermediate}
  * and {@code evaluateFinal} which are used to combine intermediate states and
  * produce the final output. If the first is missing then the generated code will
  * call the {@code combine} method to combine intermediate states. If the second
diff --git a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/MvEvaluator.java b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/MvEvaluator.java
index 97f165e67cb44..bb2cb3bf9e5fa 100644
--- a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/MvEvaluator.java
+++ b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/MvEvaluator.java
@@ -16,17 +16,30 @@
  * Implement an evaluator for a function reducing multivalued fields into a
  * single valued field from a static {@code process} method.
  * <p>
- * Annotated methods can have two "shapes": pairwise processing and
- * accumulator processing. Pairwise is generally
- * simpler and looks like {@code int process(int current, int next)}.
- * Use it when the result is a primitive. Accumulator processing is
- * a bit more complex and looks like {@code void process(State state, int v)}
- * and it useful when you need to accumulate more data than fits
- * in a primitive result. Think Kahan summation.
+ * Annotated methods can have three "shapes":
+ * <ul>
+ *     <li>pairwise processing</li>
+ *     <li>accumulator processing</li>
+ *     <li>position at a time processing</li>
+ * </ul>
  * <p>
- * Both method shapes support at {@code finish = "finish_method"} parameter
- * on the annotation which is used to, well, "finish" processing after
+ * Pairwise processing is generally simpler and looks
+ * like {@code int process(int current, int next)}. Use it when the result
+ * is a primitive.
+ * </p>
+ * <p>
+ * Accumulator processing is a bit more complex and looks like
+ * {@code void process(State state, int v)} and it is useful when you need to
+ * accumulate more data than fits in a primitive result. Think Kahan summation.
+ * </p>
+ * <p>
+ * Position at a time processing just hands the block, start index, and end index
+ * to the processor and is useful when none of the others fit. It looks like
+ * {@code long process(LongBlock block, int start, int end)}.
+ * </p>
+ * <p>
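To make the three shapes concrete, here is a hypothetical processor class; only the method signatures come from the Javadoc above, while the class name, the state type, and the bodies are illustrative (the `@MvEvaluator` annotation itself is omitted):

```java
import org.elasticsearch.compute.data.LongBlock;

class MvShapeExamples {
    // Pairwise: the running result fits in a primitive.
    static int process(int current, int next) {
        return Math.max(current, next);
    }

    // Accumulator: state bigger than a primitive; pairs with the
    // required finish method that extracts the final value.
    static class KahanState {
        double value;
        double compensation;
    }

    static void process(KahanState state, double v) {
        // Kahan-style compensated addition.
        double y = v - state.compensation;
        double t = state.value + y;
        state.compensation = (t - state.value) - y;
        state.value = t;
    }

    static double finish(KahanState state) {
        return state.value;
    }

    // Position at a time: handed the block and the [start, end) value range.
    static long process(LongBlock block, int start, int end) {
        long sum = 0;
        for (int i = start; i < end; i++) {
            sum += block.getLong(i);
        }
        return sum;
    }
}
```

Note that the position-at-a-time shape returns its result directly, which is why the generator change further down in this diff rejects a `finish` method for it.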
+ * Pairwise and accumulator processing support a {@code finish = "finish_method"} + * parameter on the annotation which is used to, well, "finish" processing after * all values have been received. Again, think reading the sum from the * Kahan summation. Or doing the division for an "average" operation. * This method is required for accumulator processing. diff --git a/x-pack/plugin/esql/compute/build.gradle b/x-pack/plugin/esql/compute/build.gradle index 1851e2f449da0..532fd51a42437 100644 --- a/x-pack/plugin/esql/compute/build.gradle +++ b/x-pack/plugin/esql/compute/build.gradle @@ -30,12 +30,13 @@ spotless { } } -def prop(Type, type, TYPE, BYTES) { +def prop(Type, type, TYPE, BYTES, Array) { return [ "Type" : Type, "type" : type, "TYPE" : TYPE, "BYTES" : BYTES, + "Array" : Array, "int" : type == "int" ? "true" : "", "long" : type == "long" ? "true" : "", @@ -46,11 +47,11 @@ def prop(Type, type, TYPE, BYTES) { } tasks.named('stringTemplates').configure { - var intProperties = prop("Int", "int", "INT", "Integer.BYTES") - var longProperties = prop("Long", "long", "LONG", "Long.BYTES") - var doubleProperties = prop("Double", "double", "DOUBLE", "Double.BYTES") - var bytesRefProperties = prop("BytesRef", "BytesRef", "BYTES_REF", "org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_OBJECT_REF") - var booleanProperties = prop("Boolean", "boolean", "BOOLEAN", "Byte.BYTES") + var intProperties = prop("Int", "int", "INT", "Integer.BYTES", "IntArray") + var longProperties = prop("Long", "long", "LONG", "Long.BYTES", "LongArray") + var doubleProperties = prop("Double", "double", "DOUBLE", "Double.BYTES", "DoubleArray") + var bytesRefProperties = prop("BytesRef", "BytesRef", "BYTES_REF", "org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_OBJECT_REF", "") + var booleanProperties = prop("Boolean", "boolean", "BOOLEAN", "Byte.BYTES", "BitArray") // primitive vectors File vectorInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st") template { @@ -208,6 +209,28 @@ tasks.named('stringTemplates').configure { it.inputFile = arrayBlockInputFile it.outputFile = "org/elasticsearch/compute/data/BooleanArrayBlock.java" } + // BigArray block implementations + File bigArrayBlockInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st") + template { + it.properties = intProperties + it.inputFile = bigArrayBlockInputFile + it.outputFile = "org/elasticsearch/compute/data/IntBigArrayBlock.java" + } + template { + it.properties = longProperties + it.inputFile = bigArrayBlockInputFile + it.outputFile = "org/elasticsearch/compute/data/LongBigArrayBlock.java" + } + template { + it.properties = doubleProperties + it.inputFile = bigArrayBlockInputFile + it.outputFile = "org/elasticsearch/compute/data/DoubleBigArrayBlock.java" + } + template { + it.properties = booleanProperties + it.inputFile = bigArrayBlockInputFile + it.outputFile = "org/elasticsearch/compute/data/BooleanBigArrayBlock.java" + } // vector blocks File vectorBlockInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st") template { diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorFunctionSupplierImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorFunctionSupplierImplementer.java index a49f3c1f6ec42..a9bea3105ee10 100644 --- 
a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorFunctionSupplierImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorFunctionSupplierImplementer.java @@ -29,7 +29,6 @@ import javax.lang.model.util.Elements; import static org.elasticsearch.compute.gen.Types.AGGREGATOR_FUNCTION_SUPPLIER; -import static org.elasticsearch.compute.gen.Types.BIG_ARRAYS; import static org.elasticsearch.compute.gen.Types.DRIVER_CONTEXT; import static org.elasticsearch.compute.gen.Types.LIST_INTEGER; @@ -57,24 +56,8 @@ public AggregatorFunctionSupplierImplementer( Set createParameters = new LinkedHashSet<>(); createParameters.addAll(aggregatorImplementer.createParameters()); createParameters.addAll(groupingAggregatorImplementer.createParameters()); - List sortedParameters = new ArrayList<>(createParameters); - for (Parameter p : sortedParameters) { - if (p.type().equals(BIG_ARRAYS) && false == p.name().equals("bigArrays")) { - throw new IllegalArgumentException("BigArrays should always be named bigArrays but was " + p); - } - } - - /* - * We like putting BigArrays first and then channels second - * regardless of the order that the aggs actually want them. - * Just a little bit of standardization here. - */ - Parameter bigArraysParam = new Parameter(BIG_ARRAYS, "bigArrays"); - sortedParameters.remove(bigArraysParam); - sortedParameters.add(0, bigArraysParam); - sortedParameters.add(1, new Parameter(LIST_INTEGER, "channels")); - - this.createParameters = sortedParameters; + this.createParameters = new ArrayList<>(createParameters); + this.createParameters.add(0, new Parameter(LIST_INTEGER, "channels")); this.implementation = ClassName.get( elements.getPackageOf(declarationType).toString(), diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java index 6acddf6aa5cde..1d74416a81894 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java @@ -25,7 +25,6 @@ import javax.lang.model.element.ExecutableElement; import javax.lang.model.element.Modifier; import javax.lang.model.element.TypeElement; -import javax.lang.model.element.VariableElement; import javax.lang.model.util.Elements; import static java.util.stream.Collectors.joining; @@ -33,6 +32,7 @@ import static org.elasticsearch.compute.gen.Methods.findRequiredMethod; import static org.elasticsearch.compute.gen.Methods.vectorAccessorName; import static org.elasticsearch.compute.gen.Types.AGGREGATOR_FUNCTION; +import static org.elasticsearch.compute.gen.Types.BIG_ARRAYS; import static org.elasticsearch.compute.gen.Types.BLOCK; import static org.elasticsearch.compute.gen.Types.BLOCK_ARRAY; import static org.elasticsearch.compute.gen.Types.BOOLEAN_BLOCK; @@ -69,7 +69,6 @@ public class AggregatorImplementer { private final ExecutableElement init; private final ExecutableElement combine; private final ExecutableElement combineValueCount; - private final ExecutableElement combineStates; private final ExecutableElement combineIntermediate; private final ExecutableElement evaluateFinal; private final ClassName implementation; @@ -77,6 +76,7 @@ public class AggregatorImplementer { private final boolean stateTypeHasSeen; private final boolean valuesIsBytesRef; 
private final List intermediateState; + private final List createParameters; public AggregatorImplementer(Elements elements, TypeElement declarationType, IntermediateState[] interStateAnno) { this.declarationType = declarationType; @@ -95,9 +95,13 @@ public AggregatorImplementer(Elements elements, TypeElement declarationType, Int return firstParamType.isPrimitive() || firstParamType.toString().equals(stateType.toString()); }); this.combineValueCount = findMethod(declarationType, "combineValueCount"); - this.combineStates = findMethod(declarationType, "combineStates"); this.combineIntermediate = findMethod(declarationType, "combineIntermediate"); this.evaluateFinal = findMethod(declarationType, "evaluateFinal"); + this.createParameters = init.getParameters() + .stream() + .map(Parameter::from) + .filter(f -> false == f.type().equals(BIG_ARRAYS)) + .toList(); this.implementation = ClassName.get( elements.getPackageOf(declarationType).toString(), @@ -114,7 +118,7 @@ ClassName implementation() { } List createParameters() { - return init.getParameters().stream().map(Parameter::from).toList(); + return createParameters; } private TypeName choseStateType() { @@ -198,8 +202,8 @@ private TypeSpec type() { builder.addField(stateType, "state", Modifier.PRIVATE, Modifier.FINAL); builder.addField(LIST_INTEGER, "channels", Modifier.PRIVATE, Modifier.FINAL); - for (VariableElement p : init.getParameters()) { - builder.addField(TypeName.get(p.asType()), p.getSimpleName().toString(), Modifier.PRIVATE, Modifier.FINAL); + for (Parameter p : createParameters) { + builder.addField(p.type(), p.name(), Modifier.PRIVATE, Modifier.FINAL); } builder.addMethod(create()); @@ -222,27 +226,32 @@ private MethodSpec create() { builder.addModifiers(Modifier.PUBLIC, Modifier.STATIC).returns(implementation); builder.addParameter(DRIVER_CONTEXT, "driverContext"); builder.addParameter(LIST_INTEGER, "channels"); - for (VariableElement p : init.getParameters()) { - builder.addParameter(TypeName.get(p.asType()), p.getSimpleName().toString()); + for (Parameter p : createParameters) { + builder.addParameter(p.type(), p.name()); } if (init.getParameters().isEmpty()) { builder.addStatement("return new $T(driverContext, channels, $L)", implementation, callInit()); } else { - builder.addStatement("return new $T(driverContext, channels, $L, $L)", implementation, callInit(), initParameters()); + builder.addStatement( + "return new $T(driverContext, channels, $L, $L)", + implementation, + callInit(), + createParameters.stream().map(p -> p.name()).collect(joining(", ")) + ); } return builder.build(); } - private String initParameters() { - return init.getParameters().stream().map(p -> p.getSimpleName().toString()).collect(joining(", ")); - } - private CodeBlock callInit() { + String initParametersCall = init.getParameters() + .stream() + .map(p -> TypeName.get(p.asType()).equals(BIG_ARRAYS) ? 
"driverContext.bigArrays()" : p.getSimpleName().toString()) + .collect(joining(", ")); CodeBlock.Builder builder = CodeBlock.builder(); if (init.getReturnType().toString().equals(stateType.toString())) { - builder.add("$T.$L($L)", declarationType, init.getSimpleName(), initParameters()); + builder.add("$T.$L($L)", declarationType, init.getSimpleName(), initParametersCall); } else { - builder.add("new $T($T.$L($L))", stateType, declarationType, init.getSimpleName(), initParameters()); + builder.add("new $T($T.$L($L))", stateType, declarationType, init.getSimpleName(), initParametersCall); } return builder.build(); } @@ -269,9 +278,9 @@ private MethodSpec ctor() { builder.addStatement("this.channels = channels"); builder.addStatement("this.state = state"); - for (VariableElement p : init.getParameters()) { - builder.addParameter(TypeName.get(p.asType()), p.getSimpleName().toString()); - builder.addStatement("this.$N = $N", p.getSimpleName(), p.getSimpleName()); + for (Parameter p : createParameters()) { + builder.addParameter(p.type(), p.name()); + builder.addStatement("this.$N = $N", p.name(), p.name()); } return builder.build(); } @@ -399,34 +408,30 @@ private MethodSpec addIntermediateInput() { builder.addAnnotation(Override.class).addModifiers(Modifier.PUBLIC).addParameter(PAGE, "page"); builder.addStatement("assert channels.size() == intermediateBlockCount()"); builder.addStatement("assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size()"); - builder.addStatement("Block uncastBlock = page.getBlock(channels.get(0))"); - builder.beginControlFlow("if (uncastBlock.areAllValuesNull())"); - { - builder.addStatement("return"); - builder.endControlFlow(); - } - int count = 0; - for (var interState : intermediateState) { + for (int i = 0; i < intermediateState.size(); i++) { + var interState = intermediateState.get(i); + ClassName blockType = blockType(interState.elementType()); + builder.addStatement("Block $L = page.getBlock(channels.get($L))", interState.name + "Uncast", i); + builder.beginControlFlow("if ($L.areAllValuesNull())", interState.name + "Uncast"); + { + builder.addStatement("return"); + builder.endControlFlow(); + } builder.addStatement( - "$T " + interState.name() + " = page.<$T>getBlock(channels.get(" + count + ")).asVector()", + "$T $L = (($T) $L).asVector()", vectorType(interState.elementType()), - blockType(interState.elementType()) + interState.name(), + blockType, + interState.name() + "Uncast" ); - count++; + builder.addStatement("assert $L.getPositionCount() == 1", interState.name()); } - final String first = intermediateState.get(0).name(); - builder.addStatement("assert " + first + ".getPositionCount() == 1"); - if (intermediateState.size() > 1) { - builder.addStatement( - "assert " - + intermediateState.stream() - .map(IntermediateStateDesc::name) - .skip(1) - .map(s -> first + ".getPositionCount() == " + s + ".getPositionCount()") - .collect(joining(" && ")) - ); - } - if (hasPrimitiveState()) { + if (combineIntermediate != null) { + if (intermediateState.stream().map(IntermediateStateDesc::elementType).anyMatch(n -> n.equals("BYTES_REF"))) { + builder.addStatement("$T scratch = new $T()", BYTES_REF, BYTES_REF); + } + builder.addStatement("$T.combineIntermediate(state, " + intermediateStateRowAccess() + ")", declarationType); + } else if (hasPrimitiveState()) { assert intermediateState.size() == 2; assert intermediateState.get(1).name().equals("seen"); builder.beginControlFlow("if (seen.getBoolean(0))"); @@ -438,10 +443,7 @@ private MethodSpec 
addIntermediateInput() { builder.endControlFlow(); } } else { - if (intermediateState.stream().map(IntermediateStateDesc::elementType).anyMatch(n -> n.equals("BYTES_REF"))) { - builder.addStatement("$T scratch = new $T()", BYTES_REF, BYTES_REF); - } - builder.addStatement("$T.combineIntermediate(state, " + intermediateStateRowAccess() + ")", declarationType); + throw new IllegalArgumentException("Don't know how to combine intermediate input. Define combineIntermediate"); } return builder.build(); } @@ -468,7 +470,7 @@ private String primitiveStateMethod() { return "doubleValue"; default: throw new IllegalArgumentException( - "don't know how to fetch primitive values from " + stateType + ". define combineStates." + "don't know how to fetch primitive values from " + stateType + ". define combineIntermediate." ); } } @@ -493,7 +495,7 @@ private MethodSpec evaluateFinal() { .addParameter(DRIVER_CONTEXT, "driverContext"); if (stateTypeHasSeen) { builder.beginControlFlow("if (state.seen() == false)"); - builder.addStatement("blocks[offset] = $T.constantNullBlock(1, driverContext.blockFactory())", BLOCK); + builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1)", BLOCK); builder.addStatement("return"); builder.endControlFlow(); } @@ -508,22 +510,13 @@ private MethodSpec evaluateFinal() { private void primitiveStateToResult(MethodSpec.Builder builder) { switch (stateType.toString()) { case "org.elasticsearch.compute.aggregation.IntState": - builder.addStatement( - "blocks[offset] = $T.newConstantBlockWith(state.intValue(), 1, driverContext.blockFactory())", - INT_BLOCK - ); + builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantIntBlockWith(state.intValue(), 1)"); return; case "org.elasticsearch.compute.aggregation.LongState": - builder.addStatement( - "blocks[offset] = $T.newConstantBlockWith(state.longValue(), 1, driverContext.blockFactory())", - LONG_BLOCK - ); + builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantLongBlockWith(state.longValue(), 1)"); return; case "org.elasticsearch.compute.aggregation.DoubleState": - builder.addStatement( - "blocks[offset] = $T.newConstantBlockWith(state.doubleValue(), 1, driverContext.blockFactory())", - DOUBLE_BLOCK - ); + builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantDoubleBlockWith(state.doubleValue(), 1)"); return; default: throw new IllegalArgumentException("don't know how to convert state to result: " + stateType); diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/ConsumeProcessor.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/ConsumeProcessor.java index 677740862cc04..6b218fab7affb 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/ConsumeProcessor.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/ConsumeProcessor.java @@ -40,7 +40,7 @@ public Set getSupportedAnnotationTypes() { "org.elasticsearch.xpack.esql.expression.function.FunctionInfo", "org.elasticsearch.xpack.esql.expression.function.Param", "org.elasticsearch.rest.ServerlessScope", - + "org.elasticsearch.xcontent.ParserConstructor", Fixed.class.getName() ); } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java index b84ab64f4f54c..12e5de9fef591 
100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java @@ -27,7 +27,6 @@ import javax.lang.model.element.ExecutableElement; import javax.lang.model.element.Modifier; import javax.lang.model.element.TypeElement; -import javax.lang.model.element.VariableElement; import javax.lang.model.util.Elements; import static java.util.stream.Collectors.joining; @@ -36,6 +35,7 @@ import static org.elasticsearch.compute.gen.Methods.findMethod; import static org.elasticsearch.compute.gen.Methods.findRequiredMethod; import static org.elasticsearch.compute.gen.Methods.vectorAccessorName; +import static org.elasticsearch.compute.gen.Types.BIG_ARRAYS; import static org.elasticsearch.compute.gen.Types.BLOCK_ARRAY; import static org.elasticsearch.compute.gen.Types.BYTES_REF; import static org.elasticsearch.compute.gen.Types.DRIVER_CONTEXT; @@ -91,7 +91,11 @@ public GroupingAggregatorImplementer(Elements elements, TypeElement declarationT this.combineIntermediate = findMethod(declarationType, "combineIntermediate"); this.evaluateFinal = findMethod(declarationType, "evaluateFinal"); this.valuesIsBytesRef = BYTES_REF.equals(TypeName.get(combine.getParameters().get(combine.getParameters().size() - 1).asType())); - this.createParameters = init.getParameters().stream().map(Parameter::from).collect(Collectors.toList()); + this.createParameters = init.getParameters() + .stream() + .map(Parameter::from) + .filter(f -> false == f.type().equals(BIG_ARRAYS)) + .collect(Collectors.toList()); this.implementation = ClassName.get( elements.getPackageOf(declarationType).toString(), @@ -146,8 +150,8 @@ private TypeSpec type() { builder.addField(LIST_INTEGER, "channels", Modifier.PRIVATE, Modifier.FINAL); builder.addField(DRIVER_CONTEXT, "driverContext", Modifier.PRIVATE, Modifier.FINAL); - for (VariableElement p : init.getParameters()) { - builder.addField(TypeName.get(p.asType()), p.getSimpleName().toString(), Modifier.PRIVATE, Modifier.FINAL); + for (Parameter p : createParameters) { + builder.addField(p.type(), p.name(), Modifier.PRIVATE, Modifier.FINAL); } builder.addMethod(create()); @@ -176,24 +180,35 @@ private MethodSpec create() { for (Parameter p : createParameters) { builder.addParameter(p.type(), p.name()); } - if (init.getParameters().isEmpty()) { + if (createParameters.isEmpty()) { builder.addStatement("return new $T(channels, $L, driverContext)", implementation, callInit()); } else { - builder.addStatement("return new $T(channels, $L, driverContext, $L)", implementation, callInit(), initParameters()); + builder.addStatement( + "return new $T(channels, $L, driverContext, $L)", + implementation, + callInit(), + createParameters.stream().map(p -> p.name()).collect(joining(", ")) + ); } return builder.build(); } - private String initParameters() { - return init.getParameters().stream().map(p -> p.getSimpleName().toString()).collect(Collectors.joining(", ")); - } - private CodeBlock callInit() { + String initParametersCall = init.getParameters() + .stream() + .map(p -> TypeName.get(p.asType()).equals(BIG_ARRAYS) ? 
"driverContext.bigArrays()" : p.getSimpleName().toString()) + .collect(joining(", ")); CodeBlock.Builder builder = CodeBlock.builder(); if (init.getReturnType().toString().equals(stateType.toString())) { - builder.add("$T.$L($L)", declarationType, init.getSimpleName(), initParameters()); + builder.add("$T.$L($L)", declarationType, init.getSimpleName(), initParametersCall); } else { - builder.add("new $T(driverContext.bigArrays(), $T.$L($L))", stateType, declarationType, init.getSimpleName(), initParameters()); + builder.add( + "new $T(driverContext.bigArrays(), $T.$L($L))", + stateType, + declarationType, + init.getSimpleName(), + initParametersCall + ); } return builder.build(); } @@ -220,9 +235,9 @@ private MethodSpec ctor() { builder.addStatement("this.state = state"); builder.addStatement("this.driverContext = driverContext"); - for (VariableElement p : init.getParameters()) { - builder.addParameter(TypeName.get(p.asType()), p.getSimpleName().toString()); - builder.addStatement("this.$N = $N", p.getSimpleName(), p.getSimpleName()); + for (Parameter p : createParameters) { + builder.addParameter(p.type(), p.name()); + builder.addStatement("this.$N = $N", p.name(), p.name()); } return builder.build(); } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Methods.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Methods.java index f651ab2a316aa..768ca0bd16201 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Methods.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Methods.java @@ -83,7 +83,7 @@ static VariableElement[] findMethodArguments(ExecutableElement method, Predicate if (method.getParameters().isEmpty()) { return new VariableElement[0]; } - return method.getParameters().stream().filter(e -> filter.test(e)).toArray(VariableElement[]::new); + return method.getParameters().stream().filter(filter).toArray(VariableElement[]::new); } /** @@ -194,7 +194,7 @@ static String vectorAccessorName(String elementTypeName) { case "DOUBLE" -> "getDouble"; case "BYTES_REF" -> "getBytesRef"; default -> throw new IllegalArgumentException( - "don't know how to fetch primitive values from " + elementTypeName + ". define combineStates." + "don't know how to fetch primitive values from " + elementTypeName + ". define combineIntermediate." 
); }; } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/MvEvaluatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/MvEvaluatorImplementer.java index 0e794d6fa533f..52b1c2b09b629 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/MvEvaluatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/MvEvaluatorImplementer.java @@ -92,13 +92,20 @@ public MvEvaluatorImplementer( ) { this.declarationType = (TypeElement) processFunction.getEnclosingElement(); this.processFunction = processFunction; - if (processFunction.getParameters().size() != 2) { - throw new IllegalArgumentException("process should have exactly two parameters"); + if (processFunction.getParameters().size() == 2) { + this.workType = TypeName.get(processFunction.getParameters().get(0).asType()); + this.fieldType = TypeName.get(processFunction.getParameters().get(1).asType()); + this.finishFunction = FinishFunction.from(declarationType, finishMethodName, workType, fieldType); + this.resultType = this.finishFunction == null ? this.workType : this.finishFunction.resultType; + } else { + if (finishMethodName.equals("") == false) { + throw new IllegalArgumentException("finish function is only supported for pairwise processing"); + } + this.workType = null; + this.fieldType = Types.elementType(TypeName.get(processFunction.getParameters().get(0).asType())); + this.finishFunction = null; + this.resultType = TypeName.get(processFunction.getReturnType()); } - this.workType = TypeName.get(processFunction.getParameters().get(0).asType()); - this.fieldType = TypeName.get(processFunction.getParameters().get(1).asType()); - this.finishFunction = FinishFunction.from(declarationType, finishMethodName, workType, fieldType); - this.resultType = this.finishFunction == null ? 
this.workType : this.finishFunction.resultType; this.singleValueFunction = SingleValueFunction.from(declarationType, singleValueMethodName, resultType, fieldType); this.ascendingFunction = AscendingFunction.from(this, declarationType, ascendingMethodName); this.warnExceptions = warnExceptions; @@ -130,7 +137,6 @@ private TypeSpec type() { builder.addField(WARNINGS, "warnings", Modifier.PRIVATE, Modifier.FINAL); } - builder.addField(DRIVER_CONTEXT, "driverContext", Modifier.PRIVATE, Modifier.FINAL); builder.addMethod(ctor()); builder.addMethod(name()); @@ -159,12 +165,11 @@ private MethodSpec ctor() { builder.addParameter(SOURCE, "source"); } builder.addParameter(EXPRESSION_EVALUATOR, "field"); - builder.addStatement("super($L)", "field"); + builder.addStatement("super(driverContext, field)"); if (warnExceptions.isEmpty() == false) { builder.addStatement("this.warnings = new Warnings(source)"); } builder.addParameter(DRIVER_CONTEXT, "driverContext"); - builder.addStatement("this.driverContext = driverContext"); return builder.build(); } @@ -210,11 +215,11 @@ private MethodSpec evalShell( Methods.buildFromFactory(builderType) ); - if (false == workType.equals(fieldType) && workType.isPrimitive() == false) { + if (workType != null && false == workType.equals(fieldType) && workType.isPrimitive() == false) { builder.addStatement("$T work = new $T()", workType, workType); } if (fieldType.equals(BYTES_REF)) { - if (workType.equals(fieldType)) { + if (fieldType.equals(workType)) { builder.addStatement("$T firstScratch = new $T()", BYTES_REF, BYTES_REF); builder.addStatement("$T nextScratch = new $T()", BYTES_REF, BYTES_REF); } else { @@ -272,33 +277,45 @@ private MethodSpec eval(String name, boolean nullable) { } builder.addStatement("int end = first + valueCount"); - if (workType.equals(fieldType) || workType.isPrimitive()) { + if (processFunction.getParameters().size() == 2) { // process function evaluates pairwise - fetch(builder, "value", workType, "first", "firstScratch"); - builder.beginControlFlow("for (int i = first + 1; i < end; i++)"); - { - if (fieldType.equals(BYTES_REF)) { - fetch(builder, "next", workType, "i", "nextScratch"); - builder.addStatement("$T.$L(value, next)", declarationType, processFunction.getSimpleName()); + if (workType.equals(fieldType) || workType.isPrimitive()) { + fetch(builder, "value", workType, "first", "firstScratch"); + builder.beginControlFlow("for (int i = first + 1; i < end; i++)"); + { + if (fieldType.equals(BYTES_REF)) { + fetch(builder, "next", workType, "i", "nextScratch"); + builder.addStatement("$T.$L(value, next)", declarationType, processFunction.getSimpleName()); + } else { + fetch(builder, "next", fieldType, "i", "nextScratch"); + builder.addStatement("value = $T.$L(value, next)", declarationType, processFunction.getSimpleName()); + } + } + builder.endControlFlow(); + if (finishFunction == null) { + builder.addStatement("$T result = value", resultType); } else { - fetch(builder, "next", fieldType, "i", "nextScratch"); - builder.addStatement("value = $T.$L(value, next)", declarationType, processFunction.getSimpleName()); + finishFunction.call(builder, "value"); } - } - builder.endControlFlow(); - if (finishFunction == null) { - builder.addStatement("$T result = value", resultType); } else { - finishFunction.call(builder, "value"); + builder.beginControlFlow("for (int i = first; i < end; i++)"); + { + fetch(builder, "value", fieldType, "i", "valueScratch"); + builder.addStatement("$T.$L(work, value)", declarationType, 
processFunction.getSimpleName()); + } + builder.endControlFlow(); + finishFunction.call(builder, "work"); } } else { - builder.beginControlFlow("for (int i = first; i < end; i++)"); - { - fetch(builder, "value", fieldType, "i", "valueScratch"); - builder.addStatement("$T.$L(work, value)", declarationType, processFunction.getSimpleName()); - } - builder.endControlFlow(); - finishFunction.call(builder, "work"); + // process function evaluates position at a time + String scratch = fieldType.equals(BYTES_REF) ? ", valueScratch" : ""; + builder.addStatement( + "$T result = $T.$L(v, first, end$L)", + resultType, + declarationType, + processFunction.getSimpleName(), + scratch + ); } writeResult(builder); }); @@ -401,7 +418,7 @@ private MethodSpec factoryToString() { private static class FinishFunction { static FinishFunction from(TypeElement declarationType, String name, TypeName workType, TypeName fieldType) { if (name.equals("")) { - if (false == workType.equals(fieldType)) { + if (workType != null && false == workType.equals(fieldType)) { throw new IllegalArgumentException( "the [finish] enum value is required because the first and second arguments differ in type" ); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleArrayState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleArrayState.java index f77f1893caa01..5b82950c7de37 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleArrayState.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleArrayState.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.DoubleArray; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.DoubleVector; import org.elasticsearch.compute.data.IntVector; @@ -60,14 +59,14 @@ void set(int groupId, double value) { Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { if (false == trackingGroupIds()) { - try (DoubleVector.Builder builder = DoubleVector.newVectorBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (DoubleVector.Builder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { builder.appendDouble(values.get(selected.getInt(i))); } return builder.build().asBlock(); } } - try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); if (hasValue(group)) { @@ -98,8 +97,8 @@ public void toIntermediate( ) { assert blocks.length >= offset + 2; try ( - var valuesBuilder = DoubleBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()); - var hasValueBuilder = BooleanBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()) + var valuesBuilder = driverContext.blockFactory().newDoubleBlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) ) { for 
(int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); @@ -111,7 +110,7 @@ public void toIntermediate( hasValueBuilder.appendBoolean(hasValue(group)); } blocks[offset + 0] = valuesBuilder.build(); - blocks[offset + 1] = hasValueBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntArrayState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntArrayState.java index 82578090503ab..0234f36f6675c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntArrayState.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntArrayState.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.IntArray; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.operator.DriverContext; @@ -59,14 +58,14 @@ void set(int groupId, int value) { Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { if (false == trackingGroupIds()) { - try (IntVector.Builder builder = IntVector.newVectorBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (IntVector.Builder builder = driverContext.blockFactory().newIntVectorFixedBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { builder.appendInt(values.get(selected.getInt(i))); } return builder.build().asBlock(); } } - try (IntBlock.Builder builder = IntBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); if (hasValue(group)) { @@ -97,8 +96,8 @@ public void toIntermediate( ) { assert blocks.length >= offset + 2; try ( - var valuesBuilder = IntBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()); - var hasValueBuilder = BooleanBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()) + var valuesBuilder = driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) ) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); @@ -110,7 +109,7 @@ public void toIntermediate( hasValueBuilder.appendBoolean(hasValue(group)); } blocks[offset + 0] = valuesBuilder.build(); - blocks[offset + 1] = hasValueBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongArrayState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongArrayState.java index f77d22fb1d26a..860bf43eaad82 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongArrayState.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongArrayState.java @@ -10,7 +10,6 @@ import 
org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.LongVector; @@ -66,14 +65,14 @@ void increment(int groupId, long value) { Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { if (false == trackingGroupIds()) { - try (LongVector.Builder builder = LongVector.newVectorBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (LongVector.Builder builder = driverContext.blockFactory().newLongVectorFixedBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { builder.appendLong(values.get(selected.getInt(i))); } return builder.build().asBlock(); } } - try (LongBlock.Builder builder = LongBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); if (hasValue(group)) { @@ -104,8 +103,8 @@ public void toIntermediate( ) { assert blocks.length >= offset + 2; try ( - var valuesBuilder = LongBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()); - var hasValueBuilder = BooleanBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()) + var valuesBuilder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) ) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); @@ -117,7 +116,7 @@ public void toIntermediate( hasValueBuilder.appendBoolean(hasValue(group)); } blocks[offset + 0] = valuesBuilder.build(); - blocks[offset + 1] = hasValueBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java index 7c2723163197a..a592bd65acb3a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java @@ -8,26 +8,40 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.core.Releasables; -import java.util.Arrays; import java.util.BitSet; /** - * Block implementation that stores an array of boolean. + * Block implementation that stores values in a {@link BooleanArrayVector}. * This class is generated. Do not edit it. 
*/ -public final class BooleanArrayBlock extends AbstractArrayBlock implements BooleanBlock { +final class BooleanArrayBlock extends AbstractArrayBlock implements BooleanBlock { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BooleanArrayBlock.class); - private final boolean[] values; + private final BooleanArrayVector vector; - public BooleanArrayBlock(boolean[] values, int positionCount, int[] firstValueIndexes, BitSet nulls, MvOrdering mvOrdering) { - this(values, positionCount, firstValueIndexes, nulls, mvOrdering, BlockFactory.getNonBreakingInstance()); + BooleanArrayBlock( + boolean[] values, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + this( + new BooleanArrayVector(values, firstValueIndexes == null ? positionCount : firstValueIndexes[positionCount], blockFactory), + positionCount, + firstValueIndexes, + nulls, + mvOrdering, + blockFactory + ); } - public BooleanArrayBlock( - boolean[] values, + private BooleanArrayBlock( + BooleanArrayVector vector, int positionCount, int[] firstValueIndexes, BitSet nulls, @@ -35,7 +49,10 @@ public BooleanArrayBlock( BlockFactory blockFactory ) { super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); - this.values = values; + this.vector = vector; + assert firstValueIndexes == null + ? vector.getPositionCount() == getPositionCount() + : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } @Override @@ -45,11 +62,12 @@ public BooleanVector asVector() { @Override public boolean getBoolean(int valueIndex) { - return values[valueIndex]; + return vector.getBoolean(valueIndex); } @Override public BooleanBlock filter(int... positions) { + // TODO use reference counting to share the vector try (var builder = blockFactory().newBooleanBlockBuilder(positions.length)) { for (int pos : positions) { if (isNull(pos)) { @@ -83,31 +101,37 @@ public BooleanBlock expand() { incRef(); return this; } - // TODO use reference counting to share the values - try (var builder = blockFactory().newBooleanBlockBuilder(firstValueIndexes[getPositionCount()])) { - for (int pos = 0; pos < getPositionCount(); pos++) { - if (isNull(pos)) { - builder.appendNull(); - continue; - } - int first = getFirstValueIndex(pos); - int end = first + getValueCount(pos); - for (int i = first; i < end; i++) { - builder.appendBoolean(getBoolean(i)); - } - } - return builder.mvOrdering(MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING).build(); + if (nullsMask == null) { + vector.incRef(); + return vector.asBlock(); } + + // The following line is correct because positions with multi-values are never null. + int expandedPositionCount = vector.getPositionCount(); + long bitSetRamUsedEstimate = Math.max(nullsMask.size(), BlockRamUsageEstimator.sizeOfBitSet(expandedPositionCount)); + blockFactory().adjustBreaker(bitSetRamUsedEstimate); + + BooleanArrayBlock expanded = new BooleanArrayBlock( + vector, + expandedPositionCount, + null, + shiftNullsToExpandedPositions(), + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + blockFactory() + ); + blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); + // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
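The rewritten expand() above never copies values: the expanded block wraps the same underlying vector, taking a reference only after the breaker has been adjusted. A hedged usage sketch of what that buys (the builder helper and the literal values are illustrative, not from this patch):

```java
// Both blocks may share one vector after expand(); reference counting
// releases the shared storage exactly once, when the last holder closes.
try (BooleanBlock original = multiValueBlock(blockFactory)) { // hypothetical helper
    // original:  [true] [null] [false, true]    (3 positions, 4 value slots)
    try (BooleanBlock expanded = original.expand()) {
        // expanded: [true] [null] [false] [true] (4 positions, same storage)
        assert expanded.getPositionCount() == 4;
    }
}
```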
+ vector.incRef(); + return expanded; } - public static long ramBytesEstimated(boolean[] values, int[] firstValueIndexes, BitSet nullsMask) { - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) - + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); } @Override public long ramBytesUsed() { - return ramBytesEstimated(values, firstValueIndexes, nullsMask); + return ramBytesUsedOnlyBlock() + vector.ramBytesUsed(); } @Override @@ -130,13 +154,20 @@ public String toString() { + getPositionCount() + ", mvOrdering=" + mvOrdering() - + ", values=" - + Arrays.toString(values) + + ", vector=" + + vector + ']'; } + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + @Override public void closeInternal() { - blockFactory().adjustBreaker(-ramBytesUsed(), true); + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); + Releasables.closeExpectNoException(vector); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java index 5aa8724eb0ca2..114d924df467c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java @@ -15,27 +15,20 @@ * Vector implementation that stores an array of boolean values. * This class is generated. Do not edit it. */ -public final class BooleanArrayVector extends AbstractVector implements BooleanVector { +final class BooleanArrayVector extends AbstractVector implements BooleanVector { static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BooleanArrayVector.class); private final boolean[] values; - private final BooleanBlock block; - - public BooleanArrayVector(boolean[] values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public BooleanArrayVector(boolean[] values, int positionCount, BlockFactory blockFactory) { + BooleanArrayVector(boolean[] values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new BooleanVectorBlock(this); } @Override public BooleanBlock asBlock() { - return block; + return new BooleanVectorBlock(this); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java new file mode 100644 index 0000000000000..82a0bb364966b --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java @@ -0,0 +1,174 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.data; + +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.core.Releasables; + +import java.util.BitSet; + +/** + * Block implementation that stores values in a {@link BooleanBigArrayVector}. Does not take ownership of the given + * {@link BitArray} and does not adjust circuit breakers to account for it. + * This class is generated. Do not edit it. + */ +public final class BooleanBigArrayBlock extends AbstractArrayBlock implements BooleanBlock { + + private static final long BASE_RAM_BYTES_USED = 0; // TODO: fix this + private final BooleanBigArrayVector vector; + + public BooleanBigArrayBlock( + BitArray values, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + this( + new BooleanBigArrayVector(values, firstValueIndexes == null ? positionCount : firstValueIndexes[positionCount], blockFactory), + positionCount, + firstValueIndexes, + nulls, + mvOrdering, + blockFactory + ); + } + + private BooleanBigArrayBlock( + BooleanBigArrayVector vector, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + this.vector = vector; + assert firstValueIndexes == null + ? vector.getPositionCount() == getPositionCount() + : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); + } + + @Override + public BooleanVector asVector() { + return null; + } + + @Override + public boolean getBoolean(int valueIndex) { + return vector.getBoolean(valueIndex); + } + + @Override + public BooleanBlock filter(int... positions) { + // TODO use reference counting to share the vector + try (var builder = blockFactory().newBooleanBlockBuilder(positions.length)) { + for (int pos : positions) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int valueCount = getValueCount(pos); + int first = getFirstValueIndex(pos); + if (valueCount == 1) { + builder.appendBoolean(getBoolean(getFirstValueIndex(pos))); + } else { + builder.beginPositionEntry(); + for (int c = 0; c < valueCount; c++) { + builder.appendBoolean(getBoolean(first + c)); + } + builder.endPositionEntry(); + } + } + return builder.mvOrdering(mvOrdering()).build(); + } + } + + @Override + public ElementType elementType() { + return ElementType.BOOLEAN; + } + + @Override + public BooleanBlock expand() { + if (firstValueIndexes == null) { + incRef(); + return this; + } + if (nullsMask == null) { + vector.incRef(); + return vector.asBlock(); + } + + // The following line is correct because positions with multi-values are never null. + int expandedPositionCount = vector.getPositionCount(); + long bitSetRamUsedEstimate = Math.max(nullsMask.size(), BlockRamUsageEstimator.sizeOfBitSet(expandedPositionCount)); + blockFactory().adjustBreaker(bitSetRamUsedEstimate); + + BooleanBigArrayBlock expanded = new BooleanBigArrayBlock( + vector, + expandedPositionCount, + null, + shiftNullsToExpandedPositions(), + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + blockFactory() + ); + blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); + // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
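Note that asVector() in the new class above returns null, as it does for every block that can hold nulls or multi-values. A minimal sketch of the consumer pattern that relies on this contract (the counting method itself is illustrative; the accessors are the ones shown in this patch):

```java
// Fast path when the block is really a plain vector; general path otherwise.
static int countTrue(BooleanBlock block) {
    int count = 0;
    BooleanVector vector = block.asVector();
    if (vector != null) {
        // Non-null means no nulls and no multi-values: one value per position.
        for (int p = 0; p < vector.getPositionCount(); p++) {
            if (vector.getBoolean(p)) {
                count++;
            }
        }
        return count;
    }
    for (int p = 0; p < block.getPositionCount(); p++) {
        if (block.isNull(p)) {
            continue;
        }
        int first = block.getFirstValueIndex(p);
        int end = first + block.getValueCount(p);
        for (int i = first; i < end; i++) {
            if (block.getBoolean(i)) {
                count++;
            }
        }
    }
    return count;
}
```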
+ vector.incRef(); + return expanded; + } + + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + } + + @Override + public long ramBytesUsed() { + return ramBytesUsedOnlyBlock() + RamUsageEstimator.sizeOf(vector); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof BooleanBlock that) { + return BooleanBlock.equals(this, that); + } + return false; + } + + @Override + public int hashCode() { + return BooleanBlock.hash(this); + } + + @Override + public String toString() { + return getClass().getSimpleName() + + "[positions=" + + getPositionCount() + + ", mvOrdering=" + + mvOrdering() + + ", ramBytesUsed=" + + vector.ramBytesUsed() + + ']'; + } + + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + + @Override + public void closeInternal() { + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); + Releasables.closeExpectNoException(vector); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java index 2621ec612944e..9618edb1fa77a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java @@ -12,30 +12,24 @@ import org.elasticsearch.core.Releasable; /** - * Vector implementation that defers to an enclosed BooleanArray. + * Vector implementation that defers to an enclosed {@link BitArray}. + * Does not take ownership of the array and does not adjust circuit breakers to account for it. * This class is generated. Do not edit it. */ public final class BooleanBigArrayVector extends AbstractVector implements BooleanVector, Releasable { - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BooleanBigArrayVector.class); + private static final long BASE_RAM_BYTES_USED = 0; // FIXME private final BitArray values; - private final BooleanBlock block; - - public BooleanBigArrayVector(BitArray values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - public BooleanBigArrayVector(BitArray values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new BooleanVectorBlock(this); } @Override public BooleanBlock asBlock() { - return block; + return new BooleanVectorBlock(this); } @Override @@ -71,11 +65,9 @@ public BooleanVector filter(int... positions) { } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; + public void closeInternal() { + // The circuit breaker that tracks the values {@link BitArray} is adjusted outside + // of this class. 
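The ownership rule spelled out in the javadoc above matters for breaker accounting: a BitArray is allocated through BigArrays, which tracks its bytes itself, so the wrapping vector must not count them against the block factory's breaker a second time. A minimal sketch, assuming a `bigArrays` and `blockFactory` instance are in scope (sizes and indices are illustrative):

```java
// The BitArray's memory is accounted for by BigArrays at allocation time;
// the vector only wraps it and releases it on close.
BitArray bits = new BitArray(1024, bigArrays);
bits.set(0);
bits.set(513);
BooleanBigArrayVector vector = new BooleanBigArrayVector(bits, 1024, blockFactory);
assert vector.getBoolean(513);
vector.close(); // closes the BitArray; no block-factory breaker adjustment needed
```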
values.close(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java index 352ee783d8614..fffa3af137d76 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java @@ -18,7 +18,7 @@ * Block that stores boolean values. * This class is generated. Do not edit it. */ -public sealed interface BooleanBlock extends Block permits BooleanArrayBlock, BooleanVectorBlock, ConstantNullBlock { +public sealed interface BooleanBlock extends Block permits BooleanArrayBlock, BooleanVectorBlock, ConstantNullBlock, BooleanBigArrayBlock { /** * Retrieves the boolean value stored at the given value index. @@ -166,44 +166,6 @@ static int hash(BooleanBlock block) { return result; } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newBooleanBlockBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newBlockBuilder(int estimatedSize) { - return newBlockBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a builder. - * @deprecated use {@link BlockFactory#newBooleanBlockBuilder} - */ - @Deprecated - static Builder newBlockBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newBooleanBlockBuilder(estimatedSize); - } - - /** - * Returns a constant block built by the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newConstantBooleanBlockWith} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static BooleanBlock newConstantBlockWith(boolean value, int positions) { - return newConstantBlockWith(value, positions, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a constant block. 
- * @deprecated use {@link BlockFactory#newConstantBooleanBlockWith} - */ - @Deprecated - static BooleanBlock newConstantBlockWith(boolean value, int positions, BlockFactory blockFactory) { - return blockFactory.newConstantBooleanBlockWith(value, positions); - } - /** * Builder for {@link BooleanBlock} */ diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlockBuilder.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlockBuilder.java index 1c3549c06ca87..988106779a9d5 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlockBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlockBuilder.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.util.BitArray; import java.util.Arrays; @@ -179,6 +180,31 @@ public BooleanBlockBuilder mvOrdering(Block.MvOrdering mvOrdering) { return this; } + private BooleanBlock buildBigArraysBlock() { + final BooleanBlock theBlock; + final BitArray array = new BitArray(valueCount, blockFactory.bigArrays()); + for (int i = 0; i < valueCount; i++) { + if (values[i]) { + array.set(i); + } + } + if (isDense() && singleValued()) { + theBlock = new BooleanBigArrayVector(array, positionCount, blockFactory).asBlock(); + } else { + theBlock = new BooleanBigArrayBlock(array, positionCount, firstValueIndexes, nullsMask, mvOrdering, blockFactory); + } + /* + * Update the breaker with the actual bytes used. + * If the breaker trips on this adjustment we throw away the used memory, + * letting it be deallocated. The exception will bubble up and the builder + * will still technically be open, meaning the calling code should close it, + * which will return all used memory to the breaker.
+ */ + blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes - array.ramBytesUsed()); + return theBlock; + } + @Override public BooleanBlock build() { try { @@ -187,20 +213,26 @@ public BooleanBlock build() { if (hasNonNullValue && positionCount == 1 && valueCount == 1) { theBlock = blockFactory.newConstantBooleanBlockWith(values[0], 1, estimatedBytes); } else { - if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { - values = Arrays.copyOf(values, valueCount); - } - if (isDense() && singleValued()) { - theBlock = blockFactory.newBooleanArrayVector(values, positionCount, estimatedBytes).asBlock(); + if (estimatedBytes > blockFactory.maxPrimitiveArrayBytes()) { + theBlock = buildBigArraysBlock(); } else { - theBlock = blockFactory.newBooleanArrayBlock( - values, - positionCount, - firstValueIndexes, - nullsMask, - mvOrdering, - estimatedBytes - ); + if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { + adjustBreaker(valueCount * elementSize()); + values = Arrays.copyOf(values, valueCount); + adjustBreaker(-values.length * elementSize()); + } + if (isDense() && singleValued()) { + theBlock = blockFactory.newBooleanArrayVector(values, positionCount, estimatedBytes).asBlock(); + } else { + theBlock = blockFactory.newBooleanArrayBlock( + values, + positionCount, + firstValueIndexes, + nullsMask, + mvOrdering, + estimatedBytes + ); + } } } built(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java index ec4ab8f7def1c..7c86f40981ec7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java @@ -101,40 +101,10 @@ default void writeTo(StreamOutput out) throws IOException { } } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance nonbreaking block factory}. - * @deprecated use {@link BlockFactory#newBooleanVectorBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newVectorBuilder(int estimatedSize) { - return newVectorBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Creates a builder that grows as needed. Prefer {@link #newVectorFixedBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#newBooleanVectorBuilder} - */ - @Deprecated - static Builder newVectorBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newBooleanVectorBuilder(estimatedSize); - } - - /** - * Creates a builder that never grows. Prefer this over {@link #newVectorBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#newBooleanVectorFixedBuilder} - */ - @Deprecated - static FixedBuilder newVectorFixedBuilder(int size, BlockFactory blockFactory) { - return blockFactory.newBooleanVectorFixedBuilder(size); - } - /** * A builder that grows as needed. */ - sealed interface Builder extends Vector.Builder permits BooleanVectorBuilder { + sealed interface Builder extends Vector.Builder permits BooleanVectorBuilder, FixedBuilder { /** * Appends a boolean to the current entry. 
*/ @@ -147,13 +117,11 @@ sealed interface Builder extends Vector.Builder permits BooleanVectorBuilder { /** * A builder that never grows. */ - sealed interface FixedBuilder extends Vector.Builder permits BooleanVectorFixedBuilder { + sealed interface FixedBuilder extends Builder permits BooleanVectorFixedBuilder { /** * Appends a boolean to the current entry. */ - FixedBuilder appendBoolean(boolean value); - @Override - BooleanVector build(); + FixedBuilder appendBoolean(boolean value); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java index 3fa4a90a6e734..d707e3cf901c1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java @@ -10,7 +10,7 @@ import org.elasticsearch.core.Releasables; /** - * Block view of a BooleanVector. + * Block view of a {@link BooleanVector}. Cannot represent multi-values or nulls. * This class is generated. Do not edit it. */ public final class BooleanVectorBlock extends AbstractVectorBlock implements BooleanBlock { @@ -73,11 +73,6 @@ public String toString() { return getClass().getSimpleName() + "[vector=" + vector + "]"; } - @Override - public boolean isReleased() { - return super.isReleased() || vector.isReleased(); - } - @Override public void closeInternal() { assert (vector.isReleased() == false) : "can't release block [" + this + "] containing already released vector"; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorFixedBuilder.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorFixedBuilder.java index 33daff853eecb..5977dc5de36f0 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorFixedBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorFixedBuilder.java @@ -27,7 +27,7 @@ final class BooleanVectorFixedBuilder implements BooleanVector.FixedBuilder { BooleanVectorFixedBuilder(int size, BlockFactory blockFactory) { preAdjustedBytes = ramBytesUsed(size); - blockFactory.adjustBreaker(preAdjustedBytes, false); + blockFactory.adjustBreaker(preAdjustedBytes); this.blockFactory = blockFactory; this.values = new boolean[size]; } @@ -70,7 +70,7 @@ public void close() { if (nextIndex >= 0) { // If nextIndex < 0 we've already built the vector nextIndex = -1; - blockFactory.adjustBreaker(-preAdjustedBytes, false); + blockFactory.adjustBreaker(-preAdjustedBytes); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java index 34d4e5aaa43e2..7f1a1608dac5b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java @@ -15,21 +15,36 @@ import java.util.BitSet; /** - * Block implementation that stores an array of BytesRef. + * Block implementation that stores values in a {@link BytesRefArrayVector}. 
+ * Does not take ownership of the given {@link BytesRefArray} and does not adjust circuit breakers to account for it. * This class is generated. Do not edit it. */ -public final class BytesRefArrayBlock extends AbstractArrayBlock implements BytesRefBlock { +final class BytesRefArrayBlock extends AbstractArrayBlock implements BytesRefBlock { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BytesRefArrayBlock.class); - private final BytesRefArray values; + private final BytesRefArrayVector vector; - public BytesRefArrayBlock(BytesRefArray values, int positionCount, int[] firstValueIndexes, BitSet nulls, MvOrdering mvOrdering) { - this(values, positionCount, firstValueIndexes, nulls, mvOrdering, BlockFactory.getNonBreakingInstance()); + BytesRefArrayBlock( + BytesRefArray values, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + this( + new BytesRefArrayVector(values, firstValueIndexes == null ? positionCount : firstValueIndexes[positionCount], blockFactory), + positionCount, + firstValueIndexes, + nulls, + mvOrdering, + blockFactory + ); } - public BytesRefArrayBlock( - BytesRefArray values, + private BytesRefArrayBlock( + BytesRefArrayVector vector, int positionCount, int[] firstValueIndexes, BitSet nulls, @@ -37,7 +52,10 @@ public BytesRefArrayBlock( BlockFactory blockFactory ) { super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); - this.values = values; + this.vector = vector; + assert firstValueIndexes == null + ? vector.getPositionCount() == getPositionCount() + : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } @Override @@ -47,11 +65,12 @@ public BytesRefVector asVector() { @Override public BytesRef getBytesRef(int valueIndex, BytesRef dest) { - return values.get(valueIndex, dest); + return vector.getBytesRef(valueIndex, dest); } @Override public BytesRefBlock filter(int... positions) { + // TODO use reference counting to share the vector final BytesRef scratch = new BytesRef(); try (var builder = blockFactory().newBytesRefBlockBuilder(positions.length)) { for (int pos : positions) { @@ -86,32 +105,37 @@ public BytesRefBlock expand() { incRef(); return this; } - // TODO use reference counting to share the values - final BytesRef scratch = new BytesRef(); - try (var builder = blockFactory().newBytesRefBlockBuilder(firstValueIndexes[getPositionCount()])) { - for (int pos = 0; pos < getPositionCount(); pos++) { - if (isNull(pos)) { - builder.appendNull(); - continue; - } - int first = getFirstValueIndex(pos); - int end = first + getValueCount(pos); - for (int i = first; i < end; i++) { - builder.appendBytesRef(getBytesRef(i, scratch)); - } - } - return builder.mvOrdering(MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING).build(); + if (nullsMask == null) { + vector.incRef(); + return vector.asBlock(); } + + // The following line is correct because positions with multi-values are never null. 
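The invariant called out in the comment above is what makes the expansion arithmetic work: only single-valued positions can be null, so a null position occupies exactly one value slot, and the expanded position count is simply the vector's position count. A plausible sketch of what a helper like `shiftNullsToExpandedPositions()` has to do under that invariant (an illustration, not the actual implementation):

```java
// Map each null bit from its position index to its value index. Because a
// null position holds exactly one slot, its expanded position is just
// firstValueIndexes[pos]. Uses java.util.BitSet, as the blocks do.
static BitSet shiftNullsToExpandedPositions(BitSet nulls, int[] firstValueIndexes) {
    BitSet expanded = new BitSet();
    for (int pos = nulls.nextSetBit(0); pos >= 0; pos = nulls.nextSetBit(pos + 1)) {
        expanded.set(firstValueIndexes[pos]);
    }
    return expanded;
}
```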
+ int expandedPositionCount = vector.getPositionCount(); + long bitSetRamUsedEstimate = Math.max(nullsMask.size(), BlockRamUsageEstimator.sizeOfBitSet(expandedPositionCount)); + blockFactory().adjustBreaker(bitSetRamUsedEstimate); + + BytesRefArrayBlock expanded = new BytesRefArrayBlock( + vector, + expandedPositionCount, + null, + shiftNullsToExpandedPositions(), + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + blockFactory() + ); + blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); + // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. + vector.incRef(); + return expanded; } - public static long ramBytesEstimated(BytesRefArray values, int[] firstValueIndexes, BitSet nullsMask) { - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) - + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); } @Override public long ramBytesUsed() { - return ramBytesEstimated(values, firstValueIndexes, nullsMask); + return ramBytesUsedOnlyBlock() + vector.ramBytesUsed(); } @Override @@ -134,14 +158,20 @@ public String toString() { + getPositionCount() + ", mvOrdering=" + mvOrdering() - + ", values=" - + values.size() + + ", vector=" + + vector + ']'; } + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + @Override public void closeInternal() { - blockFactory().adjustBreaker(-ramBytesUsed() + values.bigArraysRamBytesUsed(), true); - Releasables.closeExpectNoException(values); + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); + Releasables.closeExpectNoException(vector); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java index a8bb60f9f20fa..227bf3a78a5bf 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java @@ -14,29 +14,23 @@ /** * Vector implementation that stores an array of BytesRef values. + * Does not take ownership of the given {@link BytesRefArray} and does not adjust circuit breakers to account for it. * This class is generated. Do not edit it. 
*/ -public final class BytesRefArrayVector extends AbstractVector implements BytesRefVector { +final class BytesRefArrayVector extends AbstractVector implements BytesRefVector { static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BytesRefArrayVector.class); private final BytesRefArray values; - private final BytesRefBlock block; - - public BytesRefArrayVector(BytesRefArray values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public BytesRefArrayVector(BytesRefArray values, int positionCount, BlockFactory blockFactory) { + BytesRefArrayVector(BytesRefArray values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new BytesRefVectorBlock(this); } @Override public BytesRefBlock asBlock() { - return block; + return new BytesRefVectorBlock(this); } @Override @@ -93,12 +87,10 @@ public String toString() { } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; - blockFactory().adjustBreaker(-ramBytesUsed() + values.bigArraysRamBytesUsed(), true); + public void closeInternal() { + // The circuit breaker that tracks the values {@link BytesRefArray} is adjusted outside + // of this class. + blockFactory().adjustBreaker(-ramBytesUsed() + values.bigArraysRamBytesUsed()); Releasables.closeExpectNoException(values); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java index 50611f3e15130..8ed17a1435302 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java @@ -171,44 +171,6 @@ static int hash(BytesRefBlock block) { return result; } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newBytesRefBlockBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newBlockBuilder(int estimatedSize) { - return newBlockBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a builder. - * @deprecated use {@link BlockFactory#newBytesRefBlockBuilder} - */ - @Deprecated - static Builder newBlockBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newBytesRefBlockBuilder(estimatedSize); - } - - /** - * Returns a constant block built by the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newConstantBytesRefBlockWith} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static BytesRefBlock newConstantBlockWith(BytesRef value, int positions) { - return newConstantBlockWith(value, positions, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a constant block. 
- * @deprecated use {@link BlockFactory#newConstantBytesRefBlockWith} - */ - @Deprecated - static BytesRefBlock newConstantBlockWith(BytesRef value, int positions, BlockFactory blockFactory) { - return blockFactory.newConstantBytesRefBlockWith(value, positions); - } - /** * Builder for {@link BytesRefBlock} */ diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlockBuilder.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlockBuilder.java index 70e20ac9f1d00..aed422b0c0104 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlockBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlockBuilder.java @@ -190,40 +190,46 @@ public BytesRefBlockBuilder mvOrdering(Block.MvOrdering mvOrdering) { return this; } + private BytesRefBlock buildFromBytesArray() { + assert estimatedBytes == 0 || firstValueIndexes != null; + final BytesRefBlock theBlock; + if (hasNonNullValue && positionCount == 1 && valueCount == 1) { + theBlock = new ConstantBytesRefVector(BytesRef.deepCopyOf(values.get(0, new BytesRef())), 1, blockFactory).asBlock(); + /* + * Update the breaker with the actual bytes used. + * If the breaker trips on this adjustment we throw away the used memory, + * letting it be deallocated. The exception will bubble up and the builder + * will still technically be open, meaning the calling code should close it, + * which will return all used memory to the breaker. + */ + blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes); + Releasables.closeExpectNoException(values); + } else { + if (isDense() && singleValued()) { + theBlock = new BytesRefArrayVector(values, positionCount, blockFactory).asBlock(); + } else { + theBlock = new BytesRefArrayBlock(values, positionCount, firstValueIndexes, nullsMask, mvOrdering, blockFactory); + } + /* + * Update the breaker with the actual bytes used. + * If the breaker trips on this adjustment we throw away the used memory, + * letting it be deallocated. The exception will bubble up and the builder + * will still technically be open, meaning the calling code should close it, + * which will return all used memory to the breaker. + */ + blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes - values.bigArraysRamBytesUsed()); + } + return theBlock; + } + @Override public BytesRefBlock build() { try { finish(); BytesRefBlock theBlock; - assert estimatedBytes == 0 || firstValueIndexes != null; - if (hasNonNullValue && positionCount == 1 && valueCount == 1) { - theBlock = new ConstantBytesRefVector(BytesRef.deepCopyOf(values.get(0, new BytesRef())), 1, blockFactory).asBlock(); - /* - * Update the breaker with the actual bytes used. - * We pass false below even though we've used the bytes. That's weird, - * but if we break here we will throw away the used memory, letting - * it be deallocated. The exception will bubble up and the builder will - * still technically be open, meaning the calling code should close it - * which will return all used memory to the breaker.
- */ - blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes, false); - Releasables.closeExpectNoException(values); - } else { - if (isDense() && singleValued()) { - theBlock = new BytesRefArrayVector(values, positionCount, blockFactory).asBlock(); - } else { - theBlock = new BytesRefArrayBlock(values, positionCount, firstValueIndexes, nullsMask, mvOrdering, blockFactory); - } - /* - * Update the breaker with the actual bytes used. - * We pass false below even though we've used the bytes. That's weird, - * but if we break here we will throw away the used memory, letting - * it be deallocated. The exception will bubble up and the builder will - * still technically be open, meaning the calling code should close it - * which will return all used memory to the breaker. - */ - blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes - values.bigArraysRamBytesUsed(), false); - } + theBlock = buildFromBytesArray(); values = null; built(); return theBlock; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java index b7011666b981d..5c56ece72c298 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java @@ -101,25 +101,6 @@ default void writeTo(StreamOutput out) throws IOException { } } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance nonbreaking block factory}. - * @deprecated use {@link BlockFactory#newBytesRefVectorBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newVectorBuilder(int estimatedSize) { - return newVectorBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Creates a builder that grows as needed. - * @deprecated use {@link BlockFactory#newBytesRefVectorBuilder} - */ - @Deprecated - static Builder newVectorBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newBytesRefVectorBuilder(estimatedSize); - } - /** * A builder that grows as needed. */ diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java index 9d3f69bfaa981..92f93d5d23a49 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java @@ -11,7 +11,7 @@ import org.elasticsearch.core.Releasables; /** - * Block view of a BytesRefVector. + * Block view of a {@link BytesRefVector}. Cannot represent multi-values or nulls. * This class is generated. Do not edit it. 
*/ public final class BytesRefVectorBlock extends AbstractVectorBlock implements BytesRefBlock { @@ -74,11 +74,6 @@ public String toString() { return getClass().getSimpleName() + "[vector=" + vector + "]"; } - @Override - public boolean isReleased() { - return super.isReleased() || vector.isReleased(); - } - @Override public void closeInternal() { assert (vector.isReleased() == false) : "can't release block [" + this + "] containing already released vector"; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBuilder.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBuilder.java index 5ea9a2b7d0184..cd5851e9e49ef 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBuilder.java @@ -67,7 +67,7 @@ public BytesRefVector build() { * still technically be open, meaning the calling code should close it * which will return all used memory to the breaker. */ - blockFactory.adjustBreaker(vector.ramBytesUsed(), false); + blockFactory.adjustBreaker(vector.ramBytesUsed()); Releasables.closeExpectNoException(values); } else { vector = new BytesRefArrayVector(values, valueCount, blockFactory); @@ -79,7 +79,7 @@ public BytesRefVector build() { * still technically be open, meaning the calling code should close it * which will return all used memory to the breaker. */ - blockFactory.adjustBreaker(vector.ramBytesUsed() - values.bigArraysRamBytesUsed(), false); + blockFactory.adjustBreaker(vector.ramBytesUsed() - values.bigArraysRamBytesUsed()); } values = null; built(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBooleanVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBooleanVector.java index b636d89a206e0..16d70d1a0e800 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBooleanVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBooleanVector.java @@ -13,22 +13,15 @@ * Vector implementation that stores a constant boolean value. * This class is generated. Do not edit it. */ -public final class ConstantBooleanVector extends AbstractVector implements BooleanVector { +final class ConstantBooleanVector extends AbstractVector implements BooleanVector { static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantBooleanVector.class); private final boolean value; - private final BooleanBlock block; - - public ConstantBooleanVector(boolean value, int positionCount) { - this(value, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public ConstantBooleanVector(boolean value, int positionCount, BlockFactory blockFactory) { + ConstantBooleanVector(boolean value, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.value = value; - this.block = new BooleanVectorBlock(this); } @Override @@ -38,12 +31,12 @@ public boolean getBoolean(int position) { @Override public BooleanBlock asBlock() { - return block; + return new BooleanVectorBlock(this); } @Override public BooleanVector filter(int... 
positions) { - return new ConstantBooleanVector(value, positions.length); + return blockFactory().newConstantBooleanVector(value, positions.length); } @Override @@ -77,13 +70,4 @@ public int hashCode() { public String toString() { return getClass().getSimpleName() + "[positions=" + getPositionCount() + ", value=" + value + ']'; } - - @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; - blockFactory().adjustBreaker(-ramBytesUsed(), true); - } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java index be34db592b228..57ec1c945ade5 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java @@ -14,22 +14,15 @@ * Vector implementation that stores a constant BytesRef value. * This class is generated. Do not edit it. */ -public final class ConstantBytesRefVector extends AbstractVector implements BytesRefVector { +final class ConstantBytesRefVector extends AbstractVector implements BytesRefVector { static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantBytesRefVector.class) + RamUsageEstimator .shallowSizeOfInstance(BytesRef.class); private final BytesRef value; - private final BytesRefBlock block; - - public ConstantBytesRefVector(BytesRef value, int positionCount) { - this(value, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public ConstantBytesRefVector(BytesRef value, int positionCount, BlockFactory blockFactory) { + ConstantBytesRefVector(BytesRef value, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.value = value; - this.block = new BytesRefVectorBlock(this); } @Override @@ -39,12 +32,12 @@ public BytesRef getBytesRef(int position, BytesRef ignore) { @Override public BytesRefBlock asBlock() { - return block; + return new BytesRefVectorBlock(this); } @Override public BytesRefVector filter(int... positions) { - return new ConstantBytesRefVector(value, positions.length); + return blockFactory().newConstantBytesRefVector(value, positions.length); } @Override @@ -82,13 +75,4 @@ public int hashCode() { public String toString() { return getClass().getSimpleName() + "[positions=" + getPositionCount() + ", value=" + value + ']'; } - - @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; - blockFactory().adjustBreaker(-ramBytesUsed(), true); - } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantDoubleVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantDoubleVector.java index f6cce49aa3d42..a783f0243313e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantDoubleVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantDoubleVector.java @@ -13,22 +13,15 @@ * Vector implementation that stores a constant double value. * This class is generated. Do not edit it. 
*/ -public final class ConstantDoubleVector extends AbstractVector implements DoubleVector { +final class ConstantDoubleVector extends AbstractVector implements DoubleVector { static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantDoubleVector.class); private final double value; - private final DoubleBlock block; - - public ConstantDoubleVector(double value, int positionCount) { - this(value, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public ConstantDoubleVector(double value, int positionCount, BlockFactory blockFactory) { + ConstantDoubleVector(double value, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.value = value; - this.block = new DoubleVectorBlock(this); } @Override @@ -38,12 +31,12 @@ public double getDouble(int position) { @Override public DoubleBlock asBlock() { - return block; + return new DoubleVectorBlock(this); } @Override public DoubleVector filter(int... positions) { - return new ConstantDoubleVector(value, positions.length); + return blockFactory().newConstantDoubleVector(value, positions.length); } @Override @@ -77,13 +70,4 @@ public int hashCode() { public String toString() { return getClass().getSimpleName() + "[positions=" + getPositionCount() + ", value=" + value + ']'; } - - @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; - blockFactory().adjustBreaker(-ramBytesUsed(), true); - } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantIntVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantIntVector.java index fa7b9223d5107..56573e985c387 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantIntVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantIntVector.java @@ -13,22 +13,15 @@ * Vector implementation that stores a constant int value. * This class is generated. Do not edit it. */ -public final class ConstantIntVector extends AbstractVector implements IntVector { +final class ConstantIntVector extends AbstractVector implements IntVector { static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantIntVector.class); private final int value; - private final IntBlock block; - - public ConstantIntVector(int value, int positionCount) { - this(value, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public ConstantIntVector(int value, int positionCount, BlockFactory blockFactory) { + ConstantIntVector(int value, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.value = value; - this.block = new IntVectorBlock(this); } @Override @@ -38,12 +31,12 @@ public int getInt(int position) { @Override public IntBlock asBlock() { - return block; + return new IntVectorBlock(this); } @Override public IntVector filter(int... 
positions) { - return new ConstantIntVector(value, positions.length); + return blockFactory().newConstantIntVector(value, positions.length); } @Override @@ -77,13 +70,4 @@ public int hashCode() { public String toString() { return getClass().getSimpleName() + "[positions=" + getPositionCount() + ", value=" + value + ']'; } - - @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; - blockFactory().adjustBreaker(-ramBytesUsed(), true); - } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantLongVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantLongVector.java index 21d4d81dfd193..0173f1c1d4d7a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantLongVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantLongVector.java @@ -13,22 +13,15 @@ * Vector implementation that stores a constant long value. * This class is generated. Do not edit it. */ -public final class ConstantLongVector extends AbstractVector implements LongVector { +final class ConstantLongVector extends AbstractVector implements LongVector { static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantLongVector.class); private final long value; - private final LongBlock block; - - public ConstantLongVector(long value, int positionCount) { - this(value, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public ConstantLongVector(long value, int positionCount, BlockFactory blockFactory) { + ConstantLongVector(long value, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.value = value; - this.block = new LongVectorBlock(this); } @Override @@ -38,12 +31,12 @@ public long getLong(int position) { @Override public LongBlock asBlock() { - return block; + return new LongVectorBlock(this); } @Override public LongVector filter(int... positions) { - return new ConstantLongVector(value, positions.length); + return blockFactory().newConstantLongVector(value, positions.length); } @Override @@ -77,13 +70,4 @@ public int hashCode() { public String toString() { return getClass().getSimpleName() + "[positions=" + getPositionCount() + ", value=" + value + ']'; } - - @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; - blockFactory().adjustBreaker(-ramBytesUsed(), true); - } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java index db3546c73c054..cb5258c7ae22c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java @@ -8,26 +8,40 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.core.Releasables; -import java.util.Arrays; import java.util.BitSet; /** - * Block implementation that stores an array of double. + * Block implementation that stores values in a {@link DoubleArrayVector}. * This class is generated. Do not edit it. 
*/ -public final class DoubleArrayBlock extends AbstractArrayBlock implements DoubleBlock { +final class DoubleArrayBlock extends AbstractArrayBlock implements DoubleBlock { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DoubleArrayBlock.class); - private final double[] values; + private final DoubleArrayVector vector; - public DoubleArrayBlock(double[] values, int positionCount, int[] firstValueIndexes, BitSet nulls, MvOrdering mvOrdering) { - this(values, positionCount, firstValueIndexes, nulls, mvOrdering, BlockFactory.getNonBreakingInstance()); + DoubleArrayBlock( + double[] values, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + this( + new DoubleArrayVector(values, firstValueIndexes == null ? positionCount : firstValueIndexes[positionCount], blockFactory), + positionCount, + firstValueIndexes, + nulls, + mvOrdering, + blockFactory + ); } - public DoubleArrayBlock( - double[] values, + private DoubleArrayBlock( + DoubleArrayVector vector, int positionCount, int[] firstValueIndexes, BitSet nulls, @@ -35,7 +49,10 @@ public DoubleArrayBlock( BlockFactory blockFactory ) { super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); - this.values = values; + this.vector = vector; + assert firstValueIndexes == null + ? vector.getPositionCount() == getPositionCount() + : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } @Override @@ -45,11 +62,12 @@ public DoubleVector asVector() { @Override public double getDouble(int valueIndex) { - return values[valueIndex]; + return vector.getDouble(valueIndex); } @Override public DoubleBlock filter(int... positions) { + // TODO use reference counting to share the vector try (var builder = blockFactory().newDoubleBlockBuilder(positions.length)) { for (int pos : positions) { if (isNull(pos)) { @@ -83,31 +101,37 @@ public DoubleBlock expand() { incRef(); return this; } - // TODO use reference counting to share the values - try (var builder = blockFactory().newDoubleBlockBuilder(firstValueIndexes[getPositionCount()])) { - for (int pos = 0; pos < getPositionCount(); pos++) { - if (isNull(pos)) { - builder.appendNull(); - continue; - } - int first = getFirstValueIndex(pos); - int end = first + getValueCount(pos); - for (int i = first; i < end; i++) { - builder.appendDouble(getDouble(i)); - } - } - return builder.mvOrdering(MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING).build(); + if (nullsMask == null) { + vector.incRef(); + return vector.asBlock(); } + + // The following line is correct because positions with multi-values are never null. + int expandedPositionCount = vector.getPositionCount(); + long bitSetRamUsedEstimate = Math.max(nullsMask.size(), BlockRamUsageEstimator.sizeOfBitSet(expandedPositionCount)); + blockFactory().adjustBreaker(bitSetRamUsedEstimate); + + DoubleArrayBlock expanded = new DoubleArrayBlock( + vector, + expandedPositionCount, + null, + shiftNullsToExpandedPositions(), + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + blockFactory() + ); + blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); + // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
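One more pattern repeats across this patch, in the Constant*Vector hunks above and the DoubleArrayVector hunk just below: vectors no longer cache a pre-built Block view in a field; asBlock() wraps on demand, and filter() on constant vectors routes through the BlockFactory so the result is breaker-tracked. A hedged sketch of the observable behavior (values and sizes are illustrative; lifetime handling is elided):

```java
// asBlock() now builds a fresh, lightweight view over the same storage on
// every call, so callers must not rely on reference equality between views.
DoubleVector vector = blockFactory.newConstantDoubleVector(1.5, 8);
DoubleBlock a = vector.asBlock();
DoubleBlock b = vector.asBlock();
assert a != b;                               // distinct view objects
assert a.getDouble(0) == b.getDouble(0);     // backed by the same constant
DoubleVector filtered = vector.filter(0, 3); // breaker-tracked via the factory
```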
+ vector.incRef(); + return expanded; } - public static long ramBytesEstimated(double[] values, int[] firstValueIndexes, BitSet nullsMask) { - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) - + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); } @Override public long ramBytesUsed() { - return ramBytesEstimated(values, firstValueIndexes, nullsMask); + return ramBytesUsedOnlyBlock() + vector.ramBytesUsed(); } @Override @@ -130,13 +154,20 @@ public String toString() { + getPositionCount() + ", mvOrdering=" + mvOrdering() - + ", values=" - + Arrays.toString(values) + + ", vector=" + + vector + ']'; } + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + @Override public void closeInternal() { - blockFactory().adjustBreaker(-ramBytesUsed(), true); + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); + Releasables.closeExpectNoException(vector); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java index 08e51b0e313d8..bb6d9c22539a6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java @@ -15,27 +15,20 @@ * Vector implementation that stores an array of double values. * This class is generated. Do not edit it. */ -public final class DoubleArrayVector extends AbstractVector implements DoubleVector { +final class DoubleArrayVector extends AbstractVector implements DoubleVector { static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DoubleArrayVector.class); private final double[] values; - private final DoubleBlock block; - - public DoubleArrayVector(double[] values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public DoubleArrayVector(double[] values, int positionCount, BlockFactory blockFactory) { + DoubleArrayVector(double[] values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new DoubleVectorBlock(this); } @Override public DoubleBlock asBlock() { - return block; + return new DoubleVectorBlock(this); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java new file mode 100644 index 0000000000000..59bbd5a941e4b --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java @@ -0,0 +1,174 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.data; + +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.DoubleArray; +import org.elasticsearch.core.Releasables; + +import java.util.BitSet; + +/** + * Block implementation that stores values in a {@link DoubleBigArrayVector}. Does not take ownership of the given + * {@link DoubleArray} and does not adjust circuit breakers to account for it. + * This class is generated. Do not edit it. + */ +public final class DoubleBigArrayBlock extends AbstractArrayBlock implements DoubleBlock { + + private static final long BASE_RAM_BYTES_USED = 0; // TODO: fix this + private final DoubleBigArrayVector vector; + + public DoubleBigArrayBlock( + DoubleArray values, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + this( + new DoubleBigArrayVector(values, firstValueIndexes == null ? positionCount : firstValueIndexes[positionCount], blockFactory), + positionCount, + firstValueIndexes, + nulls, + mvOrdering, + blockFactory + ); + } + + private DoubleBigArrayBlock( + DoubleBigArrayVector vector, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + this.vector = vector; + assert firstValueIndexes == null + ? vector.getPositionCount() == getPositionCount() + : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); + } + + @Override + public DoubleVector asVector() { + return null; + } + + @Override + public double getDouble(int valueIndex) { + return vector.getDouble(valueIndex); + } + + @Override + public DoubleBlock filter(int... positions) { + // TODO use reference counting to share the vector + try (var builder = blockFactory().newDoubleBlockBuilder(positions.length)) { + for (int pos : positions) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int valueCount = getValueCount(pos); + int first = getFirstValueIndex(pos); + if (valueCount == 1) { + builder.appendDouble(getDouble(getFirstValueIndex(pos))); + } else { + builder.beginPositionEntry(); + for (int c = 0; c < valueCount; c++) { + builder.appendDouble(getDouble(first + c)); + } + builder.endPositionEntry(); + } + } + return builder.mvOrdering(mvOrdering()).build(); + } + } + + @Override + public ElementType elementType() { + return ElementType.DOUBLE; + } + + @Override + public DoubleBlock expand() { + if (firstValueIndexes == null) { + incRef(); + return this; + } + if (nullsMask == null) { + vector.incRef(); + return vector.asBlock(); + } + + // The following line is correct because positions with multi-values are never null. + int expandedPositionCount = vector.getPositionCount(); + long bitSetRamUsedEstimate = Math.max(nullsMask.size(), BlockRamUsageEstimator.sizeOfBitSet(expandedPositionCount)); + blockFactory().adjustBreaker(bitSetRamUsedEstimate); + + DoubleBigArrayBlock expanded = new DoubleBigArrayBlock( + vector, + expandedPositionCount, + null, + shiftNullsToExpandedPositions(), + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + blockFactory() + ); + blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); + // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
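+ // For example, a block holding [[1.0, 2.0], null, [3.0]] expands to four single-valued positions [1.0], [2.0], null, [3.0], reusing this block's vector.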
+ vector.incRef(); + return expanded; + } + + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + } + + @Override + public long ramBytesUsed() { + return ramBytesUsedOnlyBlock() + RamUsageEstimator.sizeOf(vector); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof DoubleBlock that) { + return DoubleBlock.equals(this, that); + } + return false; + } + + @Override + public int hashCode() { + return DoubleBlock.hash(this); + } + + @Override + public String toString() { + return getClass().getSimpleName() + + "[positions=" + + getPositionCount() + + ", mvOrdering=" + + mvOrdering() + + ", ramBytesUsed=" + + vector.ramBytesUsed() + + ']'; + } + + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + + @Override + public void closeInternal() { + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); + Releasables.closeExpectNoException(vector); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java index 476b94ad3fa05..45b9b4bec14ba 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java @@ -12,30 +12,24 @@ import org.elasticsearch.core.Releasable; /** - * Vector implementation that defers to an enclosed DoubleArray. + * Vector implementation that defers to an enclosed {@link DoubleArray}. + * Does not take ownership of the array and does not adjust circuit breakers to account for it. * This class is generated. Do not edit it. */ public final class DoubleBigArrayVector extends AbstractVector implements DoubleVector, Releasable { - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DoubleBigArrayVector.class); + private static final long BASE_RAM_BYTES_USED = 0; // FIXME private final DoubleArray values; - private final DoubleBlock block; - - public DoubleBigArrayVector(DoubleArray values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - public DoubleBigArrayVector(DoubleArray values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new DoubleVectorBlock(this); } @Override public DoubleBlock asBlock() { - return block; + return new DoubleVectorBlock(this); } @Override @@ -61,7 +55,7 @@ public long ramBytesUsed() { @Override public DoubleVector filter(int... positions) { var blockFactory = blockFactory(); - final DoubleArray filtered = blockFactory.bigArrays().newDoubleArray(positions.length, true); + final DoubleArray filtered = blockFactory.bigArrays().newDoubleArray(positions.length); for (int i = 0; i < positions.length; i++) { filtered.set(i, values.get(positions[i])); } @@ -69,11 +63,9 @@ public DoubleVector filter(int... positions) { } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; + public void closeInternal() { + // The circuit breaker that tracks the values {@link DoubleArray} is adjusted outside + // of this class. 
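+ // closeInternal() runs once, when the last reference to this vector is released, so the wrapped DoubleArray cannot be closed twice.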
values.close(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java index 31d0000d28515..890f965c765bb 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java @@ -18,7 +18,7 @@ * Block that stores double values. * This class is generated. Do not edit it. */ -public sealed interface DoubleBlock extends Block permits DoubleArrayBlock, DoubleVectorBlock, ConstantNullBlock { +public sealed interface DoubleBlock extends Block permits DoubleArrayBlock, DoubleVectorBlock, ConstantNullBlock, DoubleBigArrayBlock { /** * Retrieves the double value stored at the given value index. @@ -167,44 +167,6 @@ static int hash(DoubleBlock block) { return result; } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newDoubleBlockBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newBlockBuilder(int estimatedSize) { - return newBlockBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a builder. - * @deprecated use {@link BlockFactory#newDoubleBlockBuilder} - */ - @Deprecated - static Builder newBlockBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newDoubleBlockBuilder(estimatedSize); - } - - /** - * Returns a constant block built by the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newConstantDoubleBlockWith} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static DoubleBlock newConstantBlockWith(double value, int positions) { - return newConstantBlockWith(value, positions, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a constant block. 
- * @deprecated use {@link BlockFactory#newConstantDoubleBlockWith} - */ - @Deprecated - static DoubleBlock newConstantBlockWith(double value, int positions, BlockFactory blockFactory) { - return blockFactory.newConstantDoubleBlockWith(value, positions); - } - /** * Builder for {@link DoubleBlock} */ diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlockBuilder.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlockBuilder.java index 7781e4c353e8e..9a157cdcef50e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlockBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlockBuilder.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.util.DoubleArray; import java.util.Arrays; @@ -179,6 +180,29 @@ public DoubleBlockBuilder mvOrdering(Block.MvOrdering mvOrdering) { return this; } + private DoubleBlock buildBigArraysBlock() { + final DoubleBlock theBlock; + final DoubleArray array = blockFactory.bigArrays().newDoubleArray(valueCount, false); + for (int i = 0; i < valueCount; i++) { + array.set(i, values[i]); + } + if (isDense() && singleValued()) { + theBlock = new DoubleBigArrayVector(array, positionCount, blockFactory).asBlock(); + } else { + theBlock = new DoubleBigArrayBlock(array, positionCount, firstValueIndexes, nullsMask, mvOrdering, blockFactory); + } + /* + * Update the breaker with the actual bytes used. + * If we break here we will throw away the used memory, letting + * it be deallocated. The exception will bubble up and the builder will + * still technically be open, meaning the calling code should close it, + * which will return all used memory to the breaker. 
+ */ + blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes - array.ramBytesUsed()); + return theBlock; + } + @Override public DoubleBlock build() { try { @@ -187,20 +211,26 @@ public DoubleBlock build() { if (hasNonNullValue && positionCount == 1 && valueCount == 1) { theBlock = blockFactory.newConstantDoubleBlockWith(values[0], 1, estimatedBytes); } else { - if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { - values = Arrays.copyOf(values, valueCount); - } - if (isDense() && singleValued()) { - theBlock = blockFactory.newDoubleArrayVector(values, positionCount, estimatedBytes).asBlock(); + if (estimatedBytes > blockFactory.maxPrimitiveArrayBytes()) { + theBlock = buildBigArraysBlock(); } else { - theBlock = blockFactory.newDoubleArrayBlock( - values, - positionCount, - firstValueIndexes, - nullsMask, - mvOrdering, - estimatedBytes - ); + if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { + adjustBreaker(valueCount * elementSize()); + values = Arrays.copyOf(values, valueCount); + adjustBreaker(-values.length * elementSize()); + } + if (isDense() && singleValued()) { + theBlock = blockFactory.newDoubleArrayVector(values, positionCount, estimatedBytes).asBlock(); + } else { + theBlock = blockFactory.newDoubleArrayBlock( + values, + positionCount, + firstValueIndexes, + nullsMask, + mvOrdering, + estimatedBytes + ); + } } } built(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java index acabd0deb17f6..f54044874acdd 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java @@ -102,40 +102,10 @@ default void writeTo(StreamOutput out) throws IOException { } } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance nonbreaking block factory}. - * @deprecated use {@link BlockFactory#newDoubleVectorBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newVectorBuilder(int estimatedSize) { - return newVectorBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Creates a builder that grows as needed. Prefer {@link #newVectorFixedBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#newDoubleVectorBuilder} - */ - @Deprecated - static Builder newVectorBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newDoubleVectorBuilder(estimatedSize); - } - - /** - * Creates a builder that never grows. Prefer this over {@link #newVectorBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#newDoubleVectorFixedBuilder} - */ - @Deprecated - static FixedBuilder newVectorFixedBuilder(int size, BlockFactory blockFactory) { - return blockFactory.newDoubleVectorFixedBuilder(size); - } - /** * A builder that grows as needed. */ - sealed interface Builder extends Vector.Builder permits DoubleVectorBuilder { + sealed interface Builder extends Vector.Builder permits DoubleVectorBuilder, FixedBuilder { /** * Appends a double to the current entry. */ @@ -148,13 +118,11 @@ sealed interface Builder extends Vector.Builder permits DoubleVectorBuilder { /** * A builder that never grows. 
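The vector builder hunks here and below fold `FixedBuilder` into the `Builder` hierarchy, so a size-aware call site can hand back the faster fixed builder behind the common interface. A sketch, reusing the test-style `factory` from the first example:

```java
// FixedBuilder now extends Builder, so this assignment compiles.
try (DoubleVector.Builder builder = factory.newDoubleVectorFixedBuilder(2)) {
    builder.appendDouble(1.0).appendDouble(2.0);
    try (DoubleVector vector = builder.build()) {
        assert vector.getDouble(1) == 2.0;
    }
}
```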
*/ - sealed interface FixedBuilder extends Vector.Builder permits DoubleVectorFixedBuilder { + sealed interface FixedBuilder extends Builder permits DoubleVectorFixedBuilder { /** * Appends a double to the current entry. */ - FixedBuilder appendDouble(double value); - @Override - DoubleVector build(); + FixedBuilder appendDouble(double value); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java index b23a448c58336..2aa8e07c25604 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java @@ -10,7 +10,7 @@ import org.elasticsearch.core.Releasables; /** - * Block view of a DoubleVector. + * Block view of a {@link DoubleVector}. Cannot represent multi-values or nulls. * This class is generated. Do not edit it. */ public final class DoubleVectorBlock extends AbstractVectorBlock implements DoubleBlock { @@ -73,11 +73,6 @@ public String toString() { return getClass().getSimpleName() + "[vector=" + vector + "]"; } - @Override - public boolean isReleased() { - return super.isReleased() || vector.isReleased(); - } - @Override public void closeInternal() { assert (vector.isReleased() == false) : "can't release block [" + this + "] containing already released vector"; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorFixedBuilder.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorFixedBuilder.java index 7353515e8ffd8..c58856afa0266 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorFixedBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorFixedBuilder.java @@ -27,7 +27,7 @@ final class DoubleVectorFixedBuilder implements DoubleVector.FixedBuilder { DoubleVectorFixedBuilder(int size, BlockFactory blockFactory) { preAdjustedBytes = ramBytesUsed(size); - blockFactory.adjustBreaker(preAdjustedBytes, false); + blockFactory.adjustBreaker(preAdjustedBytes); this.blockFactory = blockFactory; this.values = new double[size]; } @@ -70,7 +70,7 @@ public void close() { if (nextIndex >= 0) { // If nextIndex < 0 we've already built the vector nextIndex = -1; - blockFactory.adjustBreaker(-preAdjustedBytes, false); + blockFactory.adjustBreaker(-preAdjustedBytes); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java index 111fc0c757af1..0d8262975c535 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java @@ -8,26 +8,40 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.core.Releasables; -import java.util.Arrays; import java.util.BitSet; /** - * Block implementation that stores an array of int. + * Block implementation that stores values in a {@link IntArrayVector}. * This class is generated. Do not edit it. 
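`IntArrayBlock`, like its double and long siblings, now rebuilds `filter` results through a breaker-tracked builder; the hunk below carries a TODO to eventually share the vector via reference counting instead of copying. A behavioral sketch, again with the test-style `factory` from the first example:

```java
try (var builder = factory.newIntBlockBuilder(3)) {
    builder.appendInt(10).appendInt(20).appendInt(30);
    // filter(2, 0) keeps positions 2 and 0, in that order.
    try (IntBlock block = builder.build(); IntBlock filtered = block.filter(2, 0)) {
        assert filtered.getPositionCount() == 2;
        assert filtered.getInt(0) == 30 && filtered.getInt(1) == 10;
    }
}
```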
*/ -public final class IntArrayBlock extends AbstractArrayBlock implements IntBlock { +final class IntArrayBlock extends AbstractArrayBlock implements IntBlock { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IntArrayBlock.class); - private final int[] values; + private final IntArrayVector vector; - public IntArrayBlock(int[] values, int positionCount, int[] firstValueIndexes, BitSet nulls, MvOrdering mvOrdering) { - this(values, positionCount, firstValueIndexes, nulls, mvOrdering, BlockFactory.getNonBreakingInstance()); + IntArrayBlock( + int[] values, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + this( + new IntArrayVector(values, firstValueIndexes == null ? positionCount : firstValueIndexes[positionCount], blockFactory), + positionCount, + firstValueIndexes, + nulls, + mvOrdering, + blockFactory + ); } - public IntArrayBlock( - int[] values, + private IntArrayBlock( + IntArrayVector vector, int positionCount, int[] firstValueIndexes, BitSet nulls, @@ -35,7 +49,10 @@ public IntArrayBlock( BlockFactory blockFactory ) { super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); - this.values = values; + this.vector = vector; + assert firstValueIndexes == null + ? vector.getPositionCount() == getPositionCount() + : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } @Override @@ -45,11 +62,12 @@ public IntVector asVector() { @Override public int getInt(int valueIndex) { - return values[valueIndex]; + return vector.getInt(valueIndex); } @Override public IntBlock filter(int... positions) { + // TODO use reference counting to share the vector try (var builder = blockFactory().newIntBlockBuilder(positions.length)) { for (int pos : positions) { if (isNull(pos)) { @@ -83,31 +101,37 @@ public IntBlock expand() { incRef(); return this; } - // TODO use reference counting to share the values - try (var builder = blockFactory().newIntBlockBuilder(firstValueIndexes[getPositionCount()])) { - for (int pos = 0; pos < getPositionCount(); pos++) { - if (isNull(pos)) { - builder.appendNull(); - continue; - } - int first = getFirstValueIndex(pos); - int end = first + getValueCount(pos); - for (int i = first; i < end; i++) { - builder.appendInt(getInt(i)); - } - } - return builder.mvOrdering(MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING).build(); + if (nullsMask == null) { + vector.incRef(); + return vector.asBlock(); } + + // The following line is correct because positions with multi-values are never null. + int expandedPositionCount = vector.getPositionCount(); + long bitSetRamUsedEstimate = Math.max(nullsMask.size(), BlockRamUsageEstimator.sizeOfBitSet(expandedPositionCount)); + blockFactory().adjustBreaker(bitSetRamUsedEstimate); + + IntArrayBlock expanded = new IntArrayBlock( + vector, + expandedPositionCount, + null, + shiftNullsToExpandedPositions(), + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + blockFactory() + ); + blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); + // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
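+ // From here on the original block and the expanded block share this vector; its memory is returned to the breaker only after both blocks have been closed.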
+ vector.incRef(); + return expanded; } - public static long ramBytesEstimated(int[] values, int[] firstValueIndexes, BitSet nullsMask) { - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) - + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); } @Override public long ramBytesUsed() { - return ramBytesEstimated(values, firstValueIndexes, nullsMask); + return ramBytesUsedOnlyBlock() + vector.ramBytesUsed(); } @Override @@ -130,13 +154,20 @@ public String toString() { + getPositionCount() + ", mvOrdering=" + mvOrdering() - + ", values=" - + Arrays.toString(values) + + ", vector=" + + vector + ']'; } + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + @Override public void closeInternal() { - blockFactory().adjustBreaker(-ramBytesUsed(), true); + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); + Releasables.closeExpectNoException(vector); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java index 9c8c27efa0806..0576b77a0d700 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java @@ -15,27 +15,20 @@ * Vector implementation that stores an array of int values. * This class is generated. Do not edit it. */ -public final class IntArrayVector extends AbstractVector implements IntVector { +final class IntArrayVector extends AbstractVector implements IntVector { static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IntArrayVector.class); private final int[] values; - private final IntBlock block; - - public IntArrayVector(int[] values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public IntArrayVector(int[] values, int positionCount, BlockFactory blockFactory) { + IntArrayVector(int[] values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new IntVectorBlock(this); } @Override public IntBlock asBlock() { - return block; + return new IntVectorBlock(this); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java new file mode 100644 index 0000000000000..b1a1473ff4b4a --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java @@ -0,0 +1,174 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.data; + +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.IntArray; +import org.elasticsearch.core.Releasables; + +import java.util.BitSet; + +/** + * Block implementation that stores values in a {@link IntBigArrayVector}. Does not take ownership of the given + * {@link IntArray} and does not adjust circuit breakers to account for it. + * This class is generated. Do not edit it. + */ +public final class IntBigArrayBlock extends AbstractArrayBlock implements IntBlock { + + private static final long BASE_RAM_BYTES_USED = 0; // TODO: fix this + private final IntBigArrayVector vector; + + public IntBigArrayBlock( + IntArray values, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + this( + new IntBigArrayVector(values, firstValueIndexes == null ? positionCount : firstValueIndexes[positionCount], blockFactory), + positionCount, + firstValueIndexes, + nulls, + mvOrdering, + blockFactory + ); + } + + private IntBigArrayBlock( + IntBigArrayVector vector, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + this.vector = vector; + assert firstValueIndexes == null + ? vector.getPositionCount() == getPositionCount() + : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); + } + + @Override + public IntVector asVector() { + return null; + } + + @Override + public int getInt(int valueIndex) { + return vector.getInt(valueIndex); + } + + @Override + public IntBlock filter(int... positions) { + // TODO use reference counting to share the vector + try (var builder = blockFactory().newIntBlockBuilder(positions.length)) { + for (int pos : positions) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int valueCount = getValueCount(pos); + int first = getFirstValueIndex(pos); + if (valueCount == 1) { + builder.appendInt(getInt(getFirstValueIndex(pos))); + } else { + builder.beginPositionEntry(); + for (int c = 0; c < valueCount; c++) { + builder.appendInt(getInt(first + c)); + } + builder.endPositionEntry(); + } + } + return builder.mvOrdering(mvOrdering()).build(); + } + } + + @Override + public ElementType elementType() { + return ElementType.INT; + } + + @Override + public IntBlock expand() { + if (firstValueIndexes == null) { + incRef(); + return this; + } + if (nullsMask == null) { + vector.incRef(); + return vector.asBlock(); + } + + // The following line is correct because positions with multi-values are never null. + int expandedPositionCount = vector.getPositionCount(); + long bitSetRamUsedEstimate = Math.max(nullsMask.size(), BlockRamUsageEstimator.sizeOfBitSet(expandedPositionCount)); + blockFactory().adjustBreaker(bitSetRamUsedEstimate); + + IntBigArrayBlock expanded = new IntBigArrayBlock( + vector, + expandedPositionCount, + null, + shiftNullsToExpandedPositions(), + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + blockFactory() + ); + blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); + // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
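+ // For example, a block holding [[1, 2], null, [3]] expands to four single-valued positions [1], [2], null, [3], reusing this block's vector.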
+ vector.incRef(); + return expanded; + } + + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + } + + @Override + public long ramBytesUsed() { + return ramBytesUsedOnlyBlock() + RamUsageEstimator.sizeOf(vector); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof IntBlock that) { + return IntBlock.equals(this, that); + } + return false; + } + + @Override + public int hashCode() { + return IntBlock.hash(this); + } + + @Override + public String toString() { + return getClass().getSimpleName() + + "[positions=" + + getPositionCount() + + ", mvOrdering=" + + mvOrdering() + + ", ramBytesUsed=" + + vector.ramBytesUsed() + + ']'; + } + + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + + @Override + public void closeInternal() { + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); + Releasables.closeExpectNoException(vector); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java index 76d2797f2a64b..b553c8aab8761 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java @@ -12,30 +12,24 @@ import org.elasticsearch.core.Releasable; /** - * Vector implementation that defers to an enclosed IntArray. + * Vector implementation that defers to an enclosed {@link IntArray}. + * Does not take ownership of the array and does not adjust circuit breakers to account for it. * This class is generated. Do not edit it. */ public final class IntBigArrayVector extends AbstractVector implements IntVector, Releasable { - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IntBigArrayVector.class); + private static final long BASE_RAM_BYTES_USED = 0; // FIXME private final IntArray values; - private final IntBlock block; - - public IntBigArrayVector(IntArray values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - public IntBigArrayVector(IntArray values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new IntVectorBlock(this); } @Override public IntBlock asBlock() { - return block; + return new IntVectorBlock(this); } @Override @@ -61,7 +55,7 @@ public long ramBytesUsed() { @Override public IntVector filter(int... positions) { var blockFactory = blockFactory(); - final IntArray filtered = blockFactory.bigArrays().newIntArray(positions.length, true); + final IntArray filtered = blockFactory.bigArrays().newIntArray(positions.length); for (int i = 0; i < positions.length; i++) { filtered.set(i, values.get(positions[i])); } @@ -69,11 +63,9 @@ public IntVector filter(int... positions) { } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; + public void closeInternal() { + // The circuit breaker that tracks the values {@link IntArray} is adjusted outside + // of this class. 
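+ // closeInternal() runs once, when the last reference to this vector is released, so the wrapped IntArray cannot be closed twice.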
values.close(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java index 3909d2b6761be..9a66445eb55a2 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java @@ -18,7 +18,7 @@ * Block that stores int values. * This class is generated. Do not edit it. */ -public sealed interface IntBlock extends Block permits IntArrayBlock, IntVectorBlock, ConstantNullBlock { +public sealed interface IntBlock extends Block permits IntArrayBlock, IntVectorBlock, ConstantNullBlock, IntBigArrayBlock { /** * Retrieves the int value stored at the given value index. @@ -166,44 +166,6 @@ static int hash(IntBlock block) { return result; } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newIntBlockBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newBlockBuilder(int estimatedSize) { - return newBlockBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a builder. - * @deprecated use {@link BlockFactory#newIntBlockBuilder} - */ - @Deprecated - static Builder newBlockBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newIntBlockBuilder(estimatedSize); - } - - /** - * Returns a constant block built by the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newConstantIntBlockWith} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static IntBlock newConstantBlockWith(int value, int positions) { - return newConstantBlockWith(value, positions, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a constant block. 
- * @deprecated use {@link BlockFactory#newConstantIntBlockWith} - */ - @Deprecated - static IntBlock newConstantBlockWith(int value, int positions, BlockFactory blockFactory) { - return blockFactory.newConstantIntBlockWith(value, positions); - } - /** * Builder for {@link IntBlock} */ diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlockBuilder.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlockBuilder.java index 49c3b156ce44b..d49f5af05a8a7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlockBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlockBuilder.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.util.IntArray; import java.util.Arrays; @@ -179,6 +180,29 @@ public IntBlockBuilder mvOrdering(Block.MvOrdering mvOrdering) { return this; } + private IntBlock buildBigArraysBlock() { + final IntBlock theBlock; + final IntArray array = blockFactory.bigArrays().newIntArray(valueCount, false); + for (int i = 0; i < valueCount; i++) { + array.set(i, values[i]); + } + if (isDense() && singleValued()) { + theBlock = new IntBigArrayVector(array, positionCount, blockFactory).asBlock(); + } else { + theBlock = new IntBigArrayBlock(array, positionCount, firstValueIndexes, nullsMask, mvOrdering, blockFactory); + } + /* + * Update the breaker with the actual bytes used. + * If we break here we will throw away the used memory, letting + * it be deallocated. The exception will bubble up and the builder will + * still technically be open, meaning the calling code should close it, + * which will return all used memory to the breaker. + */ 
+ */ + blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes - array.ramBytesUsed()); + return theBlock; + } + @Override public IntBlock build() { try { @@ -187,20 +211,26 @@ public IntBlock build() { if (hasNonNullValue && positionCount == 1 && valueCount == 1) { theBlock = blockFactory.newConstantIntBlockWith(values[0], 1, estimatedBytes); } else { - if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { - values = Arrays.copyOf(values, valueCount); - } - if (isDense() && singleValued()) { - theBlock = blockFactory.newIntArrayVector(values, positionCount, estimatedBytes).asBlock(); + if (estimatedBytes > blockFactory.maxPrimitiveArrayBytes()) { + theBlock = buildBigArraysBlock(); } else { - theBlock = blockFactory.newIntArrayBlock( - values, - positionCount, - firstValueIndexes, - nullsMask, - mvOrdering, - estimatedBytes - ); + if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { + adjustBreaker(valueCount * elementSize()); + values = Arrays.copyOf(values, valueCount); + adjustBreaker(-values.length * elementSize()); + } + if (isDense() && singleValued()) { + theBlock = blockFactory.newIntArrayVector(values, positionCount, estimatedBytes).asBlock(); + } else { + theBlock = blockFactory.newIntArrayBlock( + values, + positionCount, + firstValueIndexes, + nullsMask, + mvOrdering, + estimatedBytes + ); + } } } built(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java index 645288565c431..bc7e3c87ec33d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java @@ -101,36 +101,6 @@ default void writeTo(StreamOutput out) throws IOException { } } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance nonbreaking block factory}. - * @deprecated use {@link BlockFactory#newIntVectorBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newVectorBuilder(int estimatedSize) { - return newVectorBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Creates a builder that grows as needed. Prefer {@link #newVectorFixedBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#newIntVectorBuilder} - */ - @Deprecated - static Builder newVectorBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newIntVectorBuilder(estimatedSize); - } - - /** - * Creates a builder that never grows. Prefer this over {@link #newVectorBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#newIntVectorFixedBuilder} - */ - @Deprecated - static FixedBuilder newVectorFixedBuilder(int size, BlockFactory blockFactory) { - return blockFactory.newIntVectorFixedBuilder(size); - } - /** Create a vector for a range of ints. */ static IntVector range(int startInclusive, int endExclusive, BlockFactory blockFactory) { int[] values = new int[endExclusive - startInclusive]; @@ -143,7 +113,7 @@ static IntVector range(int startInclusive, int endExclusive, BlockFactory blockF /** * A builder that grows as needed. 
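The `build()` hunk above adds a size cutoff: when the builder's reserved bytes exceed `BlockFactory#maxPrimitiveArrayBytes()`, the values spill into a BigArrays-backed block instead of a plain `int[]`. Callers are unaffected, since both paths return `IntBlock`. A sketch with the test-style `factory` from the first example (the element count is arbitrary):

```java
int count = 100_000;
try (var builder = factory.newIntBlockBuilder(count)) {
    for (int i = 0; i < count; i++) {
        builder.appendInt(i);
    }
    // The result is an IntArrayBlock or an IntBigArrayBlock, depending on how
    // count * Integer.BYTES compares with factory.maxPrimitiveArrayBytes();
    // either way the API is identical.
    try (IntBlock block = builder.build()) {
        assert block.getInt(count - 1) == count - 1;
    }
}
```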
*/ - sealed interface Builder extends Vector.Builder permits IntVectorBuilder { + sealed interface Builder extends Vector.Builder permits IntVectorBuilder, FixedBuilder { /** * Appends a int to the current entry. */ @@ -156,13 +126,11 @@ sealed interface Builder extends Vector.Builder permits IntVectorBuilder { /** * A builder that never grows. */ - sealed interface FixedBuilder extends Vector.Builder permits IntVectorFixedBuilder { + sealed interface FixedBuilder extends Builder permits IntVectorFixedBuilder { /** * Appends a int to the current entry. */ - FixedBuilder appendInt(int value); - @Override - IntVector build(); + FixedBuilder appendInt(int value); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java index 028ef35577753..97a4a48533e3a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java @@ -10,7 +10,7 @@ import org.elasticsearch.core.Releasables; /** - * Block view of a IntVector. + * Block view of a {@link IntVector}. Cannot represent multi-values or nulls. * This class is generated. Do not edit it. */ public final class IntVectorBlock extends AbstractVectorBlock implements IntBlock { @@ -73,11 +73,6 @@ public String toString() { return getClass().getSimpleName() + "[vector=" + vector + "]"; } - @Override - public boolean isReleased() { - return super.isReleased() || vector.isReleased(); - } - @Override public void closeInternal() { assert (vector.isReleased() == false) : "can't release block [" + this + "] containing already released vector"; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorFixedBuilder.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorFixedBuilder.java index a4755addf0b16..b143e9d592dc6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorFixedBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorFixedBuilder.java @@ -27,7 +27,7 @@ final class IntVectorFixedBuilder implements IntVector.FixedBuilder { IntVectorFixedBuilder(int size, BlockFactory blockFactory) { preAdjustedBytes = ramBytesUsed(size); - blockFactory.adjustBreaker(preAdjustedBytes, false); + blockFactory.adjustBreaker(preAdjustedBytes); this.blockFactory = blockFactory; this.values = new int[size]; } @@ -70,7 +70,7 @@ public void close() { if (nextIndex >= 0) { // If nextIndex < 0 we've already built the vector nextIndex = -1; - blockFactory.adjustBreaker(-preAdjustedBytes, false); + blockFactory.adjustBreaker(-preAdjustedBytes); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java index 9e0fa9bcc2993..c12033e829e6f 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java @@ -8,26 +8,40 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.core.Releasables; -import 
java.util.Arrays; import java.util.BitSet; /** - * Block implementation that stores an array of long. + * Block implementation that stores values in a {@link LongArrayVector}. * This class is generated. Do not edit it. */ -public final class LongArrayBlock extends AbstractArrayBlock implements LongBlock { +final class LongArrayBlock extends AbstractArrayBlock implements LongBlock { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(LongArrayBlock.class); - private final long[] values; + private final LongArrayVector vector; - public LongArrayBlock(long[] values, int positionCount, int[] firstValueIndexes, BitSet nulls, MvOrdering mvOrdering) { - this(values, positionCount, firstValueIndexes, nulls, mvOrdering, BlockFactory.getNonBreakingInstance()); + LongArrayBlock( + long[] values, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + this( + new LongArrayVector(values, firstValueIndexes == null ? positionCount : firstValueIndexes[positionCount], blockFactory), + positionCount, + firstValueIndexes, + nulls, + mvOrdering, + blockFactory + ); } - public LongArrayBlock( - long[] values, + private LongArrayBlock( + LongArrayVector vector, int positionCount, int[] firstValueIndexes, BitSet nulls, @@ -35,7 +49,10 @@ public LongArrayBlock( BlockFactory blockFactory ) { super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); - this.values = values; + this.vector = vector; + assert firstValueIndexes == null + ? vector.getPositionCount() == getPositionCount() + : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } @Override @@ -45,11 +62,12 @@ public LongVector asVector() { @Override public long getLong(int valueIndex) { - return values[valueIndex]; + return vector.getLong(valueIndex); } @Override public LongBlock filter(int... positions) { + // TODO use reference counting to share the vector try (var builder = blockFactory().newLongBlockBuilder(positions.length)) { for (int pos : positions) { if (isNull(pos)) { @@ -83,31 +101,37 @@ public LongBlock expand() { incRef(); return this; } - // TODO use reference counting to share the values - try (var builder = blockFactory().newLongBlockBuilder(firstValueIndexes[getPositionCount()])) { - for (int pos = 0; pos < getPositionCount(); pos++) { - if (isNull(pos)) { - builder.appendNull(); - continue; - } - int first = getFirstValueIndex(pos); - int end = first + getValueCount(pos); - for (int i = first; i < end; i++) { - builder.appendLong(getLong(i)); - } - } - return builder.mvOrdering(MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING).build(); + if (nullsMask == null) { + vector.incRef(); + return vector.asBlock(); } + + // The following line is correct because positions with multi-values are never null. + int expandedPositionCount = vector.getPositionCount(); + long bitSetRamUsedEstimate = Math.max(nullsMask.size(), BlockRamUsageEstimator.sizeOfBitSet(expandedPositionCount)); + blockFactory().adjustBreaker(bitSetRamUsedEstimate); + + LongArrayBlock expanded = new LongArrayBlock( + vector, + expandedPositionCount, + null, + shiftNullsToExpandedPositions(), + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + blockFactory() + ); + blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); + // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
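+ // From here on the original block and the expanded block share this vector; its memory is returned to the breaker only after both blocks have been closed.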
+ vector.incRef(); + return expanded; } - public static long ramBytesEstimated(long[] values, int[] firstValueIndexes, BitSet nullsMask) { - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) - + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); } @Override public long ramBytesUsed() { - return ramBytesEstimated(values, firstValueIndexes, nullsMask); + return ramBytesUsedOnlyBlock() + vector.ramBytesUsed(); } @Override @@ -130,13 +154,20 @@ public String toString() { + getPositionCount() + ", mvOrdering=" + mvOrdering() - + ", values=" - + Arrays.toString(values) + + ", vector=" + + vector + ']'; } + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + @Override public void closeInternal() { - blockFactory().adjustBreaker(-ramBytesUsed(), true); + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); + Releasables.closeExpectNoException(vector); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java index 0a3ada321d94c..3c5f6b7448321 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java @@ -15,27 +15,20 @@ * Vector implementation that stores an array of long values. * This class is generated. Do not edit it. */ -public final class LongArrayVector extends AbstractVector implements LongVector { +final class LongArrayVector extends AbstractVector implements LongVector { static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(LongArrayVector.class); private final long[] values; - private final LongBlock block; - - public LongArrayVector(long[] values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public LongArrayVector(long[] values, int positionCount, BlockFactory blockFactory) { + LongArrayVector(long[] values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new LongVectorBlock(this); } @Override public LongBlock asBlock() { - return block; + return new LongVectorBlock(this); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java new file mode 100644 index 0000000000000..9eb8a527a96b5 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java @@ -0,0 +1,174 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.data; + +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.core.Releasables; + +import java.util.BitSet; + +/** + * Block implementation that stores values in a {@link LongBigArrayVector}. Does not take ownership of the given + * {@link LongArray} and does not adjust circuit breakers to account for it. + * This class is generated. Do not edit it. + */ +public final class LongBigArrayBlock extends AbstractArrayBlock implements LongBlock { + + private static final long BASE_RAM_BYTES_USED = 0; // TODO: fix this + private final LongBigArrayVector vector; + + public LongBigArrayBlock( + LongArray values, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + this( + new LongBigArrayVector(values, firstValueIndexes == null ? positionCount : firstValueIndexes[positionCount], blockFactory), + positionCount, + firstValueIndexes, + nulls, + mvOrdering, + blockFactory + ); + } + + private LongBigArrayBlock( + LongBigArrayVector vector, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + this.vector = vector; + assert firstValueIndexes == null + ? vector.getPositionCount() == getPositionCount() + : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); + } + + @Override + public LongVector asVector() { + return null; + } + + @Override + public long getLong(int valueIndex) { + return vector.getLong(valueIndex); + } + + @Override + public LongBlock filter(int... positions) { + // TODO use reference counting to share the vector + try (var builder = blockFactory().newLongBlockBuilder(positions.length)) { + for (int pos : positions) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int valueCount = getValueCount(pos); + int first = getFirstValueIndex(pos); + if (valueCount == 1) { + builder.appendLong(getLong(getFirstValueIndex(pos))); + } else { + builder.beginPositionEntry(); + for (int c = 0; c < valueCount; c++) { + builder.appendLong(getLong(first + c)); + } + builder.endPositionEntry(); + } + } + return builder.mvOrdering(mvOrdering()).build(); + } + } + + @Override + public ElementType elementType() { + return ElementType.LONG; + } + + @Override + public LongBlock expand() { + if (firstValueIndexes == null) { + incRef(); + return this; + } + if (nullsMask == null) { + vector.incRef(); + return vector.asBlock(); + } + + // The following line is correct because positions with multi-values are never null. + int expandedPositionCount = vector.getPositionCount(); + long bitSetRamUsedEstimate = Math.max(nullsMask.size(), BlockRamUsageEstimator.sizeOfBitSet(expandedPositionCount)); + blockFactory().adjustBreaker(bitSetRamUsedEstimate); + + LongBigArrayBlock expanded = new LongBigArrayBlock( + vector, + expandedPositionCount, + null, + shiftNullsToExpandedPositions(), + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + blockFactory() + ); + blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); + // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
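+ // For example, a block holding [[1L, 2L], null, [3L]] expands to four single-valued positions [1L], [2L], null, [3L], reusing this block's vector.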
+ vector.incRef(); + return expanded; + } + + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + } + + @Override + public long ramBytesUsed() { + return ramBytesUsedOnlyBlock() + RamUsageEstimator.sizeOf(vector); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof LongBlock that) { + return LongBlock.equals(this, that); + } + return false; + } + + @Override + public int hashCode() { + return LongBlock.hash(this); + } + + @Override + public String toString() { + return getClass().getSimpleName() + + "[positions=" + + getPositionCount() + + ", mvOrdering=" + + mvOrdering() + + ", ramBytesUsed=" + + vector.ramBytesUsed() + + ']'; + } + + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + + @Override + public void closeInternal() { + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); + Releasables.closeExpectNoException(vector); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java index 2101b606e9a90..d5ea5c9e2a453 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java @@ -12,30 +12,24 @@ import org.elasticsearch.core.Releasable; /** - * Vector implementation that defers to an enclosed LongArray. + * Vector implementation that defers to an enclosed {@link LongArray}. + * Does not take ownership of the array and does not adjust circuit breakers to account for it. * This class is generated. Do not edit it. */ public final class LongBigArrayVector extends AbstractVector implements LongVector, Releasable { - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(LongBigArrayVector.class); + private static final long BASE_RAM_BYTES_USED = 0; // FIXME private final LongArray values; - private final LongBlock block; - - public LongBigArrayVector(LongArray values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - public LongBigArrayVector(LongArray values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new LongVectorBlock(this); } @Override public LongBlock asBlock() { - return block; + return new LongVectorBlock(this); } @Override @@ -61,7 +55,7 @@ public long ramBytesUsed() { @Override public LongVector filter(int... positions) { var blockFactory = blockFactory(); - final LongArray filtered = blockFactory.bigArrays().newLongArray(positions.length, true); + final LongArray filtered = blockFactory.bigArrays().newLongArray(positions.length); for (int i = 0; i < positions.length; i++) { filtered.set(i, values.get(positions[i])); } @@ -69,11 +63,9 @@ public LongVector filter(int... positions) { } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; + public void closeInternal() { + // The circuit breaker that tracks the values {@link LongArray} is adjusted outside + // of this class. 
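+ // closeInternal() runs once, when the last reference to this vector is released, so the wrapped LongArray cannot be closed twice.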
values.close(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java index 41ac8f7237f64..5e5dc0606b896 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java @@ -18,7 +18,7 @@ * Block that stores long values. * This class is generated. Do not edit it. */ -public sealed interface LongBlock extends Block permits LongArrayBlock, LongVectorBlock, ConstantNullBlock { +public sealed interface LongBlock extends Block permits LongArrayBlock, LongVectorBlock, ConstantNullBlock, LongBigArrayBlock { /** * Retrieves the long value stored at the given value index. @@ -167,44 +167,6 @@ static int hash(LongBlock block) { return result; } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newLongBlockBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newBlockBuilder(int estimatedSize) { - return newBlockBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a builder. - * @deprecated use {@link BlockFactory#newLongBlockBuilder} - */ - @Deprecated - static Builder newBlockBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newLongBlockBuilder(estimatedSize); - } - - /** - * Returns a constant block built by the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newConstantLongBlockWith} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static LongBlock newConstantBlockWith(long value, int positions) { - return newConstantBlockWith(value, positions, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a constant block. 
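These removals complete the migration away from the static, non-breaking entry points; call sites now go through a `BlockFactory` instance, as the replacements elsewhere in this diff show. A small sketch of the before/after shape (the factory and builder calls match this diff; the wrapper class is illustrative):

```java
import org.elasticsearch.compute.data.BlockFactory;
import org.elasticsearch.compute.data.LongBlock;

final class BuilderMigrationSketch {
    static LongBlock buildExample(BlockFactory blockFactory) {
        // Before (deprecated, removed by this change):
        //   LongBlock.newBlockBuilder(2, blockFactory)
        //   LongBlock.newConstantBlockWith(1L, 3, blockFactory)
        // After: ask the factory directly, and close the builder when done.
        try (LongBlock.Builder builder = blockFactory.newLongBlockBuilder(2)) {
            builder.appendLong(1L);
            builder.appendLong(2L);
            return builder.build();
        }
    }
}
```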
- * @deprecated use {@link BlockFactory#newConstantLongBlockWith} - */ - @Deprecated - static LongBlock newConstantBlockWith(long value, int positions, BlockFactory blockFactory) { - return blockFactory.newConstantLongBlockWith(value, positions); - } - /** * Builder for {@link LongBlock} */ diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlockBuilder.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlockBuilder.java index 1692c4cff6a57..b74831599276b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlockBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlockBuilder.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.util.LongArray; import java.util.Arrays; @@ -179,6 +180,29 @@ public LongBlockBuilder mvOrdering(Block.MvOrdering mvOrdering) { return this; } + private LongBlock buildBigArraysBlock() { + final LongBlock theBlock; + final LongArray array = blockFactory.bigArrays().newLongArray(valueCount, false); + for (int i = 0; i < valueCount; i++) { + array.set(i, values[i]); + } + if (isDense() && singleValued()) { + theBlock = new LongBigArrayVector(array, positionCount, blockFactory).asBlock(); + } else { + theBlock = new LongBigArrayBlock(array, positionCount, firstValueIndexes, nullsMask, mvOrdering, blockFactory); + } + /* + * Update the breaker with the actual bytes used. + * We adjust the breaker below even though we've already used the bytes. That's weird, + * but if we break here we will throw away the used memory, letting + * it be deallocated. The exception will bubble up and the builder will + * still technically be open, meaning the calling code should close it + * which will return all used memory to the breaker.
+ */ + blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes - array.ramBytesUsed()); + return theBlock; + } + @Override public LongBlock build() { try { @@ -187,20 +211,26 @@ public LongBlock build() { if (hasNonNullValue && positionCount == 1 && valueCount == 1) { theBlock = blockFactory.newConstantLongBlockWith(values[0], 1, estimatedBytes); } else { - if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { - values = Arrays.copyOf(values, valueCount); - } - if (isDense() && singleValued()) { - theBlock = blockFactory.newLongArrayVector(values, positionCount, estimatedBytes).asBlock(); + if (estimatedBytes > blockFactory.maxPrimitiveArrayBytes()) { + theBlock = buildBigArraysBlock(); } else { - theBlock = blockFactory.newLongArrayBlock( - values, - positionCount, - firstValueIndexes, - nullsMask, - mvOrdering, - estimatedBytes - ); + if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { + adjustBreaker(valueCount * elementSize()); + values = Arrays.copyOf(values, valueCount); + adjustBreaker(-values.length * elementSize()); + } + if (isDense() && singleValued()) { + theBlock = blockFactory.newLongArrayVector(values, positionCount, estimatedBytes).asBlock(); + } else { + theBlock = blockFactory.newLongArrayBlock( + values, + positionCount, + firstValueIndexes, + nullsMask, + mvOrdering, + estimatedBytes + ); + } } } built(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java index a312d7aeab0cc..358f5b32366cb 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java @@ -102,40 +102,10 @@ default void writeTo(StreamOutput out) throws IOException { } } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance nonbreaking block factory}. - * @deprecated use {@link BlockFactory#newLongVectorBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newVectorBuilder(int estimatedSize) { - return newVectorBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Creates a builder that grows as needed. Prefer {@link #newVectorFixedBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#newLongVectorBuilder} - */ - @Deprecated - static Builder newVectorBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newLongVectorBuilder(estimatedSize); - } - - /** - * Creates a builder that never grows. Prefer this over {@link #newVectorBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#newLongVectorFixedBuilder} - */ - @Deprecated - static FixedBuilder newVectorFixedBuilder(int size, BlockFactory blockFactory) { - return blockFactory.newLongVectorFixedBuilder(size); - } - /** * A builder that grows as needed. */ - sealed interface Builder extends Vector.Builder permits LongVectorBuilder { + sealed interface Builder extends Vector.Builder permits LongVectorBuilder, FixedBuilder { /** * Appends a long to the current entry. */ @@ -148,13 +118,11 @@ sealed interface Builder extends Vector.Builder permits LongVectorBuilder { /** * A builder that never grows. 
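The new `build()` logic picks a backing store by size: blocks whose estimated footprint exceeds `blockFactory.maxPrimitiveArrayBytes()` are copied into a BigArrays-backed representation instead of a single `long[]`. A toy sketch of that cutoff decision, with a made-up threshold and page size (the real code delegates to `LongBigArrayVector`/`LongBigArrayBlock` as shown above):

```java
import java.util.ArrayList;
import java.util.List;

final class ToyStorageChoice {
    // Made-up cutoff; the real builder asks blockFactory.maxPrimitiveArrayBytes().
    static final long MAX_PRIMITIVE_ARRAY_BYTES = 16 * 1024 * 1024;

    sealed interface Storage permits HeapStorage, PagedStorage {}

    record HeapStorage(long[] values) implements Storage {}

    record PagedStorage(List<long[]> pages) implements Storage {}

    static Storage build(long[] values, long estimatedBytes) {
        if (estimatedBytes > MAX_PRIMITIVE_ARRAY_BYTES) {
            // Big-array path: copy into fixed-size pages so no single huge array is allocated.
            int pageSize = 1 << 14;
            List<long[]> pages = new ArrayList<>();
            for (int i = 0; i < values.length; i += pageSize) {
                int len = Math.min(pageSize, values.length - i);
                long[] page = new long[len];
                System.arraycopy(values, i, page, 0, len);
                pages.add(page);
            }
            return new PagedStorage(pages);
        }
        // Small blocks keep the plain primitive array.
        return new HeapStorage(values);
    }
}
```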
*/ - sealed interface FixedBuilder extends Vector.Builder permits LongVectorFixedBuilder { + sealed interface FixedBuilder extends Builder permits LongVectorFixedBuilder { /** * Appends a long to the current entry. */ - FixedBuilder appendLong(long value); - @Override - LongVector build(); + FixedBuilder appendLong(long value); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java index 589a9341188fc..1f4565fec5a8d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java @@ -10,7 +10,7 @@ import org.elasticsearch.core.Releasables; /** - * Block view of a LongVector. + * Block view of a {@link LongVector}. Cannot represent multi-values or nulls. * This class is generated. Do not edit it. */ public final class LongVectorBlock extends AbstractVectorBlock implements LongBlock { @@ -73,11 +73,6 @@ public String toString() { return getClass().getSimpleName() + "[vector=" + vector + "]"; } - @Override - public boolean isReleased() { - return super.isReleased() || vector.isReleased(); - } - @Override public void closeInternal() { assert (vector.isReleased() == false) : "can't release block [" + this + "] containing already released vector"; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorFixedBuilder.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorFixedBuilder.java index 4a11012e769d8..ccf87da153667 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorFixedBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorFixedBuilder.java @@ -27,7 +27,7 @@ final class LongVectorFixedBuilder implements LongVector.FixedBuilder { LongVectorFixedBuilder(int size, BlockFactory blockFactory) { preAdjustedBytes = ramBytesUsed(size); - blockFactory.adjustBreaker(preAdjustedBytes, false); + blockFactory.adjustBreaker(preAdjustedBytes); this.blockFactory = blockFactory; this.values = new long[size]; } @@ -70,7 +70,7 @@ public void close() { if (nextIndex >= 0) { // If nextIndex < 0 we've already built the vector nextIndex = -1; - blockFactory.adjustBreaker(-preAdjustedBytes, false); + blockFactory.adjustBreaker(-preAdjustedBytes); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java index 1fd4c1ea3562d..89388cd9cc109 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java @@ -49,7 +49,7 @@ public BytesRefBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { block.incRef(); return block; } - try (BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int 
first = block.getFirstValueIndex(p); @@ -99,7 +99,7 @@ public BytesRefBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { block.incRef(); return block; } - try (BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -129,7 +129,7 @@ public BytesRefBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { block.incRef(); return block; } - try (BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java index 157b6670e95af..6066dbe8a74e0 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java @@ -46,7 +46,7 @@ public DoubleBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { block.incRef(); return block; } - try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (DoubleBlock.Builder builder = blockFactory.newDoubleBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -96,7 +96,7 @@ public DoubleBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { block.incRef(); return block; } - try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (DoubleBlock.Builder builder = blockFactory.newDoubleBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -126,7 +126,7 @@ public DoubleBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { block.incRef(); return block; } - try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (DoubleBlock.Builder builder = blockFactory.newDoubleBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java index 7bc9d77d3f877..3961208d5e46f 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java @@ -28,6 +28,7 @@ public class MultivalueDedupeInt { * The choice of number has been experimentally derived. 
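For context on the constant above: `dedupeToBlockAdaptive` chooses between the two strategies named in this diff, roughly an O(n²) "copy missing" scan for short multivalue runs and copy-and-sort for long ones, with `ALWAYS_COPY_MISSING` as the crossover. A self-contained toy version of the two strategies (not the block-based ES implementation):

```java
import java.util.Arrays;

final class ToyDedupe {
    // Crossover point between the two strategies; mirrors the constant above.
    static final int ALWAYS_COPY_MISSING = 300;

    static long[] dedupe(long[] values) {
        return values.length < ALWAYS_COPY_MISSING ? copyMissing(values) : copyAndSort(values);
    }

    /** O(n^2) scan; cheap for short runs because it avoids sorting. */
    static long[] copyMissing(long[] values) {
        long[] work = new long[values.length];
        int w = 0;
        outer: for (long v : values) {
            for (int i = 0; i < w; i++) {
                if (work[i] == v) {
                    continue outer; // duplicate, skip
                }
            }
            work[w++] = v;
        }
        return Arrays.copyOf(work, w);
    }

    /** Sort first so duplicates become adjacent; wins for long runs. */
    static long[] copyAndSort(long[] values) {
        long[] sorted = values.clone();
        Arrays.sort(sorted);
        int w = 0;
        for (int i = 0; i < sorted.length; i++) {
            if (i == 0 || sorted[i] != sorted[i - 1]) {
                sorted[w++] = sorted[i];
            }
        }
        return Arrays.copyOf(sorted, w);
    }
}
```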
*/ private static final int ALWAYS_COPY_MISSING = 300; + private final IntBlock block; private int[] work = new int[ArrayUtil.oversize(2, Integer.BYTES)]; private int w; @@ -45,7 +46,7 @@ public IntBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { block.incRef(); return block; } - try (IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -95,7 +96,7 @@ public IntBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { block.incRef(); return block; } - try (IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -125,7 +126,7 @@ public IntBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { block.incRef(); return block; } - try (IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java index acbc9139a75c5..a3012ffa551b2 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java @@ -47,7 +47,7 @@ public LongBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { block.incRef(); return block; } - try (LongBlock.Builder builder = LongBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (LongBlock.Builder builder = blockFactory.newLongBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -97,7 +97,7 @@ public LongBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { block.incRef(); return block; } - try (LongBlock.Builder builder = LongBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (LongBlock.Builder builder = blockFactory.newLongBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -127,7 +127,7 @@ public LongBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { block.incRef(); return block; } - try (LongBlock.Builder builder = LongBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (LongBlock.Builder builder = blockFactory.newLongBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBoolean.java 
b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBoolean.java index 3d568adc2b5ea..184ef69f00d85 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBoolean.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBoolean.java @@ -24,7 +24,7 @@ class ResultBuilderForBoolean implements ResultBuilder { ResultBuilderForBoolean(BlockFactory blockFactory, TopNEncoder encoder, boolean inKey, int initialSize) { assert encoder == TopNEncoder.DEFAULT_UNSORTABLE : encoder.toString(); this.inKey = inKey; - this.builder = BooleanBlock.newBlockBuilder(initialSize, blockFactory); + this.builder = blockFactory.newBooleanBlockBuilder(initialSize); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBytesRef.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBytesRef.java index e37f82f3363a9..4008f7fbd924b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBytesRef.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBytesRef.java @@ -28,7 +28,7 @@ class ResultBuilderForBytesRef implements ResultBuilder { ResultBuilderForBytesRef(BlockFactory blockFactory, TopNEncoder encoder, boolean inKey, int initialSize) { this.encoder = encoder; this.inKey = inKey; - this.builder = BytesRefBlock.newBlockBuilder(initialSize, blockFactory); + this.builder = blockFactory.newBytesRefBlockBuilder(initialSize); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForDouble.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForDouble.java index 77c976c6e0085..f06a1e814ef43 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForDouble.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForDouble.java @@ -24,7 +24,7 @@ class ResultBuilderForDouble implements ResultBuilder { ResultBuilderForDouble(BlockFactory blockFactory, TopNEncoder encoder, boolean inKey, int initialSize) { assert encoder == TopNEncoder.DEFAULT_UNSORTABLE : encoder.toString(); this.inKey = inKey; - this.builder = DoubleBlock.newBlockBuilder(initialSize, blockFactory); + this.builder = blockFactory.newDoubleBlockBuilder(initialSize); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForInt.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForInt.java index 389ed3bc2e3c3..848bbf9ab6a0a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForInt.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForInt.java @@ -24,7 +24,7 @@ class ResultBuilderForInt implements ResultBuilder { ResultBuilderForInt(BlockFactory blockFactory, TopNEncoder encoder, boolean inKey, int initialSize) { assert encoder == TopNEncoder.DEFAULT_UNSORTABLE : encoder.toString(); this.inKey = inKey; - this.builder = 
IntBlock.newBlockBuilder(initialSize, blockFactory); + this.builder = blockFactory.newIntBlockBuilder(initialSize); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForLong.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForLong.java index 63ee9d35c59e5..b4361ad83180a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForLong.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForLong.java @@ -24,7 +24,7 @@ class ResultBuilderForLong implements ResultBuilder { ResultBuilderForLong(BlockFactory blockFactory, TopNEncoder encoder, boolean inKey, int initialSize) { assert encoder == TopNEncoder.DEFAULT_UNSORTABLE : encoder.toString(); this.inKey = inKey; - this.builder = LongBlock.newBlockBuilder(initialSize, blockFactory); + this.builder = blockFactory.newLongBlockBuilder(initialSize); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunction.java index dd5450d3b460c..e9b4498d50265 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunction.java @@ -86,14 +86,18 @@ private void addRawBlock(BooleanBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block fbitUncast = page.getBlock(channels.get(0)); + if (fbitUncast.areAllValuesNull()) { return; } - BooleanVector fbit = page.getBlock(channels.get(0)).asVector(); - BooleanVector tbit = page.getBlock(channels.get(1)).asVector(); + BooleanVector fbit = ((BooleanBlock) fbitUncast).asVector(); assert fbit.getPositionCount() == 1; - assert fbit.getPositionCount() == tbit.getPositionCount(); + Block tbitUncast = page.getBlock(channels.get(1)); + if (tbitUncast.areAllValuesNull()) { + return; + } + BooleanVector tbit = ((BooleanBlock) tbitUncast).asVector(); + assert tbit.getPositionCount() == 1; CountDistinctBooleanAggregator.combineIntermediate(state, fbit.getBoolean(0), tbit.getBoolean(0)); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunctionSupplier.java index 87eb2b97974da..9512f4e76c49c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,13 +15,9 @@ * This class is 
generated. Do not edit it. */ public final class CountDistinctBooleanAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; - public CountDistinctBooleanAggregatorFunctionSupplier(BigArrays bigArrays, - List channels) { - this.bigArrays = bigArrays; + public CountDistinctBooleanAggregatorFunctionSupplier(List channels) { this.channels = channels; } @@ -34,7 +29,7 @@ public CountDistinctBooleanAggregatorFunction aggregator(DriverContext driverCon @Override public CountDistinctBooleanGroupingAggregatorFunction groupingAggregator( DriverContext driverContext) { - return CountDistinctBooleanGroupingAggregatorFunction.create(channels, driverContext, bigArrays); + return CountDistinctBooleanGroupingAggregatorFunction.create(channels, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunction.java index adc09f8bba828..eb618f4569ce7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunction.java @@ -9,7 +9,6 @@ import java.lang.String; import java.lang.StringBuilder; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BooleanVector; @@ -34,20 +33,16 @@ public final class CountDistinctBooleanGroupingAggregatorFunction implements Gro private final DriverContext driverContext; - private final BigArrays bigArrays; - public CountDistinctBooleanGroupingAggregatorFunction(List channels, - CountDistinctBooleanAggregator.GroupingState state, DriverContext driverContext, - BigArrays bigArrays) { + CountDistinctBooleanAggregator.GroupingState state, DriverContext driverContext) { this.channels = channels; this.state = state; this.driverContext = driverContext; - this.bigArrays = bigArrays; } public static CountDistinctBooleanGroupingAggregatorFunction create(List channels, - DriverContext driverContext, BigArrays bigArrays) { - return new CountDistinctBooleanGroupingAggregatorFunction(channels, CountDistinctBooleanAggregator.initGrouping(bigArrays), driverContext, bigArrays); + DriverContext driverContext) { + return new CountDistinctBooleanGroupingAggregatorFunction(channels, CountDistinctBooleanAggregator.initGrouping(driverContext.bigArrays()), driverContext); } public static List intermediateStateDesc() { diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunction.java index fd770678d5943..83917ba218285 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunction.java @@ -10,7 +10,6 @@ import java.lang.StringBuilder; import java.util.List; import org.apache.lucene.util.BytesRef; -import 
org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; @@ -32,22 +31,19 @@ public final class CountDistinctBytesRefAggregatorFunction implements Aggregator private final List channels; - private final BigArrays bigArrays; - private final int precision; public CountDistinctBytesRefAggregatorFunction(DriverContext driverContext, - List channels, HllStates.SingleState state, BigArrays bigArrays, int precision) { + List channels, HllStates.SingleState state, int precision) { this.driverContext = driverContext; this.channels = channels; this.state = state; - this.bigArrays = bigArrays; this.precision = precision; } public static CountDistinctBytesRefAggregatorFunction create(DriverContext driverContext, - List channels, BigArrays bigArrays, int precision) { - return new CountDistinctBytesRefAggregatorFunction(driverContext, channels, CountDistinctBytesRefAggregator.initSingle(bigArrays, precision), bigArrays, precision); + List channels, int precision) { + return new CountDistinctBytesRefAggregatorFunction(driverContext, channels, CountDistinctBytesRefAggregator.initSingle(driverContext.bigArrays(), precision), precision); } public static List intermediateStateDesc() { @@ -95,11 +91,11 @@ private void addRawBlock(BytesRefBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block hllUncast = page.getBlock(channels.get(0)); + if (hllUncast.areAllValuesNull()) { return; } - BytesRefVector hll = page.getBlock(channels.get(0)).asVector(); + BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); assert hll.getPositionCount() == 1; BytesRef scratch = new BytesRef(); CountDistinctBytesRefAggregator.combineIntermediate(state, hll.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunctionSupplier.java index 3f336519ac69f..b05c529c2ce9b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,28 +15,24 @@ * This class is generated. Do not edit it. 
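The `addIntermediateInput` changes in these aggregators all follow one hardened pattern: every intermediate channel is checked with `areAllValuesNull()` before the block is cast and unwrapped, rather than checking only channel 0. A condensed sketch of that read pattern (method names match the diff; the wrapper class is illustrative):

```java
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.compute.data.Block;
import org.elasticsearch.compute.data.BytesRefBlock;
import org.elasticsearch.compute.data.BytesRefVector;
import org.elasticsearch.compute.data.Page;

final class IntermediateReadSketch {
    /** Returns the serialized HLL state from one channel, or null if the input carried none. */
    static BytesRef readSingleHll(Page page, int channel) {
        Block hllUncast = page.getBlock(channel);
        if (hllUncast.areAllValuesNull()) {
            return null; // nothing to combine from this input
        }
        BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector();
        assert hll.getPositionCount() == 1;
        return hll.getBytesRef(0, new BytesRef());
    }
}
```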
*/ public final class CountDistinctBytesRefAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; private final int precision; - public CountDistinctBytesRefAggregatorFunctionSupplier(BigArrays bigArrays, - List channels, int precision) { - this.bigArrays = bigArrays; + public CountDistinctBytesRefAggregatorFunctionSupplier(List channels, int precision) { this.channels = channels; this.precision = precision; } @Override public CountDistinctBytesRefAggregatorFunction aggregator(DriverContext driverContext) { - return CountDistinctBytesRefAggregatorFunction.create(driverContext, channels, bigArrays, precision); + return CountDistinctBytesRefAggregatorFunction.create(driverContext, channels, precision); } @Override public CountDistinctBytesRefGroupingAggregatorFunction groupingAggregator( DriverContext driverContext) { - return CountDistinctBytesRefGroupingAggregatorFunction.create(channels, driverContext, bigArrays, precision); + return CountDistinctBytesRefGroupingAggregatorFunction.create(channels, driverContext, precision); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunction.java index 46cfbcc99a373..ba2eaf66bf2af 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunction.java @@ -10,7 +10,6 @@ import java.lang.StringBuilder; import java.util.List; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; @@ -34,23 +33,19 @@ public final class CountDistinctBytesRefGroupingAggregatorFunction implements Gr private final DriverContext driverContext; - private final BigArrays bigArrays; - private final int precision; public CountDistinctBytesRefGroupingAggregatorFunction(List channels, - HllStates.GroupingState state, DriverContext driverContext, BigArrays bigArrays, - int precision) { + HllStates.GroupingState state, DriverContext driverContext, int precision) { this.channels = channels; this.state = state; this.driverContext = driverContext; - this.bigArrays = bigArrays; this.precision = precision; } public static CountDistinctBytesRefGroupingAggregatorFunction create(List channels, - DriverContext driverContext, BigArrays bigArrays, int precision) { - return new CountDistinctBytesRefGroupingAggregatorFunction(channels, CountDistinctBytesRefAggregator.initGrouping(bigArrays, precision), driverContext, bigArrays, precision); + DriverContext driverContext, int precision) { + return new CountDistinctBytesRefGroupingAggregatorFunction(channels, CountDistinctBytesRefAggregator.initGrouping(driverContext.bigArrays(), precision), driverContext, precision); } public static List intermediateStateDesc() { diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunction.java index 
a8169b5a901e1..c720df313bd99 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunction.java @@ -10,7 +10,6 @@ import java.lang.StringBuilder; import java.util.List; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; @@ -34,22 +33,19 @@ public final class CountDistinctDoubleAggregatorFunction implements AggregatorFu private final List channels; - private final BigArrays bigArrays; - private final int precision; public CountDistinctDoubleAggregatorFunction(DriverContext driverContext, List channels, - HllStates.SingleState state, BigArrays bigArrays, int precision) { + HllStates.SingleState state, int precision) { this.driverContext = driverContext; this.channels = channels; this.state = state; - this.bigArrays = bigArrays; this.precision = precision; } public static CountDistinctDoubleAggregatorFunction create(DriverContext driverContext, - List channels, BigArrays bigArrays, int precision) { - return new CountDistinctDoubleAggregatorFunction(driverContext, channels, CountDistinctDoubleAggregator.initSingle(bigArrays, precision), bigArrays, precision); + List channels, int precision) { + return new CountDistinctDoubleAggregatorFunction(driverContext, channels, CountDistinctDoubleAggregator.initSingle(driverContext.bigArrays(), precision), precision); } public static List intermediateStateDesc() { @@ -95,11 +91,11 @@ private void addRawBlock(DoubleBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block hllUncast = page.getBlock(channels.get(0)); + if (hllUncast.areAllValuesNull()) { return; } - BytesRefVector hll = page.getBlock(channels.get(0)).asVector(); + BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); assert hll.getPositionCount() == 1; BytesRef scratch = new BytesRef(); CountDistinctDoubleAggregator.combineIntermediate(state, hll.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunctionSupplier.java index ee33aefb5242c..0a15ebb07ecf4 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,28 +15,24 @@ * This class is generated. Do not edit it. 
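The supplier constructors across these files lose their `BigArrays` parameter; the grouping state is now allocated from `driverContext.bigArrays()` when the aggregator is created. A usage sketch under that new shape (channel index and precision are made-up example values):

```java
import java.util.List;
import org.elasticsearch.compute.aggregation.CountDistinctDoubleAggregatorFunctionSupplier;
import org.elasticsearch.compute.operator.DriverContext;

final class SupplierUsageSketch {
    static void createAggregators(DriverContext driverContext) {
        // Old form (removed): new CountDistinctDoubleAggregatorFunctionSupplier(bigArrays, channels, precision)
        var supplier = new CountDistinctDoubleAggregatorFunctionSupplier(List.of(0), 3000);
        var aggregator = supplier.aggregator(driverContext);       // per-driver, non-grouping
        var grouping = supplier.groupingAggregator(driverContext); // BigArrays now comes from the context
    }
}
```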
*/ public final class CountDistinctDoubleAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; private final int precision; - public CountDistinctDoubleAggregatorFunctionSupplier(BigArrays bigArrays, List channels, - int precision) { - this.bigArrays = bigArrays; + public CountDistinctDoubleAggregatorFunctionSupplier(List channels, int precision) { this.channels = channels; this.precision = precision; } @Override public CountDistinctDoubleAggregatorFunction aggregator(DriverContext driverContext) { - return CountDistinctDoubleAggregatorFunction.create(driverContext, channels, bigArrays, precision); + return CountDistinctDoubleAggregatorFunction.create(driverContext, channels, precision); } @Override public CountDistinctDoubleGroupingAggregatorFunction groupingAggregator( DriverContext driverContext) { - return CountDistinctDoubleGroupingAggregatorFunction.create(channels, driverContext, bigArrays, precision); + return CountDistinctDoubleGroupingAggregatorFunction.create(channels, driverContext, precision); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunction.java index 84ed6d2f06329..2bb273bf2598c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunction.java @@ -10,7 +10,6 @@ import java.lang.StringBuilder; import java.util.List; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; @@ -36,23 +35,19 @@ public final class CountDistinctDoubleGroupingAggregatorFunction implements Grou private final DriverContext driverContext; - private final BigArrays bigArrays; - private final int precision; public CountDistinctDoubleGroupingAggregatorFunction(List channels, - HllStates.GroupingState state, DriverContext driverContext, BigArrays bigArrays, - int precision) { + HllStates.GroupingState state, DriverContext driverContext, int precision) { this.channels = channels; this.state = state; this.driverContext = driverContext; - this.bigArrays = bigArrays; this.precision = precision; } public static CountDistinctDoubleGroupingAggregatorFunction create(List channels, - DriverContext driverContext, BigArrays bigArrays, int precision) { - return new CountDistinctDoubleGroupingAggregatorFunction(channels, CountDistinctDoubleAggregator.initGrouping(bigArrays, precision), driverContext, bigArrays, precision); + DriverContext driverContext, int precision) { + return new CountDistinctDoubleGroupingAggregatorFunction(channels, CountDistinctDoubleAggregator.initGrouping(driverContext.bigArrays(), precision), driverContext, precision); } public static List intermediateStateDesc() { diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunction.java index 9f685f4672939..083c483a9c9bc 100644 --- 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunction.java @@ -10,7 +10,6 @@ import java.lang.StringBuilder; import java.util.List; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; @@ -34,22 +33,19 @@ public final class CountDistinctIntAggregatorFunction implements AggregatorFunct private final List channels; - private final BigArrays bigArrays; - private final int precision; public CountDistinctIntAggregatorFunction(DriverContext driverContext, List channels, - HllStates.SingleState state, BigArrays bigArrays, int precision) { + HllStates.SingleState state, int precision) { this.driverContext = driverContext; this.channels = channels; this.state = state; - this.bigArrays = bigArrays; this.precision = precision; } public static CountDistinctIntAggregatorFunction create(DriverContext driverContext, - List channels, BigArrays bigArrays, int precision) { - return new CountDistinctIntAggregatorFunction(driverContext, channels, CountDistinctIntAggregator.initSingle(bigArrays, precision), bigArrays, precision); + List channels, int precision) { + return new CountDistinctIntAggregatorFunction(driverContext, channels, CountDistinctIntAggregator.initSingle(driverContext.bigArrays(), precision), precision); } public static List intermediateStateDesc() { @@ -95,11 +91,11 @@ private void addRawBlock(IntBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block hllUncast = page.getBlock(channels.get(0)); + if (hllUncast.areAllValuesNull()) { return; } - BytesRefVector hll = page.getBlock(channels.get(0)).asVector(); + BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); assert hll.getPositionCount() == 1; BytesRef scratch = new BytesRef(); CountDistinctIntAggregator.combineIntermediate(state, hll.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunctionSupplier.java index 315df36fcaa1b..fec5b7df48a21 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,28 +15,24 @@ * This class is generated. Do not edit it. 
*/ public final class CountDistinctIntAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; private final int precision; - public CountDistinctIntAggregatorFunctionSupplier(BigArrays bigArrays, List channels, - int precision) { - this.bigArrays = bigArrays; + public CountDistinctIntAggregatorFunctionSupplier(List channels, int precision) { this.channels = channels; this.precision = precision; } @Override public CountDistinctIntAggregatorFunction aggregator(DriverContext driverContext) { - return CountDistinctIntAggregatorFunction.create(driverContext, channels, bigArrays, precision); + return CountDistinctIntAggregatorFunction.create(driverContext, channels, precision); } @Override public CountDistinctIntGroupingAggregatorFunction groupingAggregator( DriverContext driverContext) { - return CountDistinctIntGroupingAggregatorFunction.create(channels, driverContext, bigArrays, precision); + return CountDistinctIntGroupingAggregatorFunction.create(channels, driverContext, precision); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunction.java index b5dc7e43467a8..6e1017d962254 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunction.java @@ -10,7 +10,6 @@ import java.lang.StringBuilder; import java.util.List; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; @@ -34,23 +33,19 @@ public final class CountDistinctIntGroupingAggregatorFunction implements Groupin private final DriverContext driverContext; - private final BigArrays bigArrays; - private final int precision; public CountDistinctIntGroupingAggregatorFunction(List channels, - HllStates.GroupingState state, DriverContext driverContext, BigArrays bigArrays, - int precision) { + HllStates.GroupingState state, DriverContext driverContext, int precision) { this.channels = channels; this.state = state; this.driverContext = driverContext; - this.bigArrays = bigArrays; this.precision = precision; } public static CountDistinctIntGroupingAggregatorFunction create(List channels, - DriverContext driverContext, BigArrays bigArrays, int precision) { - return new CountDistinctIntGroupingAggregatorFunction(channels, CountDistinctIntAggregator.initGrouping(bigArrays, precision), driverContext, bigArrays, precision); + DriverContext driverContext, int precision) { + return new CountDistinctIntGroupingAggregatorFunction(channels, CountDistinctIntAggregator.initGrouping(driverContext.bigArrays(), precision), driverContext, precision); } public static List intermediateStateDesc() { diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunction.java index 55b396aa627d5..ee6fb5b470442 100644 --- 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunction.java @@ -10,7 +10,6 @@ import java.lang.StringBuilder; import java.util.List; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; @@ -34,22 +33,19 @@ public final class CountDistinctLongAggregatorFunction implements AggregatorFunc private final List channels; - private final BigArrays bigArrays; - private final int precision; public CountDistinctLongAggregatorFunction(DriverContext driverContext, List channels, - HllStates.SingleState state, BigArrays bigArrays, int precision) { + HllStates.SingleState state, int precision) { this.driverContext = driverContext; this.channels = channels; this.state = state; - this.bigArrays = bigArrays; this.precision = precision; } public static CountDistinctLongAggregatorFunction create(DriverContext driverContext, - List channels, BigArrays bigArrays, int precision) { - return new CountDistinctLongAggregatorFunction(driverContext, channels, CountDistinctLongAggregator.initSingle(bigArrays, precision), bigArrays, precision); + List channels, int precision) { + return new CountDistinctLongAggregatorFunction(driverContext, channels, CountDistinctLongAggregator.initSingle(driverContext.bigArrays(), precision), precision); } public static List intermediateStateDesc() { @@ -95,11 +91,11 @@ private void addRawBlock(LongBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block hllUncast = page.getBlock(channels.get(0)); + if (hllUncast.areAllValuesNull()) { return; } - BytesRefVector hll = page.getBlock(channels.get(0)).asVector(); + BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); assert hll.getPositionCount() == 1; BytesRef scratch = new BytesRef(); CountDistinctLongAggregator.combineIntermediate(state, hll.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunctionSupplier.java index 6069b5744b31b..e3cc788215d39 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,28 +15,24 @@ * This class is generated. Do not edit it. 
*/ public final class CountDistinctLongAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; private final int precision; - public CountDistinctLongAggregatorFunctionSupplier(BigArrays bigArrays, List channels, - int precision) { - this.bigArrays = bigArrays; + public CountDistinctLongAggregatorFunctionSupplier(List channels, int precision) { this.channels = channels; this.precision = precision; } @Override public CountDistinctLongAggregatorFunction aggregator(DriverContext driverContext) { - return CountDistinctLongAggregatorFunction.create(driverContext, channels, bigArrays, precision); + return CountDistinctLongAggregatorFunction.create(driverContext, channels, precision); } @Override public CountDistinctLongGroupingAggregatorFunction groupingAggregator( DriverContext driverContext) { - return CountDistinctLongGroupingAggregatorFunction.create(channels, driverContext, bigArrays, precision); + return CountDistinctLongGroupingAggregatorFunction.create(channels, driverContext, precision); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunction.java index 6c69845dbf107..d0c6cedeed2ff 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunction.java @@ -10,7 +10,6 @@ import java.lang.StringBuilder; import java.util.List; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; @@ -36,23 +35,19 @@ public final class CountDistinctLongGroupingAggregatorFunction implements Groupi private final DriverContext driverContext; - private final BigArrays bigArrays; - private final int precision; public CountDistinctLongGroupingAggregatorFunction(List channels, - HllStates.GroupingState state, DriverContext driverContext, BigArrays bigArrays, - int precision) { + HllStates.GroupingState state, DriverContext driverContext, int precision) { this.channels = channels; this.state = state; this.driverContext = driverContext; - this.bigArrays = bigArrays; this.precision = precision; } public static CountDistinctLongGroupingAggregatorFunction create(List channels, - DriverContext driverContext, BigArrays bigArrays, int precision) { - return new CountDistinctLongGroupingAggregatorFunction(channels, CountDistinctLongAggregator.initGrouping(bigArrays, precision), driverContext, bigArrays, precision); + DriverContext driverContext, int precision) { + return new CountDistinctLongGroupingAggregatorFunction(channels, CountDistinctLongAggregator.initGrouping(driverContext.bigArrays(), precision), driverContext, precision); } public static List intermediateStateDesc() { diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunction.java index 6929900c29ea1..f78a8773ccfcd 100644 --- 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunction.java @@ -90,14 +90,18 @@ private void addRawBlock(DoubleBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block maxUncast = page.getBlock(channels.get(0)); + if (maxUncast.areAllValuesNull()) { return; } - DoubleVector max = page.getBlock(channels.get(0)).asVector(); - BooleanVector seen = page.getBlock(channels.get(1)).asVector(); + DoubleVector max = ((DoubleBlock) maxUncast).asVector(); assert max.getPositionCount() == 1; - assert max.getPositionCount() == seen.getPositionCount(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; if (seen.getBoolean(0)) { state.doubleValue(MaxDoubleAggregator.combine(state.doubleValue(), max.getDouble(0))); state.seen(true); @@ -112,10 +116,10 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { if (state.seen() == false) { - blocks[offset] = Block.constantNullBlock(1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); return; } - blocks[offset] = DoubleBlock.newConstantBlockWith(state.doubleValue(), 1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantDoubleBlockWith(state.doubleValue(), 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunctionSupplier.java index 850fff9a946ba..af878fc778985 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,12 +15,9 @@ * This class is generated. Do not edit it. 
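The Max aggregators combine the same per-channel null checks with the new constant-block helpers on `BlockFactory`. A sketch that fuses the intermediate read and the final emission from this diff into one illustrative method (in the real code these live in `addIntermediateInput` and `evaluateFinal`):

```java
import org.elasticsearch.compute.data.Block;
import org.elasticsearch.compute.data.BooleanBlock;
import org.elasticsearch.compute.data.BooleanVector;
import org.elasticsearch.compute.data.DoubleBlock;
import org.elasticsearch.compute.data.DoubleVector;
import org.elasticsearch.compute.data.Page;
import org.elasticsearch.compute.operator.DriverContext;

final class MaxIntermediateSketch {
    static Block finalMax(Page page, int maxChannel, int seenChannel, DriverContext driverContext) {
        Block maxUncast = page.getBlock(maxChannel);
        Block seenUncast = page.getBlock(seenChannel);
        if (maxUncast.areAllValuesNull() || seenUncast.areAllValuesNull()) {
            return driverContext.blockFactory().newConstantNullBlock(1);
        }
        DoubleVector max = ((DoubleBlock) maxUncast).asVector();
        BooleanVector seen = ((BooleanBlock) seenUncast).asVector();
        if (seen.getBoolean(0) == false) {
            // No value was ever observed: emit null, as evaluateFinal does.
            return driverContext.blockFactory().newConstantNullBlock(1);
        }
        return driverContext.blockFactory().newConstantDoubleBlockWith(max.getDouble(0), 1);
    }
}
```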
*/ public final class MaxDoubleAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; - public MaxDoubleAggregatorFunctionSupplier(BigArrays bigArrays, List channels) { - this.bigArrays = bigArrays; + public MaxDoubleAggregatorFunctionSupplier(List channels) { this.channels = channels; } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunction.java index 1759442fbb12a..6f83ee7224879 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunction.java @@ -90,14 +90,18 @@ private void addRawBlock(IntBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block maxUncast = page.getBlock(channels.get(0)); + if (maxUncast.areAllValuesNull()) { return; } - IntVector max = page.getBlock(channels.get(0)).asVector(); - BooleanVector seen = page.getBlock(channels.get(1)).asVector(); + IntVector max = ((IntBlock) maxUncast).asVector(); assert max.getPositionCount() == 1; - assert max.getPositionCount() == seen.getPositionCount(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; if (seen.getBoolean(0)) { state.intValue(MaxIntAggregator.combine(state.intValue(), max.getInt(0))); state.seen(true); @@ -112,10 +116,10 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { if (state.seen() == false) { - blocks[offset] = Block.constantNullBlock(1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); return; } - blocks[offset] = IntBlock.newConstantBlockWith(state.intValue(), 1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantIntBlockWith(state.intValue(), 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunctionSupplier.java index cfac1c68fc065..5e0a4e2172696 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,12 +15,9 @@ * This class is generated. Do not edit it. 
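The supplier half of the refactor is equally uniform: every `*AggregatorFunctionSupplier` drops its `BigArrays` field and constructor parameter, keeping only the output channels (plus per-function parameters such as `precision` or `percentile`), and `BigArrays` is instead pulled from `DriverContext.bigArrays()` at the moment the aggregator is created. A toy model of the new shape, with simplified stand-in types:

```java
import java.util.List;

// Simplified stand-ins; the real types live in the org.elasticsearch.compute
// and org.elasticsearch.common.util packages.
interface BigArrays {}
record DriverContext(BigArrays bigArrays) {}

final class MaxDoubleSupplierSketch {
    private final List<Integer> channels;

    // Plan-time configuration only: no BigArrays captured at construction.
    MaxDoubleSupplierSketch(List<Integer> channels) {
        this.channels = channels;
    }

    // BigArrays is resolved per driver at creation time, which (plausibly)
    // ties big-array allocation to the context that actually runs the work.
    String groupingAggregator(DriverContext driverContext) {
        return "initGrouping(" + driverContext.bigArrays() + ") for channels " + channels;
    }
}

public class SupplierRefactorDemo {
    public static void main(String[] args) {
        var supplier = new MaxDoubleSupplierSketch(List.of(0));
        System.out.println(supplier.groupingAggregator(new DriverContext(new BigArrays() {})));
    }
}
```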
*/ public final class MaxIntAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; - public MaxIntAggregatorFunctionSupplier(BigArrays bigArrays, List channels) { - this.bigArrays = bigArrays; + public MaxIntAggregatorFunctionSupplier(List channels) { this.channels = channels; } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunction.java index fe7d797faf10a..8826128a68837 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunction.java @@ -90,14 +90,18 @@ private void addRawBlock(LongBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block maxUncast = page.getBlock(channels.get(0)); + if (maxUncast.areAllValuesNull()) { return; } - LongVector max = page.getBlock(channels.get(0)).asVector(); - BooleanVector seen = page.getBlock(channels.get(1)).asVector(); + LongVector max = ((LongBlock) maxUncast).asVector(); assert max.getPositionCount() == 1; - assert max.getPositionCount() == seen.getPositionCount(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; if (seen.getBoolean(0)) { state.longValue(MaxLongAggregator.combine(state.longValue(), max.getLong(0))); state.seen(true); @@ -112,10 +116,10 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { if (state.seen() == false) { - blocks[offset] = Block.constantNullBlock(1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); return; } - blocks[offset] = LongBlock.newConstantBlockWith(state.longValue(), 1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantLongBlockWith(state.longValue(), 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunctionSupplier.java index 5f1b1d1e7dc82..f4d17da186d58 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,12 +15,9 @@ * This class is generated. Do not edit it. 
*/ public final class MaxLongAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; - public MaxLongAggregatorFunctionSupplier(BigArrays bigArrays, List channels) { - this.bigArrays = bigArrays; + public MaxLongAggregatorFunctionSupplier(List channels) { this.channels = channels; } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunction.java index a2e8d8fbf592c..4bcf08ce0fa35 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunction.java @@ -88,11 +88,11 @@ private void addRawBlock(DoubleBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block quartUncast = page.getBlock(channels.get(0)); + if (quartUncast.areAllValuesNull()) { return; } - BytesRefVector quart = page.getBlock(channels.get(0)).asVector(); + BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); assert quart.getPositionCount() == 1; BytesRef scratch = new BytesRef(); MedianAbsoluteDeviationDoubleAggregator.combineIntermediate(state, quart.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunctionSupplier.java index 44cea6eab23bb..4720ce08fa282 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,13 +15,9 @@ * This class is generated. Do not edit it. 
*/ public final class MedianAbsoluteDeviationDoubleAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; - public MedianAbsoluteDeviationDoubleAggregatorFunctionSupplier(BigArrays bigArrays, - List channels) { - this.bigArrays = bigArrays; + public MedianAbsoluteDeviationDoubleAggregatorFunctionSupplier(List channels) { this.channels = channels; } @@ -34,7 +29,7 @@ public MedianAbsoluteDeviationDoubleAggregatorFunction aggregator(DriverContext @Override public MedianAbsoluteDeviationDoubleGroupingAggregatorFunction groupingAggregator( DriverContext driverContext) { - return MedianAbsoluteDeviationDoubleGroupingAggregatorFunction.create(channels, driverContext, bigArrays); + return MedianAbsoluteDeviationDoubleGroupingAggregatorFunction.create(channels, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunction.java index 08bbde35e592e..39d65eabbe4b7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunction.java @@ -10,7 +10,6 @@ import java.lang.StringBuilder; import java.util.List; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; @@ -36,19 +35,16 @@ public final class MedianAbsoluteDeviationDoubleGroupingAggregatorFunction imple private final DriverContext driverContext; - private final BigArrays bigArrays; - public MedianAbsoluteDeviationDoubleGroupingAggregatorFunction(List channels, - QuantileStates.GroupingState state, DriverContext driverContext, BigArrays bigArrays) { + QuantileStates.GroupingState state, DriverContext driverContext) { this.channels = channels; this.state = state; this.driverContext = driverContext; - this.bigArrays = bigArrays; } public static MedianAbsoluteDeviationDoubleGroupingAggregatorFunction create( - List channels, DriverContext driverContext, BigArrays bigArrays) { - return new MedianAbsoluteDeviationDoubleGroupingAggregatorFunction(channels, MedianAbsoluteDeviationDoubleAggregator.initGrouping(bigArrays), driverContext, bigArrays); + List channels, DriverContext driverContext) { + return new MedianAbsoluteDeviationDoubleGroupingAggregatorFunction(channels, MedianAbsoluteDeviationDoubleAggregator.initGrouping(driverContext.bigArrays()), driverContext); } public static List intermediateStateDesc() { diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunction.java index 21e99587a5d09..db9dbdab52244 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunction.java +++ 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunction.java @@ -88,11 +88,11 @@ private void addRawBlock(IntBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block quartUncast = page.getBlock(channels.get(0)); + if (quartUncast.areAllValuesNull()) { return; } - BytesRefVector quart = page.getBlock(channels.get(0)).asVector(); + BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); assert quart.getPositionCount() == 1; BytesRef scratch = new BytesRef(); MedianAbsoluteDeviationIntAggregator.combineIntermediate(state, quart.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunctionSupplier.java index c00fb4b0c7b5e..e72918359b2f6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,13 +15,9 @@ * This class is generated. Do not edit it. 
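Unlike min/max/sum, the median-absolute-deviation (and, further down, percentile) aggregators ship their entire intermediate state as a single `BytesRef` channel named `quart`: a serialized quantile sketch that the merge step deserializes and combines via `combineIntermediate`. Given that the module declares `requires org.elasticsearch.tdigest` (see the `module-info.java` hunk below), the payload is presumably a t-digest. A toy sketch of the single-blob intermediate, with a trivial (count, sum) pair standing in for the digest:

```java
import java.nio.ByteBuffer;

// Toy model of a serialized-state intermediate: each partial result travels
// as one opaque byte blob, merged by deserialize-and-combine. The real code
// ships a quantile sketch inside a BytesRef; this (count, sum) pair is just
// a stand-in to show the mechanics.
public class SerializedStateDemo {
    static byte[] encode(long count, double sum) {
        return ByteBuffer.allocate(16).putLong(count).putDouble(sum).array();
    }

    static void combineIntermediate(long[] count, double[] sum, byte[] blob) {
        ByteBuffer in = ByteBuffer.wrap(blob);
        count[0] += in.getLong();
        sum[0] += in.getDouble();
    }

    public static void main(String[] args) {
        long[] count = new long[1];
        double[] sum = new double[1];
        combineIntermediate(count, sum, encode(3, 1.5));
        combineIntermediate(count, sum, encode(2, 4.0));
        System.out.println("merged: count=" + count[0] + " sum=" + sum[0]);
    }
}
```

Because the whole state is one channel, only that one block needs the null-check before casting, which is why these hunks are shorter than the min/max ones.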
*/ public final class MedianAbsoluteDeviationIntAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; - public MedianAbsoluteDeviationIntAggregatorFunctionSupplier(BigArrays bigArrays, - List channels) { - this.bigArrays = bigArrays; + public MedianAbsoluteDeviationIntAggregatorFunctionSupplier(List channels) { this.channels = channels; } @@ -34,7 +29,7 @@ public MedianAbsoluteDeviationIntAggregatorFunction aggregator(DriverContext dri @Override public MedianAbsoluteDeviationIntGroupingAggregatorFunction groupingAggregator( DriverContext driverContext) { - return MedianAbsoluteDeviationIntGroupingAggregatorFunction.create(channels, driverContext, bigArrays); + return MedianAbsoluteDeviationIntGroupingAggregatorFunction.create(channels, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunction.java index cad7e172e67bb..ec7b21fc440a1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunction.java @@ -10,7 +10,6 @@ import java.lang.StringBuilder; import java.util.List; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; @@ -34,19 +33,16 @@ public final class MedianAbsoluteDeviationIntGroupingAggregatorFunction implemen private final DriverContext driverContext; - private final BigArrays bigArrays; - public MedianAbsoluteDeviationIntGroupingAggregatorFunction(List channels, - QuantileStates.GroupingState state, DriverContext driverContext, BigArrays bigArrays) { + QuantileStates.GroupingState state, DriverContext driverContext) { this.channels = channels; this.state = state; this.driverContext = driverContext; - this.bigArrays = bigArrays; } public static MedianAbsoluteDeviationIntGroupingAggregatorFunction create(List channels, - DriverContext driverContext, BigArrays bigArrays) { - return new MedianAbsoluteDeviationIntGroupingAggregatorFunction(channels, MedianAbsoluteDeviationIntAggregator.initGrouping(bigArrays), driverContext, bigArrays); + DriverContext driverContext) { + return new MedianAbsoluteDeviationIntGroupingAggregatorFunction(channels, MedianAbsoluteDeviationIntAggregator.initGrouping(driverContext.bigArrays()), driverContext); } public static List intermediateStateDesc() { diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunction.java index 8c3aa95864aff..bf5fd51d7ed17 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunction.java @@ -88,11 +88,11 @@ private void 
addRawBlock(LongBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block quartUncast = page.getBlock(channels.get(0)); + if (quartUncast.areAllValuesNull()) { return; } - BytesRefVector quart = page.getBlock(channels.get(0)).asVector(); + BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); assert quart.getPositionCount() == 1; BytesRef scratch = new BytesRef(); MedianAbsoluteDeviationLongAggregator.combineIntermediate(state, quart.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunctionSupplier.java index 71b0488488227..aa79691ba220e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,13 +15,9 @@ * This class is generated. Do not edit it. */ public final class MedianAbsoluteDeviationLongAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; - public MedianAbsoluteDeviationLongAggregatorFunctionSupplier(BigArrays bigArrays, - List channels) { - this.bigArrays = bigArrays; + public MedianAbsoluteDeviationLongAggregatorFunctionSupplier(List channels) { this.channels = channels; } @@ -34,7 +29,7 @@ public MedianAbsoluteDeviationLongAggregatorFunction aggregator(DriverContext dr @Override public MedianAbsoluteDeviationLongGroupingAggregatorFunction groupingAggregator( DriverContext driverContext) { - return MedianAbsoluteDeviationLongGroupingAggregatorFunction.create(channels, driverContext, bigArrays); + return MedianAbsoluteDeviationLongGroupingAggregatorFunction.create(channels, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunction.java index dae97d17db711..4028bac4628a1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunction.java @@ -10,7 +10,6 @@ import java.lang.StringBuilder; import java.util.List; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; @@ -36,19 +35,16 @@ public final class MedianAbsoluteDeviationLongGroupingAggregatorFunction impleme private final 
DriverContext driverContext; - private final BigArrays bigArrays; - public MedianAbsoluteDeviationLongGroupingAggregatorFunction(List channels, - QuantileStates.GroupingState state, DriverContext driverContext, BigArrays bigArrays) { + QuantileStates.GroupingState state, DriverContext driverContext) { this.channels = channels; this.state = state; this.driverContext = driverContext; - this.bigArrays = bigArrays; } public static MedianAbsoluteDeviationLongGroupingAggregatorFunction create(List channels, - DriverContext driverContext, BigArrays bigArrays) { - return new MedianAbsoluteDeviationLongGroupingAggregatorFunction(channels, MedianAbsoluteDeviationLongAggregator.initGrouping(bigArrays), driverContext, bigArrays); + DriverContext driverContext) { + return new MedianAbsoluteDeviationLongGroupingAggregatorFunction(channels, MedianAbsoluteDeviationLongAggregator.initGrouping(driverContext.bigArrays()), driverContext); } public static List intermediateStateDesc() { diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunction.java index 1f9a8fb49fb2d..7d7544e5d8470 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunction.java @@ -90,14 +90,18 @@ private void addRawBlock(DoubleBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block minUncast = page.getBlock(channels.get(0)); + if (minUncast.areAllValuesNull()) { return; } - DoubleVector min = page.getBlock(channels.get(0)).asVector(); - BooleanVector seen = page.getBlock(channels.get(1)).asVector(); + DoubleVector min = ((DoubleBlock) minUncast).asVector(); assert min.getPositionCount() == 1; - assert min.getPositionCount() == seen.getPositionCount(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; if (seen.getBoolean(0)) { state.doubleValue(MinDoubleAggregator.combine(state.doubleValue(), min.getDouble(0))); state.seen(true); @@ -112,10 +116,10 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { if (state.seen() == false) { - blocks[offset] = Block.constantNullBlock(1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); return; } - blocks[offset] = DoubleBlock.newConstantBlockWith(state.doubleValue(), 1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantDoubleBlockWith(state.doubleValue(), 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunctionSupplier.java index 1dcc4126dc508..f91cdfe54c89e 100644 --- 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,12 +15,9 @@ * This class is generated. Do not edit it. */ public final class MinDoubleAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; - public MinDoubleAggregatorFunctionSupplier(BigArrays bigArrays, List channels) { - this.bigArrays = bigArrays; + public MinDoubleAggregatorFunctionSupplier(List channels) { this.channels = channels; } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntAggregatorFunction.java index bbeba4c8374ab..0f2385cc120f9 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntAggregatorFunction.java @@ -90,14 +90,18 @@ private void addRawBlock(IntBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block minUncast = page.getBlock(channels.get(0)); + if (minUncast.areAllValuesNull()) { return; } - IntVector min = page.getBlock(channels.get(0)).asVector(); - BooleanVector seen = page.getBlock(channels.get(1)).asVector(); + IntVector min = ((IntBlock) minUncast).asVector(); assert min.getPositionCount() == 1; - assert min.getPositionCount() == seen.getPositionCount(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; if (seen.getBoolean(0)) { state.intValue(MinIntAggregator.combine(state.intValue(), min.getInt(0))); state.seen(true); @@ -112,10 +116,10 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { if (state.seen() == false) { - blocks[offset] = Block.constantNullBlock(1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); return; } - blocks[offset] = IntBlock.newConstantBlockWith(state.intValue(), 1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantIntBlockWith(state.intValue(), 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntAggregatorFunctionSupplier.java index d1a6411c2cf2c..e09102a1a88ec 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntAggregatorFunctionSupplier.java +++ 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,12 +15,9 @@ * This class is generated. Do not edit it. */ public final class MinIntAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; - public MinIntAggregatorFunctionSupplier(BigArrays bigArrays, List channels) { - this.bigArrays = bigArrays; + public MinIntAggregatorFunctionSupplier(List channels) { this.channels = channels; } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongAggregatorFunction.java index 5299b505e124c..805729588158e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongAggregatorFunction.java @@ -90,14 +90,18 @@ private void addRawBlock(LongBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block minUncast = page.getBlock(channels.get(0)); + if (minUncast.areAllValuesNull()) { return; } - LongVector min = page.getBlock(channels.get(0)).asVector(); - BooleanVector seen = page.getBlock(channels.get(1)).asVector(); + LongVector min = ((LongBlock) minUncast).asVector(); assert min.getPositionCount() == 1; - assert min.getPositionCount() == seen.getPositionCount(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; if (seen.getBoolean(0)) { state.longValue(MinLongAggregator.combine(state.longValue(), min.getLong(0))); state.seen(true); @@ -112,10 +116,10 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { if (state.seen() == false) { - blocks[offset] = Block.constantNullBlock(1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); return; } - blocks[offset] = LongBlock.newConstantBlockWith(state.longValue(), 1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantLongBlockWith(state.longValue(), 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongAggregatorFunctionSupplier.java index 4015e8de18e7b..85805767c9168 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; 
import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,12 +15,9 @@ * This class is generated. Do not edit it. */ public final class MinLongAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; - public MinLongAggregatorFunctionSupplier(BigArrays bigArrays, List channels) { - this.bigArrays = bigArrays; + public MinLongAggregatorFunctionSupplier(List channels) { this.channels = channels; } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunction.java index f7560379e476d..cd7a5b5974442 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunction.java @@ -91,11 +91,11 @@ private void addRawBlock(DoubleBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block quartUncast = page.getBlock(channels.get(0)); + if (quartUncast.areAllValuesNull()) { return; } - BytesRefVector quart = page.getBlock(channels.get(0)).asVector(); + BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); assert quart.getPositionCount() == 1; BytesRef scratch = new BytesRef(); PercentileDoubleAggregator.combineIntermediate(state, quart.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunctionSupplier.java index b32c28ef133ec..996cebd805aa8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,15 +15,11 @@ * This class is generated. Do not edit it. 
*/ public final class PercentileDoubleAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; private final double percentile; - public PercentileDoubleAggregatorFunctionSupplier(BigArrays bigArrays, List channels, - double percentile) { - this.bigArrays = bigArrays; + public PercentileDoubleAggregatorFunctionSupplier(List channels, double percentile) { this.channels = channels; this.percentile = percentile; } @@ -37,7 +32,7 @@ public PercentileDoubleAggregatorFunction aggregator(DriverContext driverContext @Override public PercentileDoubleGroupingAggregatorFunction groupingAggregator( DriverContext driverContext) { - return PercentileDoubleGroupingAggregatorFunction.create(channels, driverContext, bigArrays, percentile); + return PercentileDoubleGroupingAggregatorFunction.create(channels, driverContext, percentile); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunction.java index 232287356174c..e8c9fe4728308 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunction.java @@ -10,7 +10,6 @@ import java.lang.StringBuilder; import java.util.List; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; @@ -36,23 +35,19 @@ public final class PercentileDoubleGroupingAggregatorFunction implements Groupin private final DriverContext driverContext; - private final BigArrays bigArrays; - private final double percentile; public PercentileDoubleGroupingAggregatorFunction(List channels, - QuantileStates.GroupingState state, DriverContext driverContext, BigArrays bigArrays, - double percentile) { + QuantileStates.GroupingState state, DriverContext driverContext, double percentile) { this.channels = channels; this.state = state; this.driverContext = driverContext; - this.bigArrays = bigArrays; this.percentile = percentile; } public static PercentileDoubleGroupingAggregatorFunction create(List channels, - DriverContext driverContext, BigArrays bigArrays, double percentile) { - return new PercentileDoubleGroupingAggregatorFunction(channels, PercentileDoubleAggregator.initGrouping(bigArrays, percentile), driverContext, bigArrays, percentile); + DriverContext driverContext, double percentile) { + return new PercentileDoubleGroupingAggregatorFunction(channels, PercentileDoubleAggregator.initGrouping(driverContext.bigArrays(), percentile), driverContext, percentile); } public static List intermediateStateDesc() { diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunction.java index d45ba7a1e350a..b9b1c2e90b768 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunction.java +++ 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunction.java @@ -91,11 +91,11 @@ private void addRawBlock(IntBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block quartUncast = page.getBlock(channels.get(0)); + if (quartUncast.areAllValuesNull()) { return; } - BytesRefVector quart = page.getBlock(channels.get(0)).asVector(); + BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); assert quart.getPositionCount() == 1; BytesRef scratch = new BytesRef(); PercentileIntAggregator.combineIntermediate(state, quart.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunctionSupplier.java index 72893a1dd95b3..7ce62dd7b600b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,15 +15,11 @@ * This class is generated. Do not edit it. */ public final class PercentileIntAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; private final double percentile; - public PercentileIntAggregatorFunctionSupplier(BigArrays bigArrays, List channels, - double percentile) { - this.bigArrays = bigArrays; + public PercentileIntAggregatorFunctionSupplier(List channels, double percentile) { this.channels = channels; this.percentile = percentile; } @@ -36,7 +31,7 @@ public PercentileIntAggregatorFunction aggregator(DriverContext driverContext) { @Override public PercentileIntGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { - return PercentileIntGroupingAggregatorFunction.create(channels, driverContext, bigArrays, percentile); + return PercentileIntGroupingAggregatorFunction.create(channels, driverContext, percentile); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunction.java index 1b4e49fa1e040..fb4e06784823d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunction.java @@ -10,7 +10,6 @@ import java.lang.StringBuilder; import java.util.List; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; @@ -34,23 +33,19 @@ public final class 
PercentileIntGroupingAggregatorFunction implements GroupingAg private final DriverContext driverContext; - private final BigArrays bigArrays; - private final double percentile; public PercentileIntGroupingAggregatorFunction(List channels, - QuantileStates.GroupingState state, DriverContext driverContext, BigArrays bigArrays, - double percentile) { + QuantileStates.GroupingState state, DriverContext driverContext, double percentile) { this.channels = channels; this.state = state; this.driverContext = driverContext; - this.bigArrays = bigArrays; this.percentile = percentile; } public static PercentileIntGroupingAggregatorFunction create(List channels, - DriverContext driverContext, BigArrays bigArrays, double percentile) { - return new PercentileIntGroupingAggregatorFunction(channels, PercentileIntAggregator.initGrouping(bigArrays, percentile), driverContext, bigArrays, percentile); + DriverContext driverContext, double percentile) { + return new PercentileIntGroupingAggregatorFunction(channels, PercentileIntAggregator.initGrouping(driverContext.bigArrays(), percentile), driverContext, percentile); } public static List intermediateStateDesc() { diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunction.java index dac045d814926..cc785ce55bb55 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunction.java @@ -91,11 +91,11 @@ private void addRawBlock(LongBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block quartUncast = page.getBlock(channels.get(0)); + if (quartUncast.areAllValuesNull()) { return; } - BytesRefVector quart = page.getBlock(channels.get(0)).asVector(); + BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); assert quart.getPositionCount() == 1; BytesRef scratch = new BytesRef(); PercentileLongAggregator.combineIntermediate(state, quart.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunctionSupplier.java index a71de850814ff..7e32bfc9d9937 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,15 +15,11 @@ * This class is generated. Do not edit it. 
*/ public final class PercentileLongAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; private final double percentile; - public PercentileLongAggregatorFunctionSupplier(BigArrays bigArrays, List channels, - double percentile) { - this.bigArrays = bigArrays; + public PercentileLongAggregatorFunctionSupplier(List channels, double percentile) { this.channels = channels; this.percentile = percentile; } @@ -36,7 +31,7 @@ public PercentileLongAggregatorFunction aggregator(DriverContext driverContext) @Override public PercentileLongGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { - return PercentileLongGroupingAggregatorFunction.create(channels, driverContext, bigArrays, percentile); + return PercentileLongGroupingAggregatorFunction.create(channels, driverContext, percentile); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunction.java index 1382aa8b27331..45ce7d0d1c267 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunction.java @@ -10,7 +10,6 @@ import java.lang.StringBuilder; import java.util.List; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; @@ -36,23 +35,19 @@ public final class PercentileLongGroupingAggregatorFunction implements GroupingA private final DriverContext driverContext; - private final BigArrays bigArrays; - private final double percentile; public PercentileLongGroupingAggregatorFunction(List channels, - QuantileStates.GroupingState state, DriverContext driverContext, BigArrays bigArrays, - double percentile) { + QuantileStates.GroupingState state, DriverContext driverContext, double percentile) { this.channels = channels; this.state = state; this.driverContext = driverContext; - this.bigArrays = bigArrays; this.percentile = percentile; } public static PercentileLongGroupingAggregatorFunction create(List channels, - DriverContext driverContext, BigArrays bigArrays, double percentile) { - return new PercentileLongGroupingAggregatorFunction(channels, PercentileLongAggregator.initGrouping(bigArrays, percentile), driverContext, bigArrays, percentile); + DriverContext driverContext, double percentile) { + return new PercentileLongGroupingAggregatorFunction(channels, PercentileLongAggregator.initGrouping(driverContext.bigArrays(), percentile), driverContext, percentile); } public static List intermediateStateDesc() { diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunction.java index 5520c587555b3..354726f82b8f3 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunction.java @@ 
-91,15 +91,24 @@ private void addRawBlock(DoubleBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block valueUncast = page.getBlock(channels.get(0)); + if (valueUncast.areAllValuesNull()) { return; } - DoubleVector value = page.getBlock(channels.get(0)).asVector(); - DoubleVector delta = page.getBlock(channels.get(1)).asVector(); - BooleanVector seen = page.getBlock(channels.get(2)).asVector(); + DoubleVector value = ((DoubleBlock) valueUncast).asVector(); assert value.getPositionCount() == 1; - assert value.getPositionCount() == delta.getPositionCount() && value.getPositionCount() == seen.getPositionCount(); + Block deltaUncast = page.getBlock(channels.get(1)); + if (deltaUncast.areAllValuesNull()) { + return; + } + DoubleVector delta = ((DoubleBlock) deltaUncast).asVector(); + assert delta.getPositionCount() == 1; + Block seenUncast = page.getBlock(channels.get(2)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; SumDoubleAggregator.combineIntermediate(state, value.getDouble(0), delta.getDouble(0), seen.getBoolean(0)); } @@ -111,7 +120,7 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { if (state.seen() == false) { - blocks[offset] = Block.constantNullBlock(1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); return; } blocks[offset] = SumDoubleAggregator.evaluateFinal(state, driverContext); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunctionSupplier.java index d6898669ab339..b68bed30013c6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,12 +15,9 @@ * This class is generated. Do not edit it. 
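`SumDoubleAggregatorFunction` is the one aggregator here with three intermediate channels: `value`, `delta`, and `seen`, each now null-checked and cast individually. The value/delta pair has the shape of compensated (Kahan-style) summation, where a separate term carries the low-order bits that a plain `+=` drops; the actual combine logic lives in `SumDoubleAggregator`, outside this diff, so treat this as an illustration of the idea rather than the exact scheme:

```java
// Compensated (Kahan) summation: the "delta" tracks rounding error so that
// a long run of small additions does not drift.
public class KahanDemo {
    public static void main(String[] args) {
        double naive = 0;
        double value = 0;  // running sum, like the "value" channel
        double delta = 0;  // compensation term, like the "delta" channel
        for (int i = 0; i < 10_000_000; i++) {
            double x = 0.1;
            naive += x;
            double corrected = x - delta;      // re-apply previously lost bits
            double t = value + corrected;      // big add, loses low-order bits
            delta = (t - value) - corrected;   // measure what was lost
            value = t;
        }
        System.out.println("naive: " + naive); // drifts away from 1,000,000
        System.out.println("kahan: " + value); // much closer to 1,000,000
    }
}
```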
*/ public final class SumDoubleAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; - public SumDoubleAggregatorFunctionSupplier(BigArrays bigArrays, List channels) { - this.bigArrays = bigArrays; + public SumDoubleAggregatorFunctionSupplier(List channels) { this.channels = channels; } @@ -32,7 +28,7 @@ public SumDoubleAggregatorFunction aggregator(DriverContext driverContext) { @Override public SumDoubleGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { - return SumDoubleGroupingAggregatorFunction.create(channels, driverContext, bigArrays); + return SumDoubleGroupingAggregatorFunction.create(channels, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunction.java index 6e2207ca069cd..f60a3c8cf152a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunction.java @@ -9,7 +9,6 @@ import java.lang.String; import java.lang.StringBuilder; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BooleanVector; @@ -37,20 +36,16 @@ public final class SumDoubleGroupingAggregatorFunction implements GroupingAggreg private final DriverContext driverContext; - private final BigArrays bigArrays; - public SumDoubleGroupingAggregatorFunction(List channels, - SumDoubleAggregator.GroupingSumState state, DriverContext driverContext, - BigArrays bigArrays) { + SumDoubleAggregator.GroupingSumState state, DriverContext driverContext) { this.channels = channels; this.state = state; this.driverContext = driverContext; - this.bigArrays = bigArrays; } public static SumDoubleGroupingAggregatorFunction create(List channels, - DriverContext driverContext, BigArrays bigArrays) { - return new SumDoubleGroupingAggregatorFunction(channels, SumDoubleAggregator.initGrouping(bigArrays), driverContext, bigArrays); + DriverContext driverContext) { + return new SumDoubleGroupingAggregatorFunction(channels, SumDoubleAggregator.initGrouping(driverContext.bigArrays()), driverContext); } public static List intermediateStateDesc() { diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntAggregatorFunction.java index 1225b90bf09f7..e210429991aa6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntAggregatorFunction.java @@ -92,14 +92,18 @@ private void addRawBlock(IntBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block sumUncast = page.getBlock(channels.get(0)); + if 
(sumUncast.areAllValuesNull()) { return; } - LongVector sum = page.getBlock(channels.get(0)).asVector(); - BooleanVector seen = page.getBlock(channels.get(1)).asVector(); + LongVector sum = ((LongBlock) sumUncast).asVector(); assert sum.getPositionCount() == 1; - assert sum.getPositionCount() == seen.getPositionCount(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; if (seen.getBoolean(0)) { state.longValue(SumIntAggregator.combine(state.longValue(), sum.getLong(0))); state.seen(true); @@ -114,10 +118,10 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { if (state.seen() == false) { - blocks[offset] = Block.constantNullBlock(1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); return; } - blocks[offset] = LongBlock.newConstantBlockWith(state.longValue(), 1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantLongBlockWith(state.longValue(), 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntAggregatorFunctionSupplier.java index 01294de12de45..dcb48944dc557 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,12 +15,9 @@ * This class is generated. Do not edit it. 
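One more detail visible in the `SumIntAggregatorFunction` hunk above: although the raw inputs are ints, the intermediate `sum` channel is a `LongVector` and the state is a `long`, so accumulating many large int values cannot silently overflow. A toy comparison:

```java
// Summing ints into an int overflows silently; the aggregator's long-typed
// state avoids that. Values are chosen so the int accumulator wraps.
public class IntSumDemo {
    public static void main(String[] args) {
        int[] values = new int[4_000];
        java.util.Arrays.fill(values, Integer.MAX_VALUE / 1_000);
        int intSum = 0;
        long longSum = 0;
        for (int v : values) {
            intSum += v;   // wraps around past Integer.MAX_VALUE
            longSum += v;  // stays exact, like the long-typed state
        }
        System.out.println("int sum:  " + intSum);
        System.out.println("long sum: " + longSum);
    }
}
```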
*/ public final class SumIntAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; - public SumIntAggregatorFunctionSupplier(BigArrays bigArrays, List channels) { - this.bigArrays = bigArrays; + public SumIntAggregatorFunctionSupplier(List channels) { this.channels = channels; } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongAggregatorFunction.java index 720e7ca9f3bbf..38d1b3de78265 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongAggregatorFunction.java @@ -90,14 +90,18 @@ private void addRawBlock(LongBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block sumUncast = page.getBlock(channels.get(0)); + if (sumUncast.areAllValuesNull()) { return; } - LongVector sum = page.getBlock(channels.get(0)).asVector(); - BooleanVector seen = page.getBlock(channels.get(1)).asVector(); + LongVector sum = ((LongBlock) sumUncast).asVector(); assert sum.getPositionCount() == 1; - assert sum.getPositionCount() == seen.getPositionCount(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; if (seen.getBoolean(0)) { state.longValue(SumLongAggregator.combine(state.longValue(), sum.getLong(0))); state.seen(true); @@ -112,10 +116,10 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { if (state.seen() == false) { - blocks[offset] = Block.constantNullBlock(1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); return; } - blocks[offset] = LongBlock.newConstantBlockWith(state.longValue(), 1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantLongBlockWith(state.longValue(), 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongAggregatorFunctionSupplier.java index d72927d181f12..b4d36aa526075 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongAggregatorFunctionSupplier.java @@ -8,7 +8,6 @@ import java.lang.Override; import java.lang.String; import java.util.List; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.operator.DriverContext; /** @@ -16,12 +15,9 @@ * This class is generated. Do not edit it. 
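The `addIntermediateInput` rework repeated across the sum aggregators above fixes a subtle issue: the old code cast both intermediate blocks to vectors up front and asserted equal position counts, which fails when only one of the channels arrives as an all-null block. A distilled sketch of the new per-channel pattern; the member names mirror the generated code, but the method is an illustration, not the generated source itself:

```java
void addIntermediateInput(Page page) {
    Block sumUncast = page.getBlock(channels.get(0));
    if (sumUncast.areAllValuesNull()) {
        return; // no sum on this page at all
    }
    LongVector sum = ((LongBlock) sumUncast).asVector();
    assert sum.getPositionCount() == 1;
    Block seenUncast = page.getBlock(channels.get(1));
    if (seenUncast.areAllValuesNull()) {
        return; // `seen` is checked independently of `sum`
    }
    BooleanVector seen = ((BooleanBlock) seenUncast).asVector();
    assert seen.getPositionCount() == 1;
    if (seen.getBoolean(0)) {
        state.longValue(SumLongAggregator.combine(state.longValue(), sum.getLong(0)));
        state.seen(true);
    }
}
```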
*/ public final class SumLongAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - private final BigArrays bigArrays; - private final List channels; - public SumLongAggregatorFunctionSupplier(BigArrays bigArrays, List channels) { - this.bigArrays = bigArrays; + public SumLongAggregatorFunctionSupplier(List channels) { this.channels = channels; } diff --git a/x-pack/plugin/esql/compute/src/main/java/module-info.java b/x-pack/plugin/esql/compute/src/main/java/module-info.java index 195c5fff6142b..37c91dfd836a7 100644 --- a/x-pack/plugin/esql/compute/src/main/java/module-info.java +++ b/x-pack/plugin/esql/compute/src/main/java/module-info.java @@ -16,6 +16,7 @@ requires org.apache.logging.log4j; requires org.elasticsearch.logging; requires org.elasticsearch.tdigest; + requires org.elasticsearch.geo; exports org.elasticsearch.compute; exports org.elasticsearch.compute.aggregation; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/OwningChannelActionListener.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/OwningChannelActionListener.java deleted file mode 100644 index 50a20ee6ee73d..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/OwningChannelActionListener.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ChannelActionListener; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportResponse; - -/** - * Wraps a {@link ChannelActionListener} and takes ownership of responses passed to - * {@link org.elasticsearch.action.ActionListener#onResponse(Object)}; the reference count will be decreased once sending is done. - * - * Deprecated: use {@link ChannelActionListener} instead and ensure responses sent to it are properly closed after. 
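Per the deprecation note, the deleted wrapper's two lines of real work can live at the call site instead. A sketch of the replacement, where `channel` is a `TransportChannel`, `response` is ref-counted, and `MyResponse` is a hypothetical `TransportResponse` subtype:

```java
// Wrap the channel directly; respondAndRelease sends the response and decrements
// its ref count once sending is done, which is what the deleted wrapper automated.
ChannelActionListener<MyResponse> listener = new ChannelActionListener<>(channel);
ActionListener.respondAndRelease(listener, response);
```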
- */ -@Deprecated(forRemoval = true) -public final class OwningChannelActionListener implements ActionListener { - private final ChannelActionListener listener; - - public OwningChannelActionListener(TransportChannel channel) { - this.listener = new ChannelActionListener<>(channel); - } - - @Override - public void onResponse(Response response) { - ActionListener.respondAndRelease(listener, response); - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - - @Override - public String toString() { - return "OwningChannelActionListener{" + listener + "}"; - } - -} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountAggregatorFunction.java index efc275ff6eb35..13a4204edfd8f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountAggregatorFunction.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BooleanVector; @@ -20,7 +19,7 @@ import java.util.List; public class CountAggregatorFunction implements AggregatorFunction { - public static AggregatorFunctionSupplier supplier(BigArrays bigArrays, List channels) { + public static AggregatorFunctionSupplier supplier(List channels) { return new AggregatorFunctionSupplier() { @Override public AggregatorFunction aggregator(DriverContext driverContext) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregator.java index d083a48fffb7a..218af8fcb705e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregator.java @@ -13,7 +13,6 @@ import org.elasticsearch.compute.ann.GroupingAggregator; import org.elasticsearch.compute.ann.IntermediateState; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.operator.DriverContext; @@ -33,10 +32,6 @@ public static void combine(SingleState current, boolean v) { current.bits |= v ? 
BIT_TRUE : BIT_FALSE; } - public static void combineStates(SingleState current, SingleState state) { - current.bits |= state.bits; - } - public static void combineIntermediate(SingleState current, boolean fbit, boolean tbit) { if (fbit) current.bits |= BIT_FALSE; if (tbit) current.bits |= BIT_TRUE; @@ -44,7 +39,7 @@ public static void combineIntermediate(SingleState current, boolean fbit, boolea public static Block evaluateFinal(SingleState state, DriverContext driverContext) { long result = ((state.bits & BIT_TRUE) >> 1) + (state.bits & BIT_FALSE); - return LongBlock.newConstantBlockWith(result, 1, driverContext.blockFactory()); + return driverContext.blockFactory().newConstantLongBlockWith(result, 1); } public static GroupingState initGrouping(BigArrays bigArrays) { @@ -65,7 +60,7 @@ public static void combineIntermediate(GroupingState current, int groupId, boole } public static Block evaluateFinal(GroupingState state, IntVector selected, DriverContext driverContext) { - LongBlock.Builder builder = LongBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()); + LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount()); for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); long count = (state.bits.get(2 * group) ? 1 : 0) + (state.bits.get(2 * group + 1) ? 1 : 0); @@ -135,8 +130,8 @@ void combineStates(int currentGroupId, GroupingState state) { public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { assert blocks.length >= offset + 2; try ( - var fbitBuilder = BooleanBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()); - var tbitBuilder = BooleanBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()) + var fbitBuilder = driverContext.blockFactory().newBooleanBlockBuilder(selected.getPositionCount()); + var tbitBuilder = driverContext.blockFactory().newBooleanBlockBuilder(selected.getPositionCount()) ) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregator.java index 89ad27f1fef28..13a9e00bb28ab 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregator.java @@ -29,17 +29,13 @@ public static void combine(HllStates.SingleState current, BytesRef v) { current.collect(v); } - public static void combineStates(HllStates.SingleState current, HllStates.SingleState state) { - current.merge(0, state.hll, 0); - } - public static void combineIntermediate(HllStates.SingleState current, BytesRef inValue) { current.merge(0, inValue, 0); } public static Block evaluateFinal(HllStates.SingleState state, DriverContext driverContext) { long result = state.cardinality(); - return LongBlock.newConstantBlockWith(result, 1, driverContext.blockFactory()); + return driverContext.blockFactory().newConstantLongBlockWith(result, 1); } public static HllStates.GroupingState initGrouping(BigArrays bigArrays, int precision) { @@ -64,7 +60,7 @@ public static void combineStates( } public static Block evaluateFinal(HllStates.GroupingState state, IntVector selected, 
DriverContext driverContext) { - try (LongBlock.Builder builder = LongBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); long count = state.cardinality(group); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregator.java index 86b3f9997246e..46a0d24cec8c4 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregator.java @@ -29,17 +29,13 @@ public static void combine(HllStates.SingleState current, double v) { current.collect(v); } - public static void combineStates(HllStates.SingleState current, HllStates.SingleState state) { - current.merge(0, state.hll, 0); - } - public static void combineIntermediate(HllStates.SingleState current, BytesRef inValue) { current.merge(0, inValue, 0); } public static Block evaluateFinal(HllStates.SingleState state, DriverContext driverContext) { long result = state.cardinality(); - return LongBlock.newConstantBlockWith(result, 1, driverContext.blockFactory()); + return driverContext.blockFactory().newConstantLongBlockWith(result, 1); } public static HllStates.GroupingState initGrouping(BigArrays bigArrays, int precision) { @@ -64,7 +60,7 @@ public static void combineStates( } public static Block evaluateFinal(HllStates.GroupingState state, IntVector selected, DriverContext driverContext) { - try (LongBlock.Builder builder = LongBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); long count = state.cardinality(group); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctIntAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctIntAggregator.java index 993284b0c57c3..9c29eb98f2987 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctIntAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctIntAggregator.java @@ -29,17 +29,13 @@ public static void combine(HllStates.SingleState current, int v) { current.collect(v); } - public static void combineStates(HllStates.SingleState current, HllStates.SingleState state) { - current.merge(0, state.hll, 0); - } - public static void combineIntermediate(HllStates.SingleState current, BytesRef inValue) { current.merge(0, inValue, 0); } public static Block evaluateFinal(HllStates.SingleState state, DriverContext driverContext) { long result = state.cardinality(); - return LongBlock.newConstantBlockWith(result, 1, driverContext.blockFactory()); + return driverContext.blockFactory().newConstantLongBlockWith(result, 1); } public static HllStates.GroupingState initGrouping(BigArrays bigArrays, int precision) { @@ -64,7 +60,7 @@ public static void combineStates( } public static Block 
evaluateFinal(HllStates.GroupingState state, IntVector selected, DriverContext driverContext) { - try (LongBlock.Builder builder = LongBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); long count = state.cardinality(group); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctLongAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctLongAggregator.java index a09c8df3b0fc3..59570e2f5a7ef 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctLongAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctLongAggregator.java @@ -29,17 +29,13 @@ public static void combine(HllStates.SingleState current, long v) { current.collect(v); } - public static void combineStates(HllStates.SingleState current, HllStates.SingleState state) { - current.merge(0, state.hll, 0); - } - public static void combineIntermediate(HllStates.SingleState current, BytesRef inValue) { current.merge(0, inValue, 0); } public static Block evaluateFinal(HllStates.SingleState state, DriverContext driverContext) { long result = state.cardinality(); - return LongBlock.newConstantBlockWith(result, 1, driverContext.blockFactory()); + return driverContext.blockFactory().newConstantLongBlockWith(result, 1); } public static HllStates.GroupingState initGrouping(BigArrays bigArrays, int precision) { @@ -64,7 +60,7 @@ public static void combineStates( } public static Block evaluateFinal(HllStates.GroupingState state, IntVector selected, DriverContext driverContext) { - try (LongBlock.Builder builder = LongBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); long count = state.cardinality(group); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountGroupingAggregatorFunction.java index 995dc5e15740f..5dba070172ae9 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountGroupingAggregatorFunction.java @@ -181,7 +181,7 @@ public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) @Override public void evaluateFinal(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { - try (LongVector.Builder builder = LongVector.newVectorBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (LongVector.Builder builder = driverContext.blockFactory().newLongVectorFixedBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int si = selected.getInt(i); builder.appendLong(state.hasValue(si) ? 
state.get(si) : 0); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/HllStates.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/HllStates.java index 66844f002111e..a8102efa61746 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/HllStates.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/HllStates.java @@ -16,7 +16,6 @@ import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.core.Releasables; @@ -179,7 +178,7 @@ void merge(int groupId, BytesRef other, int otherGroup) { @Override public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { assert blocks.length >= offset + 1; - try (var builder = BytesRefBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (var builder = driverContext.blockFactory().newBytesRefBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); builder.appendBytesRef(serializeHLL(group, hll)); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregator.java index 2d73c323e9556..db0d57b887008 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregator.java @@ -28,10 +28,6 @@ public static void combine(QuantileStates.SingleState current, double v) { current.add(v); } - public static void combineStates(QuantileStates.SingleState current, QuantileStates.SingleState state) { - current.add(state); - } - public static void combineIntermediate(QuantileStates.SingleState state, BytesRef inValue) { state.add(inValue); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregator.java index b4696f0ab1934..a57e28aebd437 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregator.java @@ -28,10 +28,6 @@ public static void combine(QuantileStates.SingleState current, int v) { current.add(v); } - public static void combineStates(QuantileStates.SingleState current, QuantileStates.SingleState state) { - current.add(state); - } - public static void combineIntermediate(QuantileStates.SingleState state, BytesRef inValue) { state.add(inValue); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregator.java index 
bbd9f1821b681..54340f809e4cd 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregator.java @@ -32,10 +32,6 @@ public static void combineIntermediate(QuantileStates.SingleState state, BytesRe state.add(inValue); } - public static void combineStates(QuantileStates.SingleState current, QuantileStates.SingleState state) { - current.add(state); - } - public static Block evaluateFinal(QuantileStates.SingleState state, DriverContext driverContext) { return state.evaluateMedianAbsoluteDeviation(driverContext); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileDoubleAggregator.java index 3020a920ebddb..1cff8d89b7541 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileDoubleAggregator.java @@ -28,10 +28,6 @@ public static void combine(QuantileStates.SingleState current, double v) { current.add(v); } - public static void combineStates(QuantileStates.SingleState current, QuantileStates.SingleState state) { - current.add(state); - } - public static void combineIntermediate(QuantileStates.SingleState state, BytesRef inValue) { state.add(inValue); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileIntAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileIntAggregator.java index 4ccd409cc8ccf..d93dc7099fffe 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileIntAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileIntAggregator.java @@ -28,10 +28,6 @@ public static void combine(QuantileStates.SingleState current, int v) { current.add(v); } - public static void combineStates(QuantileStates.SingleState current, QuantileStates.SingleState state) { - current.add(state); - } - public static void combineIntermediate(QuantileStates.SingleState state, BytesRef inValue) { state.add(inValue); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileLongAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileLongAggregator.java index 2a0eb3a060930..9d900069d15ae 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileLongAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileLongAggregator.java @@ -28,10 +28,6 @@ public static void combine(QuantileStates.SingleState current, long v) { current.add(v); } - public static void combineStates(QuantileStates.SingleState current, QuantileStates.SingleState state) { - current.add(state); - } - public static void combineIntermediate(QuantileStates.SingleState state, BytesRef inValue) { state.add(inValue); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/QuantileStates.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/QuantileStates.java index 
0b5b89425ed46..0ba7afb0d5e68 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/QuantileStates.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/QuantileStates.java @@ -72,10 +72,6 @@ void add(double v) { digest.add(v); } - void add(SingleState other) { - digest.add(other.digest); - } - void add(BytesRef other) { digest.add(deserializeDigest(other)); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/SumDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/SumDoubleAggregator.java index 4c2c38da28b75..5e46225a873f8 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/SumDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/SumDoubleAggregator.java @@ -14,7 +14,6 @@ import org.elasticsearch.compute.ann.IntermediateState; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; -import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.operator.DriverContext; @@ -42,10 +41,6 @@ public static void combine(SumState current, double value, double delta) { current.add(value, delta); } - public static void combineStates(SumState current, SumState state) { - current.add(state.value(), state.delta()); - } - public static void combineIntermediate(SumState state, double inValue, double inDelta, boolean seen) { if (seen) { combine(state, inValue, inDelta); @@ -63,7 +58,7 @@ public static void evaluateIntermediate(SumState state, DriverContext driverCont public static Block evaluateFinal(SumState state, DriverContext driverContext) { double result = state.value(); - return DoubleBlock.newConstantBlockWith(result, 1, driverContext.blockFactory()); + return driverContext.blockFactory().newConstantDoubleBlockWith(result, 1); } public static GroupingSumState initGrouping(BigArrays bigArrays) { @@ -95,9 +90,9 @@ public static void evaluateIntermediate( ) { assert blocks.length >= offset + 3; try ( - var valuesBuilder = DoubleBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()); - var deltaBuilder = DoubleBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()); - var seenBuilder = BooleanBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()) + var valuesBuilder = driverContext.blockFactory().newDoubleBlockBuilder(selected.getPositionCount()); + var deltaBuilder = driverContext.blockFactory().newDoubleBlockBuilder(selected.getPositionCount()); + var seenBuilder = driverContext.blockFactory().newBooleanBlockBuilder(selected.getPositionCount()) ) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); @@ -117,7 +112,7 @@ public static void evaluateIntermediate( } public static Block evaluateFinal(GroupingSumState state, IntVector selected, DriverContext driverContext) { - try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int si = selected.getInt(i); if (state.hasValue(si) && si < state.values.size()) { diff --git 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st index 42f86580a228d..e81af4841d1a4 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st @@ -10,7 +10,6 @@ package org.elasticsearch.compute.aggregation; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.$Type$Array; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BooleanBlock; $if(long)$ import org.elasticsearch.compute.data.IntVector; $endif$ @@ -73,14 +72,14 @@ $endif$ Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { if (false == trackingGroupIds()) { - try ($Type$Vector.Builder builder = $Type$Vector.newVectorBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try ($Type$Vector.Builder builder = driverContext.blockFactory().new$Type$VectorFixedBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { builder.append$Type$(values.get(selected.getInt(i))); } return builder.build().asBlock(); } } - try ($Type$Block.Builder builder = $Type$Block.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try ($Type$Block.Builder builder = driverContext.blockFactory().new$Type$BlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); if (hasValue(group)) { @@ -111,8 +110,8 @@ $endif$ ) { assert blocks.length >= offset + 2; try ( - var valuesBuilder = $Type$Block.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()); - var hasValueBuilder = BooleanBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()) + var valuesBuilder = driverContext.blockFactory().new$Type$BlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) ) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); @@ -124,7 +123,7 @@ $endif$ hasValueBuilder.appendBoolean(hasValue(group)); } blocks[offset + 0] = valuesBuilder.build(); - blocks[offset + 1] = hasValueBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BooleanBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BooleanBlockHash.java index 684e6aec60b9e..aa7c737e331c7 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BooleanBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BooleanBlockHash.java @@ -51,8 +51,8 @@ public void add(Page page, GroupingAggregatorFunction.AddInput addInput) { addInput.add(0, groupIds); } } else { - try (IntBlock groupIds = add(booleanVector).asBlock()) { - addInput.add(0, groupIds.asVector()); + try (IntVector groupIds = add(booleanVector)) { + addInput.add(0, groupIds); } } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BytesRefLongBlockHash.java 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BytesRefLongBlockHash.java index da2c85e532016..7ee8a7165aa17 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BytesRefLongBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BytesRefLongBlockHash.java @@ -18,7 +18,6 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; -import org.elasticsearch.compute.data.IntArrayVector; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.LongVector; @@ -72,7 +71,9 @@ public void add(Page page, GroupingAggregatorFunction.AddInput addInput) { BytesRefVector vector1 = block1.asVector(); LongVector vector2 = block2.asVector(); if (vector1 != null && vector2 != null) { - addInput.add(0, add(vector1, vector2)); + try (IntVector ords = add(vector1, vector2)) { + addInput.add(0, ords); + } } else { try (AddWork work = new AddWork(block1, block2, addInput)) { work.add(); @@ -88,7 +89,7 @@ public IntVector add(BytesRefVector vector1, LongVector vector2) { long hash1 = hashOrdToGroup(bytesHash.add(vector1.getBytesRef(i, scratch))); ords[i] = Math.toIntExact(hashOrdToGroup(finalHash.add(hash1, vector2.getLong(i)))); } - return new IntArrayVector(ords, positions); + return blockFactory.newIntArrayVector(ords, positions); } private static final long[] EMPTY = new long[0]; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/LongLongBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/LongLongBlockHash.java index ce53f0bb8e7f4..49b16198a5d77 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/LongLongBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/LongLongBlockHash.java @@ -65,7 +65,7 @@ public void add(Page page, GroupingAggregatorFunction.AddInput addInput) { private IntVector add(LongVector vector1, LongVector vector2) { int positions = vector1.getPositionCount(); - try (var builder = IntVector.newVectorFixedBuilder(positions, blockFactory)) { + try (var builder = blockFactory.newIntVectorFixedBuilder(positions)) { for (int i = 0; i < positions; i++) { builder.appendInt(Math.toIntExact(hashOrdToGroup(hash.add(vector1.getLong(i), vector2.getLong(i))))); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java index fe1ecbec92e5b..d6046f0bda085 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractArrayBlock.java @@ -52,7 +52,7 @@ public final MvOrdering mvOrdering() { } protected BitSet shiftNullsToExpandedPositions() { - BitSet expanded = new BitSet(getTotalValueCount()); + BitSet expanded = new BitSet(nullsMask.size()); int next = -1; while ((next = nullsMask.nextSetBit(next + 1)) != -1) { expanded.set(getFirstValueIndex(next)); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java index 177e3fb6798d1..0c5207133f71d 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java @@ -11,8 +11,7 @@ import java.util.BitSet; -abstract class AbstractBlock implements Block { - private int references = 1; +abstract class AbstractBlock extends AbstractNonThreadSafeRefCounted implements Block { private final int positionCount; @Nullable @@ -32,6 +31,7 @@ protected AbstractBlock(int positionCount, BlockFactory blockFactory) { this.blockFactory = blockFactory; this.firstValueIndexes = null; this.nullsMask = null; + assert assertInvariants(); } /** @@ -44,6 +44,26 @@ protected AbstractBlock(int positionCount, @Nullable int[] firstValueIndexes, @N this.firstValueIndexes = firstValueIndexes; this.nullsMask = nullsMask == null || nullsMask.isEmpty() ? null : nullsMask; assert nullsMask != null || firstValueIndexes != null : "Create VectorBlock instead"; + assert assertInvariants(); + } + + private boolean assertInvariants() { + if (firstValueIndexes != null) { + assert firstValueIndexes.length == getPositionCount() + 1; + for (int i = 0; i < getPositionCount(); i++) { + assert (firstValueIndexes[i + 1] - firstValueIndexes[i]) >= 0; + } + } + if (nullsMask != null) { + assert nullsMask.nextSetBit(getPositionCount() + 1) == -1; + } + if (firstValueIndexes != null && nullsMask != null) { + for (int i = 0; i < getPositionCount(); i++) { + // Either we have multi-values or a null but never both. + assert ((nullsMask.get(i) == false) || (firstValueIndexes[i + 1] - firstValueIndexes[i]) == 1); + } + } + return true; } @Override @@ -101,55 +121,7 @@ public void allowPassingToDifferentDriver() { } @Override - public boolean isReleased() { + public final boolean isReleased() { return hasReferences() == false; } - - @Override - public final void incRef() { - if (isReleased()) { - throw new IllegalStateException("can't increase refCount on already released block [" + this + "]"); - } - references++; - } - - @Override - public final boolean tryIncRef() { - if (isReleased()) { - return false; - } - references++; - return true; - } - - @Override - public final boolean decRef() { - if (isReleased()) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - - references--; - - if (references <= 0) { - closeInternal(); - return true; - } - return false; - } - - @Override - public final boolean hasReferences() { - return references >= 1; - } - - @Override - public final void close() { - decRef(); - } - - /** - * This is called when the number of references reaches zero. - * It must release any resources held by the block (adjusting circuit breakers if needed). 
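The new `assertInvariants` above encodes the block layout contract: `firstValueIndexes` has one entry per position plus a terminator, value runs never have negative length, no null bit is set past the position count, and a null position reserves exactly one value slot. A worked layout, with hypothetical values:

```java
// Three positions: position 0 holds two values, position 1 is null, position 2 holds one value.
int positionCount = 3;
int[] firstValueIndexes = { 0, 2, 3, 4 }; // length == positionCount + 1
BitSet nullsMask = new BitSet();
nullsMask.set(1);
// firstValueIndexes[2] - firstValueIndexes[1] == 1: the null position spans exactly one slot,
// so a position is either multi-valued or null, never both.
```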
- */ - protected abstract void closeInternal(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlockBuilder.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlockBuilder.java index d6d6584e1b534..24303ff0ea0a4 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlockBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlockBuilder.java @@ -165,7 +165,7 @@ static int calculateNewArraySize(int currentSize) { } protected void adjustBreaker(long deltaBytes) { - blockFactory.adjustBreaker(deltaBytes, false); + blockFactory.adjustBreaker(deltaBytes); estimatedBytes += deltaBytes; assert estimatedBytes >= 0; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractNonThreadSafeRefCounted.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractNonThreadSafeRefCounted.java new file mode 100644 index 0000000000000..2dfd8c3eca5ac --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractNonThreadSafeRefCounted.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasable; + +/** + * Releasable, non-threadsafe version of {@link org.elasticsearch.core.AbstractRefCounted}. + * Calls to {@link AbstractNonThreadSafeRefCounted#decRef()} and {@link AbstractNonThreadSafeRefCounted#close()} are equivalent. + */ +abstract class AbstractNonThreadSafeRefCounted implements RefCounted, Releasable { + private int references = 1; + + @Override + public final void incRef() { + if (hasReferences() == false) { + throw new IllegalStateException("can't increase refCount on already released object [" + this + "]"); + } + references++; + } + + @Override + public final boolean tryIncRef() { + if (hasReferences() == false) { + return false; + } + references++; + return true; + } + + @Override + public final boolean decRef() { + if (hasReferences() == false) { + throw new IllegalStateException("can't release already released object [" + this + "]"); + } + + references--; + + if (references <= 0) { + closeInternal(); + return true; + } + return false; + } + + @Override + public final boolean hasReferences() { + return references >= 1; + } + + @Override + public final void close() { + decRef(); + } + + /** + * This is called when the number of references reaches zero. + * This is where resources should be released (adjusting circuit breakers if needed). + */ + protected abstract void closeInternal(); +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java index 33ef14cfb4ad8..1eb2c09f78511 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java @@ -10,11 +10,10 @@ /** * A dense Vector of single values. 
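`AbstractNonThreadSafeRefCounted` above centralizes the counting that `AbstractBlock` previously hand-rolled. The contract is single-threaded: a plain `int` counter, one reference granted at construction, `close()` equivalent to `decRef()`, and `closeInternal()` firing exactly once when the count reaches zero. A usage sketch with a hypothetical factory method:

```java
AbstractNonThreadSafeRefCounted resource = newTrackedResource(); // hypothetical; starts with one reference
resource.incRef();                        // hand a second reference to another consumer
resource.decRef();                        // consumer done: back to one reference
resource.close();                         // equivalent to decRef(); count hits zero
assert resource.hasReferences() == false; // closeInternal() has released the memory
```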
*/ -abstract class AbstractVector implements Vector { +abstract class AbstractVector extends AbstractNonThreadSafeRefCounted implements Vector { private final int positionCount; private BlockFactory blockFactory; - protected boolean released; protected AbstractVector(int positionCount, BlockFactory blockFactory) { this.positionCount = positionCount; @@ -41,16 +40,12 @@ public void allowPassingToDifferentDriver() { } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; - blockFactory.adjustBreaker(-ramBytesUsed(), true); + protected void closeInternal() { + blockFactory.adjustBreaker(-ramBytesUsed()); } @Override public final boolean isReleased() { - return released; + return hasReferences() == false; } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVectorBuilder.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVectorBuilder.java index 6a8cfede7716a..0f86a79700b4b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVectorBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVectorBuilder.java @@ -48,7 +48,7 @@ static int calculateNewArraySize(int currentSize) { } protected void adjustBreaker(long deltaBytes) { - blockFactory.adjustBreaker(deltaBytes, false); + blockFactory.adjustBreaker(deltaBytes); estimatedBytes += deltaBytes; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java index 964e510de9a20..c89a0ce260c67 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java @@ -159,25 +159,6 @@ default boolean mvSortedAscending() { */ Block expand(); - /** - * {@return a constant null block with the given number of positions, using the non-breaking block factory}. - * @deprecated use {@link BlockFactory#newConstantNullBlock} - */ - // Eventually, this should use the GLOBAL breaking instance - @Deprecated - static Block constantNullBlock(int positions) { - return constantNullBlock(positions, BlockFactory.getNonBreakingInstance()); - } - - /** - * {@return a constant null block with the given number of positions}. - * @deprecated use {@link BlockFactory#newConstantNullBlock} - */ - @Deprecated - static Block constantNullBlock(int positions, BlockFactory blockFactory) { - return blockFactory.newConstantNullBlock(positions); - } - /** * Builds {@link Block}s. Typically, you use one of its direct subinterfaces like {@link IntBlock.Builder}. * This is {@link Releasable} and should be released after building the block or if building the block fails.
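With the static `Block.constantNullBlock` helpers gone, every constant block must come from a breaker-aware factory, usually the one owned by the `DriverContext`. The migration is mechanical; a sketch, assuming a `driverContext` in scope:

```java
// Before (removed): Block nulls = Block.constantNullBlock(positions);
Block nulls = driverContext.blockFactory().newConstantNullBlock(positions);
LongBlock longs = driverContext.blockFactory().newConstantLongBlockWith(1L, positions);
```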
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockFactory.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockFactory.java index 092f66a7d4427..7b91ff6a645ae 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockFactory.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockFactory.java @@ -10,7 +10,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; -import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BytesRefArray; @@ -25,35 +24,35 @@ public class BlockFactory { public static final String LOCAL_BREAKER_OVER_RESERVED_MAX_SIZE_SETTING = "esql.block_factory.local_breaker.max_over_reserved"; public static final ByteSizeValue LOCAL_BREAKER_OVER_RESERVED_DEFAULT_MAX_SIZE = ByteSizeValue.ofKb(16); - private static final BlockFactory NON_BREAKING = BlockFactory.getInstance( - new NoopCircuitBreaker("noop-esql-breaker"), - BigArrays.NON_RECYCLING_INSTANCE - ); + public static final String MAX_BLOCK_PRIMITIVE_ARRAY_SIZE_SETTING = "esql.block_factory.max_block_primitive_array_size"; + public static final ByteSizeValue DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE = ByteSizeValue.ofKb(512); private final CircuitBreaker breaker; private final BigArrays bigArrays; + private final long maxPrimitiveArrayBytes; private final BlockFactory parent; public BlockFactory(CircuitBreaker breaker, BigArrays bigArrays) { - this(breaker, bigArrays, null); + this(breaker, bigArrays, DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE); } - protected BlockFactory(CircuitBreaker breaker, BigArrays bigArrays, BlockFactory parent) { + public BlockFactory(CircuitBreaker breaker, BigArrays bigArrays, ByteSizeValue maxPrimitiveArraySize) { + this(breaker, bigArrays, maxPrimitiveArraySize, null); + } + + protected BlockFactory(CircuitBreaker breaker, BigArrays bigArrays, ByteSizeValue maxPrimitiveArraySize, BlockFactory parent) { + assert breaker instanceof LocalCircuitBreaker == false + || (parent != null && ((LocalCircuitBreaker) breaker).parentBreaker() == parent.breaker) + : "use local breaker without parent block factory"; this.breaker = breaker; this.bigArrays = bigArrays; this.parent = parent; - } - - /** - * Returns the Non-Breaking block factory. - */ - public static BlockFactory getNonBreakingInstance() { - return NON_BREAKING; + this.maxPrimitiveArrayBytes = maxPrimitiveArraySize.getBytes(); } public static BlockFactory getInstance(CircuitBreaker breaker, BigArrays bigArrays) { - return new BlockFactory(breaker, bigArrays); + return new BlockFactory(breaker, bigArrays, DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE, null); } // For testing @@ -74,30 +73,19 @@ public BlockFactory newChildFactory(LocalCircuitBreaker childBreaker) { if (childBreaker.parentBreaker() != breaker) { throw new IllegalStateException("Different parent breaker"); } - return new BlockFactory(childBreaker, bigArrays, this); + return new BlockFactory(childBreaker, bigArrays, ByteSizeValue.ofBytes(maxPrimitiveArrayBytes), this); } /** * Adjust the circuit breaker with the given delta; if the delta is negative, the breaker will - * be adjusted without tripping.
If the data was already created before calling this method, - * and the breaker trips, we add the delta without breaking to account for the created data. - * If the data has not been created yet, we do not add the delta to the breaker if it trips. + * be adjusted without tripping. + * @throws CircuitBreakingException if the breaker was put above its limit */ - void adjustBreaker(final long delta, final boolean isDataAlreadyCreated) { + void adjustBreaker(final long delta) throws CircuitBreakingException { // checking breaker means potentially tripping, but it doesn't // have to if the delta is negative if (delta > 0) { - try { - breaker.addEstimateBytesAndMaybeBreak(delta, ""); - } catch (CircuitBreakingException e) { - // if (isDataAlreadyCreated) { // TODO: remove isDataAlreadyCreated - // since we've already created the data, we need to - // add it so closing the stream re-adjusts properly - // breaker.addWithoutBreaking(delta); - // } - // re-throw the original exception - throw e; - } + breaker.addEstimateBytesAndMaybeBreak(delta, ""); } else { breaker.addWithoutBreaking(delta); } @@ -106,25 +94,25 @@ void adjustBreaker(final long delta, final boolean isDataAlreadyCreated) { /** Pre-adjusts the breaker for the given position count and element type. Returns the pre-adjusted amount. */ public long preAdjustBreakerForBoolean(int positionCount) { long bytes = (long) positionCount * Byte.BYTES; - adjustBreaker(bytes, false); + adjustBreaker(bytes); return bytes; } public long preAdjustBreakerForInt(int positionCount) { long bytes = (long) positionCount * Integer.BYTES; - adjustBreaker(bytes, false); + adjustBreaker(bytes); return bytes; } public long preAdjustBreakerForLong(int positionCount) { long bytes = (long) positionCount * Long.BYTES; - adjustBreaker(bytes, false); + adjustBreaker(bytes); return bytes; } public long preAdjustBreakerForDouble(int positionCount) { long bytes = (long) positionCount * Double.BYTES; - adjustBreaker(bytes, false); + adjustBreaker(bytes); return bytes; } @@ -145,7 +133,7 @@ public final BooleanBlock newBooleanArrayBlock(boolean[] values, int pc, int[] f public BooleanBlock newBooleanArrayBlock(boolean[] values, int pc, int[] fvi, BitSet nulls, MvOrdering mvOrder, long preAdjustedBytes) { var b = new BooleanArrayBlock(values, pc, fvi, nulls, mvOrder, this); - adjustBreaker(b.ramBytesUsed() - preAdjustedBytes, true); + adjustBreaker(b.ramBytesUsed() - preAdjustedBytes); return b; } @@ -159,7 +147,7 @@ public final BooleanVector newBooleanArrayVector(boolean[] values, int positionC public BooleanVector newBooleanArrayVector(boolean[] values, int positionCount, long preAdjustedBytes) { var b = new BooleanArrayVector(values, positionCount, this); - adjustBreaker(b.ramBytesUsed() - preAdjustedBytes, true); + adjustBreaker(b.ramBytesUsed() - preAdjustedBytes); return b; } @@ -169,12 +157,12 @@ public final BooleanBlock newConstantBooleanBlockWith(boolean value, int positio public BooleanBlock newConstantBooleanBlockWith(boolean value, int positions, long preAdjustedBytes) { var b = new ConstantBooleanVector(value, positions, this).asBlock(); - adjustBreaker(b.ramBytesUsed() - preAdjustedBytes, true); + adjustBreaker(b.ramBytesUsed() - preAdjustedBytes); return b; } public BooleanVector newConstantBooleanVector(boolean value, int positions) { - adjustBreaker(ConstantBooleanVector.RAM_BYTES_USED, false); + adjustBreaker(ConstantBooleanVector.RAM_BYTES_USED); var v = new ConstantBooleanVector(value, positions, this); assert v.ramBytesUsed() == 
ConstantBooleanVector.RAM_BYTES_USED; return v; @@ -190,7 +178,7 @@ public final IntBlock newIntArrayBlock(int[] values, int positionCount, int[] fi public IntBlock newIntArrayBlock(int[] values, int pc, int[] fvi, BitSet nulls, MvOrdering mvOrdering, long preAdjustedBytes) { var b = new IntArrayBlock(values, pc, fvi, nulls, mvOrdering, this); - adjustBreaker(b.ramBytesUsed() - preAdjustedBytes, true); + adjustBreaker(b.ramBytesUsed() - preAdjustedBytes); return b; } @@ -226,7 +214,7 @@ public final IntVector newIntArrayVector(int[] values, int positionCount) { */ public IntVector newIntArrayVector(int[] values, int positionCount, long preAdjustedBytes) { var b = new IntArrayVector(values, positionCount, this); - adjustBreaker(b.ramBytesUsed() - preAdjustedBytes, true); + adjustBreaker(b.ramBytesUsed() - preAdjustedBytes); return b; } @@ -236,12 +224,12 @@ public final IntBlock newConstantIntBlockWith(int value, int positions) { public IntBlock newConstantIntBlockWith(int value, int positions, long preAdjustedBytes) { var b = new ConstantIntVector(value, positions, this).asBlock(); - adjustBreaker(b.ramBytesUsed() - preAdjustedBytes, true); + adjustBreaker(b.ramBytesUsed() - preAdjustedBytes); return b; } public IntVector newConstantIntVector(int value, int positions) { - adjustBreaker(ConstantIntVector.RAM_BYTES_USED, false); + adjustBreaker(ConstantIntVector.RAM_BYTES_USED); var v = new ConstantIntVector(value, positions, this); assert v.ramBytesUsed() == ConstantIntVector.RAM_BYTES_USED; return v; @@ -257,7 +245,7 @@ public final LongBlock newLongArrayBlock(long[] values, int pc, int[] firstValue public LongBlock newLongArrayBlock(long[] values, int pc, int[] fvi, BitSet nulls, MvOrdering mvOrdering, long preAdjustedBytes) { var b = new LongArrayBlock(values, pc, fvi, nulls, mvOrdering, this); - adjustBreaker(b.ramBytesUsed() - preAdjustedBytes, true); + adjustBreaker(b.ramBytesUsed() - preAdjustedBytes); return b; } @@ -278,7 +266,7 @@ public final LongVector newLongArrayVector(long[] values, int positionCount) { public LongVector newLongArrayVector(long[] values, int positionCount, long preAdjustedBytes) { var b = new LongArrayVector(values, positionCount, this); - adjustBreaker(b.ramBytesUsed() - preAdjustedBytes, true); + adjustBreaker(b.ramBytesUsed() - preAdjustedBytes); return b; } @@ -288,12 +276,12 @@ public final LongBlock newConstantLongBlockWith(long value, int positions) { public LongBlock newConstantLongBlockWith(long value, int positions, long preAdjustedBytes) { var b = new ConstantLongVector(value, positions, this).asBlock(); - adjustBreaker(b.ramBytesUsed() - preAdjustedBytes, true); + adjustBreaker(b.ramBytesUsed() - preAdjustedBytes); return b; } public LongVector newConstantLongVector(long value, int positions) { - adjustBreaker(ConstantLongVector.RAM_BYTES_USED, false); + adjustBreaker(ConstantLongVector.RAM_BYTES_USED); var v = new ConstantLongVector(value, positions, this); assert v.ramBytesUsed() == ConstantLongVector.RAM_BYTES_USED; return v; @@ -310,7 +298,7 @@ public final DoubleBlock newDoubleArrayBlock(double[] values, int pc, int[] firs public DoubleBlock newDoubleArrayBlock(double[] values, int pc, int[] fvi, BitSet nulls, MvOrdering mvOrdering, long preAdjustedBytes) { var b = new DoubleArrayBlock(values, pc, fvi, nulls, mvOrdering, this); - adjustBreaker(b.ramBytesUsed() - preAdjustedBytes, true); + adjustBreaker(b.ramBytesUsed() - preAdjustedBytes); return b; } @@ -331,7 +319,7 @@ public final DoubleVector newDoubleArrayVector(double[] values, int 
positionCoun public DoubleVector newDoubleArrayVector(double[] values, int positionCount, long preAdjustedBytes) { var b = new DoubleArrayVector(values, positionCount, this); - adjustBreaker(b.ramBytesUsed() - preAdjustedBytes, true); + adjustBreaker(b.ramBytesUsed() - preAdjustedBytes); return b; } @@ -341,12 +329,12 @@ public final DoubleBlock newConstantDoubleBlockWith(double value, int positions) public DoubleBlock newConstantDoubleBlockWith(double value, int positions, long preAdjustedBytes) { var b = new ConstantDoubleVector(value, positions, this).asBlock(); - adjustBreaker(b.ramBytesUsed() - preAdjustedBytes, true); + adjustBreaker(b.ramBytesUsed() - preAdjustedBytes); return b; } public DoubleVector newConstantDoubleVector(double value, int positions) { - adjustBreaker(ConstantDoubleVector.RAM_BYTES_USED, false); + adjustBreaker(ConstantDoubleVector.RAM_BYTES_USED); var v = new ConstantDoubleVector(value, positions, this); assert v.ramBytesUsed() == ConstantDoubleVector.RAM_BYTES_USED; return v; @@ -358,7 +346,7 @@ public BytesRefBlock.Builder newBytesRefBlockBuilder(int estimatedSize) { public BytesRefBlock newBytesRefArrayBlock(BytesRefArray values, int pc, int[] firstValueIndexes, BitSet nulls, MvOrdering mvOrdering) { var b = new BytesRefArrayBlock(values, pc, firstValueIndexes, nulls, mvOrdering, this); - adjustBreaker(b.ramBytesUsed() - values.bigArraysRamBytesUsed(), true); + adjustBreaker(b.ramBytesUsed() - values.bigArraysRamBytesUsed()); return b; } @@ -368,19 +356,19 @@ public BytesRefVector.Builder newBytesRefVectorBuilder(int estimatedSize) { public BytesRefVector newBytesRefArrayVector(BytesRefArray values, int positionCount) { var b = new BytesRefArrayVector(values, positionCount, this); - adjustBreaker(b.ramBytesUsed() - values.bigArraysRamBytesUsed(), true); + adjustBreaker(b.ramBytesUsed() - values.bigArraysRamBytesUsed()); return b; } public BytesRefBlock newConstantBytesRefBlockWith(BytesRef value, int positions) { var b = new ConstantBytesRefVector(value, positions, this).asBlock(); - adjustBreaker(b.ramBytesUsed(), true); + adjustBreaker(b.ramBytesUsed()); return b; } public BytesRefVector newConstantBytesRefVector(BytesRef value, int positions) { long preadjusted = ConstantBytesRefVector.ramBytesUsed(value); - adjustBreaker(preadjusted, false); + adjustBreaker(preadjusted); var v = new ConstantBytesRefVector(value, positions, this); assert v.ramBytesUsed() == preadjusted; return v; @@ -388,7 +376,14 @@ public BytesRefVector newConstantBytesRefVector(BytesRef value, int positions) { public Block newConstantNullBlock(int positions) { var b = new ConstantNullBlock(positions, this); - adjustBreaker(b.ramBytesUsed(), true); + adjustBreaker(b.ramBytesUsed()); return b; } + + /** + * Returns the maximum number of bytes that a Block should be backed by a primitive array before switching to using BigArrays. + */ + public long maxPrimitiveArrayBytes() { + return maxPrimitiveArrayBytes; + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockRamUsageEstimator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockRamUsageEstimator.java index bdc4dbef15bd2..93fd02c3cd879 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockRamUsageEstimator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockRamUsageEstimator.java @@ -23,6 +23,12 @@ public static long sizeOf(@Nullable int[] arr) { /** Returns the size in bytes used by the bitset. 
Returns 0 if the bitset is null. Not exact, but good enough. */ public static long sizeOfBitSet(@Nullable BitSet bitset) { - return bitset == null ? 0 : BITSET_BASE_RAM_USAGE + (bitset.size() / Byte.SIZE); + return bitset == null ? 0 : sizeOfBitSet(bitset.size()); + } + + public static long sizeOfBitSet(long size) { + // BitSet is normally made up of words, represented by longs. So we need to divide and round up. + long wordCount = (size + Long.SIZE - 1) / Long.SIZE; + return BITSET_BASE_RAM_USAGE + wordCount * Long.BYTES; + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java index 405dd088bf3a5..03c1ff05ae99e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java @@ -216,7 +216,7 @@ public static void appendValue(Block.Builder builder, Object val, ElementType ty public static Block constantBlock(BlockFactory blockFactory, Object val, int size) { if (val == null) { - return Block.constantNullBlock(size); + return blockFactory.newConstantNullBlock(size); } return constantBlock(blockFactory, fromJava(val.getClass()), val, size); } @@ -224,12 +224,12 @@ public static Block constantBlock(BlockFactory blockFactory, ElementType type, Object val, int siz // TODO: allow null values private static Block constantBlock(BlockFactory blockFactory, ElementType type, Object val, int size) { return switch (type) { - case NULL -> Block.constantNullBlock(size); - case LONG -> LongBlock.newConstantBlockWith((long) val, size, blockFactory); - case INT -> IntBlock.newConstantBlockWith((int) val, size, blockFactory); - case BYTES_REF -> BytesRefBlock.newConstantBlockWith(toBytesRef(val), size, blockFactory); - case DOUBLE -> DoubleBlock.newConstantBlockWith((double) val, size, blockFactory); - case BOOLEAN -> BooleanBlock.newConstantBlockWith((boolean) val, size, blockFactory); + case NULL -> blockFactory.newConstantNullBlock(size); + case LONG -> blockFactory.newConstantLongBlockWith((long) val, size); + case INT -> blockFactory.newConstantIntBlockWith((int) val, size); + case BYTES_REF -> blockFactory.newConstantBytesRefBlockWith(toBytesRef(val), size); + case DOUBLE -> blockFactory.newConstantDoubleBlockWith((double) val, size); + case BOOLEAN -> blockFactory.newConstantBooleanBlockWith((boolean) val, size); default -> throw new UnsupportedOperationException("unsupported element type [" + type + "]"); }; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java index 29e39f43cddc2..4cf8a688bd85a 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java @@ -19,15 +19,10 @@ /** * Block implementation representing a constant null value.
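* It implements every typed block interface, so a column of nulls can stand in for a block of any element type.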
*/ -public final class ConstantNullBlock extends AbstractBlock implements BooleanBlock, IntBlock, LongBlock, DoubleBlock, BytesRefBlock { +final class ConstantNullBlock extends AbstractBlock implements BooleanBlock, IntBlock, LongBlock, DoubleBlock, BytesRefBlock { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantNullBlock.class); - // Eventually, this should use the GLOBAL breaking instance - ConstantNullBlock(int positionCount) { - this(positionCount, BlockFactory.getNonBreakingInstance()); - } - ConstantNullBlock(int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); } @@ -83,8 +78,9 @@ public String getWriteableName() { return "ConstantNullBlock"; } - static ConstantNullBlock of(StreamInput in) throws IOException { - return new ConstantNullBlock(in.readVInt()); + static Block of(StreamInput in) throws IOException { + BlockFactory blockFactory = ((BlockStreamInput) in).blockFactory(); + return blockFactory.newConstantNullBlock(in.readVInt()); } @Override @@ -128,7 +124,7 @@ public String toString() { @Override public void closeInternal() { - blockFactory().adjustBreaker(-ramBytesUsed(), true); + blockFactory().adjustBreaker(-ramBytesUsed()); } static class Builder implements Block.Builder { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java index d45314f5c8a78..8c75c8216c59e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.data; -import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Releasables; @@ -18,8 +17,6 @@ */ public class DocBlock extends AbstractVectorBlock implements Block { - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DocBlock.class); - private final DocVector vector; DocBlock(DocVector vector) { @@ -67,12 +64,7 @@ public boolean equals(Object obj) { @Override public long ramBytesUsed() { - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector); - } - - @Override - public boolean isReleased() { - return super.isReleased() || vector.isReleased(); + return vector.ramBytesUsed(); } @Override @@ -84,8 +76,8 @@ public void closeInternal() { /** * A builder for the {@link DocBlock}.
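* The three backing {@link IntVector} builders are allocated together; if a later allocation trips the circuit breaker, the ones already created are released.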
*/ - public static Builder newBlockBuilder(int estimatedSize, BlockFactory blockFactory) { - return new Builder(estimatedSize, blockFactory); + public static Builder newBlockBuilder(BlockFactory blockFactory, int estimatedSize) { + return new Builder(blockFactory, estimatedSize); } public static class Builder implements Block.Builder { @@ -93,10 +85,22 @@ public static class Builder implements Block.Builder { private final IntVector.Builder segments; private final IntVector.Builder docs; - private Builder(int estimatedSize, BlockFactory blockFactory) { - shards = IntVector.newVectorBuilder(estimatedSize, blockFactory); - segments = IntVector.newVectorBuilder(estimatedSize, blockFactory); - docs = IntVector.newVectorBuilder(estimatedSize, blockFactory); + private Builder(BlockFactory blockFactory, int estimatedSize) { + IntVector.Builder shards = null; + IntVector.Builder segments = null; + IntVector.Builder docs = null; + try { + shards = blockFactory.newIntVectorBuilder(estimatedSize); + segments = blockFactory.newIntVectorBuilder(estimatedSize); + docs = blockFactory.newIntVectorBuilder(estimatedSize); + } finally { + if (docs == null) { + Releasables.closeExpectNoException(shards, segments, docs); + } + } + this.shards = shards; + this.segments = segments; + this.docs = docs; } public Builder appendShard(int shard) { @@ -159,7 +163,21 @@ public Block.Builder mvOrdering(MvOrdering mvOrdering) { @Override public DocBlock build() { // Pass null for singleSegmentNonDecreasing so we calculate it when we first need it. - return new DocVector(shards.build(), segments.build(), docs.build(), null).asBlock(); + IntVector shards = null; + IntVector segments = null; + IntVector docs = null; + DocVector result = null; + try { + shards = this.shards.build(); + segments = this.segments.build(); + docs = this.docs.build(); + result = new DocVector(shards, segments, docs, null); + return result.asBlock(); + } finally { + if (result == null) { + Releasables.closeExpectNoException(shards, segments, docs); + } + } } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java index 3097dc73fb814..9893ea1826945 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java @@ -46,10 +46,8 @@ public final class DocVector extends AbstractVector implements Vector { */ private int[] shardSegmentDocMapBackwards; - final DocBlock block; - public DocVector(IntVector shards, IntVector segments, IntVector docs, Boolean singleSegmentNonDecreasing) { - super(shards.getPositionCount(), null); + super(shards.getPositionCount(), shards.blockFactory()); this.shards = shards; this.segments = segments; this.docs = docs; @@ -64,7 +62,7 @@ public DocVector(IntVector shards, IntVector segments, IntVector docs, Boolean s "invalid position count [" + shards.getPositionCount() + " != " + docs.getPositionCount() + "]" ); } - block = new DocBlock(this); + blockFactory().adjustBreaker(BASE_RAM_BYTES_USED); } public IntVector shards() { @@ -130,53 +128,85 @@ private void buildShardSegmentDocMapIfMissing() { return; } - int[] forwards = shardSegmentDocMapForwards = new int[shards.getPositionCount()]; - for (int p = 0; p < forwards.length; p++) { - forwards[p] = p; - } - new IntroSorter() { - int pivot; - - @Override - protected void setPivot(int i) { - pivot = 
forwards[i]; + boolean success = false; + long estimatedSize = sizeOfSegmentDocMap(); + blockFactory().adjustBreaker(estimatedSize); + int[] forwards = null; + int[] backwards = null; + try { + int[] finalForwards = forwards = new int[shards.getPositionCount()]; + for (int p = 0; p < forwards.length; p++) { + forwards[p] = p; } + new IntroSorter() { + int pivot; - @Override - protected int comparePivot(int j) { - int cmp = Integer.compare(shards.getInt(pivot), shards.getInt(forwards[j])); - if (cmp != 0) { - return cmp; + @Override + protected void setPivot(int i) { + pivot = finalForwards[i]; } - cmp = Integer.compare(segments.getInt(pivot), segments.getInt(forwards[j])); - if (cmp != 0) { - return cmp; + + @Override + protected int comparePivot(int j) { + int cmp = Integer.compare(shards.getInt(pivot), shards.getInt(finalForwards[j])); + if (cmp != 0) { + return cmp; + } + cmp = Integer.compare(segments.getInt(pivot), segments.getInt(finalForwards[j])); + if (cmp != 0) { + return cmp; + } + return Integer.compare(docs.getInt(pivot), docs.getInt(finalForwards[j])); } - return Integer.compare(docs.getInt(pivot), docs.getInt(forwards[j])); - } - @Override - protected void swap(int i, int j) { - int tmp = forwards[i]; - forwards[i] = forwards[j]; - forwards[j] = tmp; - } - }.sort(0, forwards.length); + @Override + protected void swap(int i, int j) { + int tmp = finalForwards[i]; + finalForwards[i] = finalForwards[j]; + finalForwards[j] = tmp; + } + }.sort(0, forwards.length); - int[] backwards = shardSegmentDocMapBackwards = new int[forwards.length]; - for (int p = 0; p < forwards.length; p++) { - backwards[forwards[p]] = p; + backwards = new int[forwards.length]; + for (int p = 0; p < forwards.length; p++) { + backwards[forwards[p]] = p; + } + success = true; + shardSegmentDocMapForwards = forwards; + shardSegmentDocMapBackwards = backwards; + } finally { + if (success == false) { + blockFactory().adjustBreaker(-estimatedSize); + } } } + private long sizeOfSegmentDocMap() { + return 2 * (((long) RamUsageEstimator.NUM_BYTES_ARRAY_HEADER) + ((long) Integer.BYTES) * shards.getPositionCount()); + } + @Override public DocBlock asBlock() { - return block; + return new DocBlock(this); } @Override public DocVector filter(int... positions) { - return new DocVector(shards.filter(positions), segments.filter(positions), docs.filter(positions), null); + IntVector filteredShards = null; + IntVector filteredSegments = null; + IntVector filteredDocs = null; + DocVector result = null; + try { + filteredShards = shards.filter(positions); + filteredSegments = segments.filter(positions); + filteredDocs = docs.filter(positions); + result = new DocVector(filteredShards, filteredSegments, filteredDocs, null); + return result; + } finally { + if (result == null) { + Releasables.closeExpectNoException(filteredShards, filteredSegments, filteredDocs); + } + } } @Override @@ -225,14 +255,19 @@ public long ramBytesUsed() { @Override public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); shards.allowPassingToDifferentDriver(); segments.allowPassingToDifferentDriver(); docs.allowPassingToDifferentDriver(); } @Override - public void close() { - released = true; - Releasables.closeExpectNoException(shards.asBlock(), segments.asBlock(), docs.asBlock()); // Ugh! we always close blocks + public void closeInternal() { + Releasables.closeExpectNoException( + () -> blockFactory().adjustBreaker(-BASE_RAM_BYTES_USED - (shardSegmentDocMapForwards == null ? 
0 : sizeOfSegmentDocMap())), + shards, + segments, + docs + ); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ElementType.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ElementType.java index 324b6ee963596..2f7d65c8719e6 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ElementType.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ElementType.java @@ -13,16 +13,16 @@ * The type of elements in {@link Block} and {@link Vector} */ public enum ElementType { - BOOLEAN(BooleanBlock::newBlockBuilder), - INT(IntBlock::newBlockBuilder), - LONG(LongBlock::newBlockBuilder), - DOUBLE(DoubleBlock::newBlockBuilder), + BOOLEAN(BlockFactory::newBooleanBlockBuilder), + INT(BlockFactory::newIntBlockBuilder), + LONG(BlockFactory::newLongBlockBuilder), + DOUBLE(BlockFactory::newDoubleBlockBuilder), /** * Blocks containing only null values. */ - NULL((estimatedSize, blockFactory) -> new ConstantNullBlock.Builder(blockFactory)), + NULL((blockFactory, estimatedSize) -> new ConstantNullBlock.Builder(blockFactory)), - BYTES_REF(BytesRefBlock::newBlockBuilder), + BYTES_REF(BlockFactory::newBytesRefBlockBuilder), /** * Blocks that reference individual lucene documents. @@ -32,10 +32,10 @@ public enum ElementType { /** * Intermediate blocks which don't support retrieving elements. */ - UNKNOWN((estimatedSize, blockFactory) -> { throw new UnsupportedOperationException("can't build null blocks"); }); + UNKNOWN((blockFactory, estimatedSize) -> { throw new UnsupportedOperationException("can't build null blocks"); }); - interface BuilderSupplier { - Block.Builder newBlockBuilder(int estimatedSize, BlockFactory blockFactory); + private interface BuilderSupplier { + Block.Builder newBlockBuilder(BlockFactory blockFactory, int estimatedSize); } private final BuilderSupplier builder; @@ -44,20 +44,11 @@ interface BuilderSupplier { this.builder = builder; } - /** - * Create a new {@link Block.Builder} for blocks of this type. - * @deprecated use {@link #newBlockBuilder(int, BlockFactory)} - */ - @Deprecated - public Block.Builder newBlockBuilder(int estimatedSize) { - return builder.newBlockBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - /** * Create a new {@link Block.Builder} for blocks of this type. 
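* The builder is created through the supplied {@link BlockFactory}, so the memory it grows into is tracked by that factory's circuit breaker.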
*/ public Block.Builder newBlockBuilder(int estimatedSize, BlockFactory blockFactory) { - return builder.newBlockBuilder(estimatedSize, blockFactory); + return builder.newBlockBuilder(blockFactory, estimatedSize); } public static ElementType fromJava(Class type) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/SingletonOrdinalsBuilder.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/SingletonOrdinalsBuilder.java index 703d882b91029..fb83432ba0565 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/SingletonOrdinalsBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/SingletonOrdinalsBuilder.java @@ -27,7 +27,7 @@ public class SingletonOrdinalsBuilder implements BlockLoader.SingletonOrdinalsBu public SingletonOrdinalsBuilder(BlockFactory blockFactory, SortedDocValues docValues, int count) { this.blockFactory = blockFactory; this.docValues = docValues; - blockFactory.adjustBreaker(ordsSize(count), false); + blockFactory.adjustBreaker(ordsSize(count)); this.ords = new int[count]; } @@ -62,7 +62,7 @@ public BytesRefBlock build() { try { long breakerSize = ordsSize(ords.length); // Increment breaker for sorted ords. - blockFactory.adjustBreaker(breakerSize, false); + blockFactory.adjustBreaker(breakerSize); try { int[] sortedOrds = ords.clone(); Arrays.sort(sortedOrds); @@ -70,7 +70,7 @@ public BytesRefBlock build() { try (BreakingBytesRefBuilder copies = new BreakingBytesRefBuilder(blockFactory.breaker(), "ords")) { long offsetsAndLength = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (uniqueCount + 1) * Integer.BYTES; - blockFactory.adjustBreaker(offsetsAndLength, false); + blockFactory.adjustBreaker(offsetsAndLength); breakerSize += offsetsAndLength; int[] offsets = new int[uniqueCount + 1]; for (int o = 0; o < uniqueCount; o++) { @@ -102,7 +102,7 @@ public BytesRefBlock build() { } } } finally { - blockFactory.adjustBreaker(-breakerSize, false); + blockFactory.adjustBreaker(-breakerSize); } } catch (IOException e) { throw new UncheckedIOException("error resolving ordinals", e); @@ -111,7 +111,7 @@ public BytesRefBlock build() { @Override public void close() { - blockFactory.adjustBreaker(-ordsSize(ords.length), false); + blockFactory.adjustBreaker(-ordsSize(ords.length)); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java index 0ca06498f7129..fc09f636ac700 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java @@ -8,15 +8,16 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.Accountable; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; /** * A dense Vector of single values. 
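* Vectors are {@link RefCounted} so that block views created by {@link #asBlock} can share the underlying storage instead of copying it.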
*/ -public interface Vector extends Accountable, Releasable { +public interface Vector extends Accountable, RefCounted, Releasable { /** - * {@return Returns a Block view over this vector.} + * {@return a new Block containing this vector.} */ Block asBlock(); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st index 03397e1a2e5ad..e24d355bf2c24 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st @@ -15,32 +15,44 @@ import org.elasticsearch.core.Releasables; $else$ import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.core.Releasables; -import java.util.Arrays; $endif$ import java.util.BitSet; /** - * Block implementation that stores an array of $type$. + * Block implementation that stores values in a {@link $Type$ArrayVector}. +$if(BytesRef)$ + * Does not take ownership of the given {@link BytesRefArray} and does not adjust circuit breakers to account for it. +$endif$ * This class is generated. Do not edit it. */ -public final class $Type$ArrayBlock extends AbstractArrayBlock implements $Type$Block { +final class $Type$ArrayBlock extends AbstractArrayBlock implements $Type$Block { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance($Type$ArrayBlock.class); -$if(BytesRef)$ - private final BytesRefArray values; + private final $Type$ArrayVector vector; -$else$ - private final $type$[] values; -$endif$ - - public $Type$ArrayBlock($if(BytesRef)$BytesRefArray$else$$type$[]$endif$ values, int positionCount, int[] firstValueIndexes, BitSet nulls, MvOrdering mvOrdering) { - this(values, positionCount, firstValueIndexes, nulls, mvOrdering, BlockFactory.getNonBreakingInstance()); + $Type$ArrayBlock( + $if(BytesRef)$BytesRefArray$else$$type$[]$endif$ values, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + this( + new $Type$ArrayVector(values, firstValueIndexes == null ? positionCount : firstValueIndexes[positionCount], blockFactory), + positionCount, + firstValueIndexes, + nulls, + mvOrdering, + blockFactory + ); } - public $Type$ArrayBlock( - $if(BytesRef)$BytesRefArray$else$$type$[]$endif$ values, + private $Type$ArrayBlock( + $Type$ArrayVector vector, int positionCount, int[] firstValueIndexes, BitSet nulls, @@ -48,7 +60,10 @@ $endif$ BlockFactory blockFactory ) { super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); - this.values = values; + this.vector = vector; + assert firstValueIndexes == null + ? vector.getPositionCount() == getPositionCount() + : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); } @Override @@ -59,15 +74,16 @@ $endif$ @Override $if(BytesRef)$ public BytesRef getBytesRef(int valueIndex, BytesRef dest) { - return values.get(valueIndex, dest); + return vector.getBytesRef(valueIndex, dest); $else$ public $type$ get$Type$(int valueIndex) { - return values[valueIndex]; + return vector.get$Type$(valueIndex); $endif$ } @Override public $Type$Block filter(int...
positions) { + // TODO use reference counting to share the vector $if(BytesRef)$ final BytesRef scratch = new BytesRef(); $endif$ @@ -104,38 +120,37 @@ $endif$ incRef(); return this; } - // TODO use reference counting to share the values -$if(BytesRef)$ - final BytesRef scratch = new BytesRef(); -$endif$ - try (var builder = blockFactory().new$Type$BlockBuilder(firstValueIndexes[getPositionCount()])) { - for (int pos = 0; pos < getPositionCount(); pos++) { - if (isNull(pos)) { - builder.appendNull(); - continue; - } - int first = getFirstValueIndex(pos); - int end = first + getValueCount(pos); - for (int i = first; i < end; i++) { -$if(BytesRef)$ - builder.append$Type$(get$Type$(i, scratch)); -$else$ - builder.append$Type$(get$Type$(i)); -$endif$ - } - } - return builder.mvOrdering(MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING).build(); + if (nullsMask == null) { + vector.incRef(); + return vector.asBlock(); } + + // The following line is correct because positions with multi-values are never null. + int expandedPositionCount = vector.getPositionCount(); + long bitSetRamUsedEstimate = Math.max(nullsMask.size(), BlockRamUsageEstimator.sizeOfBitSet(expandedPositionCount)); + blockFactory().adjustBreaker(bitSetRamUsedEstimate); + + $Type$ArrayBlock expanded = new $Type$ArrayBlock( + vector, + expandedPositionCount, + null, + shiftNullsToExpandedPositions(), + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + blockFactory() + ); + blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); + // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. + vector.incRef(); + return expanded; } - public static long ramBytesEstimated($if(BytesRef)$BytesRefArray$else$$type$[]$endif$ values, int[] firstValueIndexes, BitSet nullsMask) { - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) - + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); } @Override public long ramBytesUsed() { - return ramBytesEstimated(values, firstValueIndexes, nullsMask); + return ramBytesUsedOnlyBlock() + vector.ramBytesUsed(); } @Override @@ -158,23 +173,20 @@ $endif$ + getPositionCount() + ", mvOrdering=" + mvOrdering() -$if(BytesRef)$ - + ", values=" - + values.size() -$else$ - + ", values=" - + Arrays.toString(values) -$endif$ + + ", vector=" + + vector + ']'; } + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + @Override public void closeInternal() { - $if(BytesRef)$ - blockFactory().adjustBreaker(-ramBytesUsed() + values.bigArraysRamBytesUsed(), true); - Releasables.closeExpectNoException(values); - $else$ - blockFactory().adjustBreaker(-ramBytesUsed(), true); - $endif$ + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); + Releasables.closeExpectNoException(vector); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st index 4dd903945d04f..8815a3e463a65 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st @@ -21,9 +21,12 
@@ $endif$ /** * Vector implementation that stores an array of $type$ values. +$if(BytesRef)$ + * Does not take ownership of the given {@link BytesRefArray} and does not adjust circuit breakers to account for it. +$endif$ * This class is generated. Do not edit it. */ -public final class $Type$ArrayVector extends AbstractVector implements $Type$Vector { +final class $Type$ArrayVector extends AbstractVector implements $Type$Vector { static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance($Type$ArrayVector.class); @@ -34,21 +37,14 @@ $else$ private final $type$[] values; $endif$ - private final $Type$Block block; - - public $Type$ArrayVector($if(BytesRef)$BytesRefArray$else$$type$[]$endif$ values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public $Type$ArrayVector($if(BytesRef)$BytesRefArray$else$$type$[]$endif$ values, int positionCount, BlockFactory blockFactory) { + $Type$ArrayVector($if(BytesRef)$BytesRefArray$else$$type$[]$endif$ values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new $Type$VectorBlock(this); } @Override public $Type$Block asBlock() { - return block; + return new $Type$VectorBlock(this); } $if(BytesRef)$ @@ -124,12 +120,10 @@ $endif$ $if(BytesRef)$ @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; - blockFactory().adjustBreaker(-ramBytesUsed() + values.bigArraysRamBytesUsed(), true); + public void closeInternal() { + // The circuit breaker that tracks the values {@link BytesRefArray} is adjusted outside + // of this class. + blockFactory().adjustBreaker(-ramBytesUsed() + values.bigArraysRamBytesUsed()); Releasables.closeExpectNoException(values); } $endif$ diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st new file mode 100644 index 0000000000000..71d6005a9fc17 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st @@ -0,0 +1,174 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.$Array$; +import org.elasticsearch.core.Releasables; + +import java.util.BitSet; + +/** + * Block implementation that stores values in a {@link $Type$BigArrayVector}. Does not take ownership of the given + * {@link $Array$} and does not adjust circuit breakers to account for it. + * This class is generated. Do not edit it. + */ +public final class $Type$BigArrayBlock extends AbstractArrayBlock implements $Type$Block { + + private static final long BASE_RAM_BYTES_USED = 0; // TODO: fix this + private final $Type$BigArrayVector vector; + + public $Type$BigArrayBlock( + $Array$ values, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + this( + new $Type$BigArrayVector(values, firstValueIndexes == null ? 
positionCount : firstValueIndexes[positionCount], blockFactory), + positionCount, + firstValueIndexes, + nulls, + mvOrdering, + blockFactory + ); + } + + private $Type$BigArrayBlock( + $Type$BigArrayVector vector, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + this.vector = vector; + assert firstValueIndexes == null + ? vector.getPositionCount() == getPositionCount() + : firstValueIndexes[getPositionCount()] == vector.getPositionCount(); + } + + @Override + public $Type$Vector asVector() { + return null; + } + + @Override + public $type$ get$Type$(int valueIndex) { + return vector.get$Type$(valueIndex); + } + + @Override + public $Type$Block filter(int... positions) { + // TODO use reference counting to share the vector + try (var builder = blockFactory().new$Type$BlockBuilder(positions.length)) { + for (int pos : positions) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int valueCount = getValueCount(pos); + int first = getFirstValueIndex(pos); + if (valueCount == 1) { + builder.append$Type$(get$Type$(getFirstValueIndex(pos)$if(BytesRef)$, scratch$endif$)); + } else { + builder.beginPositionEntry(); + for (int c = 0; c < valueCount; c++) { + builder.append$Type$(get$Type$(first + c$if(BytesRef)$, scratch$endif$)); + } + builder.endPositionEntry(); + } + } + return builder.mvOrdering(mvOrdering()).build(); + } + } + + @Override + public ElementType elementType() { + return ElementType.$TYPE$; + } + + @Override + public $Type$Block expand() { + if (firstValueIndexes == null) { + incRef(); + return this; + } + if (nullsMask == null) { + vector.incRef(); + return vector.asBlock(); + } + + // The following line is correct because positions with multi-values are never null. + int expandedPositionCount = vector.getPositionCount(); + long bitSetRamUsedEstimate = Math.max(nullsMask.size(), BlockRamUsageEstimator.sizeOfBitSet(expandedPositionCount)); + blockFactory().adjustBreaker(bitSetRamUsedEstimate); + + $Type$BigArrayBlock expanded = new $Type$BigArrayBlock( + vector, + expandedPositionCount, + null, + shiftNullsToExpandedPositions(), + MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + blockFactory() + ); + blockFactory().adjustBreaker(expanded.ramBytesUsedOnlyBlock() - bitSetRamUsedEstimate); + // We need to incRef after adjusting any breakers, otherwise we might leak the vector if the breaker trips. 
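+ // The two breaker adjustments above net out to exactly ramBytesUsedOnlyBlock() for the expanded block.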
+ vector.incRef(); + return expanded; + } + + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + } + + @Override + public long ramBytesUsed() { + return ramBytesUsedOnlyBlock() + RamUsageEstimator.sizeOf(vector); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof $Type$Block that) { + return $Type$Block.equals(this, that); + } + return false; + } + + @Override + public int hashCode() { + return $Type$Block.hash(this); + } + + @Override + public String toString() { + return getClass().getSimpleName() + + "[positions=" + + getPositionCount() + + ", mvOrdering=" + + mvOrdering() + + ", ramBytesUsed=" + + vector.ramBytesUsed() + + ']'; + } + + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + + @Override + public void closeInternal() { + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock()); + Releasables.closeExpectNoException(vector); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st index 6a231d9ff6bf3..addca35643dd6 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st @@ -8,34 +8,28 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.common.util.$if(boolean)$Bit$else$$Type$$endif$Array; +import org.elasticsearch.common.util.$Array$; import org.elasticsearch.core.Releasable; /** - * Vector implementation that defers to an enclosed $Type$Array. + * Vector implementation that defers to an enclosed {@link $if(boolean)$Bit$else$$Type$$endif$Array}. + * Does not take ownership of the array and does not adjust circuit breakers to account for it. * This class is generated. Do not edit it. */ public final class $Type$BigArrayVector extends AbstractVector implements $Type$Vector, Releasable { - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance($Type$BigArrayVector.class); + private static final long BASE_RAM_BYTES_USED = 0; // FIXME - private final $if(boolean)$Bit$else$$Type$$endif$Array values; + private final $Array$ values; - private final $Type$Block block; - - public $Type$BigArrayVector($if(boolean)$Bit$else$$Type$$endif$Array values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public $Type$BigArrayVector($if(boolean)$Bit$else$$Type$$endif$Array values, int positionCount, BlockFactory blockFactory) { + public $Type$BigArrayVector($Array$ values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new $Type$VectorBlock(this); } @Override public $Type$Block asBlock() { - return block; + return new $Type$VectorBlock(this); } @Override @@ -61,28 +55,29 @@ public final class $Type$BigArrayVector extends AbstractVector implements $Type$ @Override public $Type$Vector filter(int... 
positions) { var blockFactory = blockFactory(); - $if(boolean)$ + $if(boolean)$ final BitArray filtered = new BitArray(positions.length, blockFactory.bigArrays()); + $else$ + final $Array$ filtered = blockFactory.bigArrays().new$Array$(positions.length); + $endif$ + $if(boolean)$ for (int i = 0; i < positions.length; i++) { if (values.get(positions[i])) { filtered.set(i); } } - $else$ - final $Type$Array filtered = blockFactory.bigArrays().new$Type$Array(positions.length, true); + $else$ for (int i = 0; i < positions.length; i++) { filtered.set(i, values.get(positions[i])); } - $endif$ + $endif$ return new $Type$BigArrayVector(filtered, positions.length, blockFactory); } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; + public void closeInternal() { + // The circuit breaker that tracks the values {@link $if(boolean)$Bit$else$$Type$$endif$Array} is adjusted outside + // of this class. values.close(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st index 2ff537016459c..c5fd7e8302776 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st @@ -9,7 +9,6 @@ package org.elasticsearch.compute.data; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; -$else$ $endif$ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; @@ -22,7 +21,7 @@ import java.io.IOException; * Block that stores $type$ values. * This class is generated. Do not edit it. */ -public sealed interface $Type$Block extends Block permits $Type$ArrayBlock, $Type$VectorBlock, ConstantNullBlock { +public sealed interface $Type$Block extends Block permits $Type$ArrayBlock, $Type$VectorBlock, ConstantNullBlock$if(BytesRef)$$else$, $Type$BigArrayBlock$endif$ { $if(BytesRef)$ BytesRef NULL_VALUE = new BytesRef(); @@ -104,11 +103,11 @@ $endif$ final int valueCount = getValueCount(pos); out.writeVInt(valueCount); for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { - $if(BytesRef)$ +$if(BytesRef)$ out.write$Type$(get$Type$(getFirstValueIndex(pos) + valueIndex, new BytesRef())); - $else$ +$else$ out.write$Type$(get$Type$(getFirstValueIndex(pos) + valueIndex)); - $endif$ +$endif$ } } } @@ -203,44 +202,6 @@ $endif$ return result; } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#new$Type$BlockBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newBlockBuilder(int estimatedSize) { - return newBlockBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a builder. - * @deprecated use {@link BlockFactory#new$Type$BlockBuilder} - */ - @Deprecated - static Builder newBlockBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.new$Type$BlockBuilder(estimatedSize); - } - - /** - * Returns a constant block built by the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. 
- * @deprecated use {@link BlockFactory#newConstant$Type$BlockWith} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static $Type$Block newConstantBlockWith($type$ value, int positions) { - return newConstantBlockWith(value, positions, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a constant block. - * @deprecated use {@link BlockFactory#newConstant$Type$BlockWith} - */ - @Deprecated - static $Type$Block newConstantBlockWith($type$ value, int positions, BlockFactory blockFactory) { - return blockFactory.newConstant$Type$BlockWith(value, positions); - } - /** * Builder for {@link $Type$Block} */ diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BlockBuilder.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BlockBuilder.java.st index 61527f166cfa9..f4ee6c145f3ed 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BlockBuilder.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BlockBuilder.java.st @@ -17,6 +17,7 @@ import org.elasticsearch.core.Releasables; $else$ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.util.$Array$; import java.util.Arrays; $endif$ @@ -246,60 +247,107 @@ $endif$ return this; } +$if(BytesRef)$ + private $Type$Block buildFromBytesArray() { + assert estimatedBytes == 0 || firstValueIndexes != null; + final $Type$Block theBlock; + if (hasNonNullValue && positionCount == 1 && valueCount == 1) { + theBlock = new ConstantBytesRefVector(BytesRef.deepCopyOf(values.get(0, new BytesRef())), 1, blockFactory).asBlock(); + /* + * Update the breaker with the actual bytes used. + * If we break here we will throw away the used memory, letting + * it be deallocated. The exception will bubble up and the builder will + * still technically be open, meaning the calling code should close it + * which will return all used memory to the breaker. + */ + blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes); + Releasables.closeExpectNoException(values); + } else { + if (isDense() && singleValued()) { + theBlock = new $Type$ArrayVector(values, positionCount, blockFactory).asBlock(); + } else { + theBlock = new $Type$ArrayBlock(values, positionCount, firstValueIndexes, nullsMask, mvOrdering, blockFactory); + } + /* + * Update the breaker with the actual bytes used. + * If we break here we will throw away the used memory, letting + * it be deallocated. The exception will bubble up and the builder will + * still technically be open, meaning the calling code should close it + * which will return all used memory to the breaker.
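+ * Note that the builder already charged estimatedBytes to the breaker while accumulating values, so only the difference is charged here.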
+ */ + blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes - values.bigArraysRamBytesUsed()); + } + return theBlock; + } + +$else$ + private $Type$Block buildBigArraysBlock() { + final $Type$Block theBlock; + $if(boolean)$ + final BitArray array = new BitArray(valueCount, blockFactory.bigArrays()); + for (int i = 0; i < valueCount; i++) { + if (values[i]) { + array.set(i); + } + } + $else$ + final $Array$ array = blockFactory.bigArrays().new$Array$(valueCount, false); + for (int i = 0; i < valueCount; i++) { + array.set(i, values[i]); + } + $endif$ + if (isDense() && singleValued()) { + theBlock = new $Type$BigArrayVector(array, positionCount, blockFactory).asBlock(); + } else { + theBlock = new $Type$BigArrayBlock(array, positionCount, firstValueIndexes, nullsMask, mvOrdering, blockFactory); + } + /* + * Update the breaker with the actual bytes used. + * If we break here we will throw away the used memory, letting + * it be deallocated. The exception will bubble up and the builder will + * still technically be open, meaning the calling code should close it + * which will return all used memory to the breaker. + */ + blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes - array.ramBytesUsed()); + return theBlock; + } +$endif$ + @Override public $Type$Block build() { try { finish(); $Type$Block theBlock; $if(BytesRef)$ - assert estimatedBytes == 0 || firstValueIndexes != null; - if (hasNonNullValue && positionCount == 1 && valueCount == 1) { - theBlock = new ConstantBytesRefVector(BytesRef.deepCopyOf(values.get(0, new BytesRef())), 1, blockFactory).asBlock(); - /* - * Update the breaker with the actual bytes used. - * We pass false below even though we've used the bytes. That's weird, - * but if we break here we will throw away the used memory, letting - * it be deallocated. The exception will bubble up and the builder will - * still technically be open, meaning the calling code should close it - * which will return all used memory to the breaker. - */ - blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes, false); - Releasables.closeExpectNoException(values); - } else { - if (isDense() && singleValued()) { - theBlock = new $Type$ArrayVector(values, positionCount, blockFactory).asBlock(); - } else { - theBlock = new $Type$ArrayBlock(values, positionCount, firstValueIndexes, nullsMask, mvOrdering, blockFactory); - } - /* - * Update the breaker with the actual bytes used. - * We pass false below even though we've used the bytes. That's weird, - * but if we break here we will throw away the used memory, letting - * it be deallocated. The exception will bubble up and the builder will - * still technically be open, meaning the calling code should close it
- */ - blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes - values.bigArraysRamBytesUsed(), false); - } + theBlock = buildFromBytesArray(); values = null; $else$ if (hasNonNullValue && positionCount == 1 && valueCount == 1) { theBlock = blockFactory.newConstant$Type$BlockWith(values[0], 1, estimatedBytes); } else { - if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { - values = Arrays.copyOf(values, valueCount); - } - if (isDense() && singleValued()) { - theBlock = blockFactory.new$Type$ArrayVector(values, positionCount, estimatedBytes).asBlock(); + if (estimatedBytes > blockFactory.maxPrimitiveArrayBytes()) { + theBlock = buildBigArraysBlock(); } else { - theBlock = blockFactory.new$Type$ArrayBlock( - values, - positionCount, - firstValueIndexes, - nullsMask, - mvOrdering, - estimatedBytes - ); + if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { + adjustBreaker(valueCount * elementSize()); + values = Arrays.copyOf(values, valueCount); + adjustBreaker(-values.length * elementSize()); + } + if (isDense() && singleValued()) { + theBlock = blockFactory.new$Type$ArrayVector(values, positionCount, estimatedBytes).asBlock(); + } else { + theBlock = blockFactory.new$Type$ArrayBlock( + values, + positionCount, + firstValueIndexes, + nullsMask, + mvOrdering, + estimatedBytes + ); + } } } $endif$ diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st index f685d38d6459b..625f014a20ffc 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st @@ -16,7 +16,7 @@ import org.apache.lucene.util.RamUsageEstimator; * Vector implementation that stores a constant $type$ value. * This class is generated. Do not edit it. */ -public final class Constant$Type$Vector extends AbstractVector implements $Type$Vector { +final class Constant$Type$Vector extends AbstractVector implements $Type$Vector { $if(BytesRef)$ static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantBytesRefVector.class) + RamUsageEstimator @@ -27,16 +27,9 @@ $endif$ private final $type$ value; - private final $Type$Block block; - - public Constant$Type$Vector($type$ value, int positionCount) { - this(value, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public Constant$Type$Vector($type$ value, int positionCount, BlockFactory blockFactory) { + Constant$Type$Vector($type$ value, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.value = value; - this.block = new $Type$VectorBlock(this); } @Override @@ -50,12 +43,12 @@ $endif$ @Override public $Type$Block asBlock() { - return block; + return new $Type$VectorBlock(this); } @Override public $Type$Vector filter(int... 
positions) { - return new Constant$Type$Vector(value, positions.length); + return blockFactory().newConstant$Type$Vector(value, positions.length); } @Override @@ -101,13 +94,4 @@ $endif$ public String toString() { return getClass().getSimpleName() + "[positions=" + getPositionCount() + ", value=" + value + ']'; } - - @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; - blockFactory().adjustBreaker(-ramBytesUsed(), true); - } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st index 6ec41ccdc6ab9..c303a8391ad18 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st @@ -143,46 +143,6 @@ $endif$ } } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance nonbreaking block factory}. - * @deprecated use {@link BlockFactory#new$Type$VectorBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newVectorBuilder(int estimatedSize) { - return newVectorBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - -$if(BytesRef)$ - /** - * Creates a builder that grows as needed. - * @deprecated use {@link BlockFactory#new$Type$VectorBuilder} - */ -$else$ - /** - * Creates a builder that grows as needed. Prefer {@link #newVectorFixedBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#new$Type$VectorBuilder} - */ -$endif$ - @Deprecated - static Builder newVectorBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.new$Type$VectorBuilder(estimatedSize); - } - -$if(BytesRef)$ -$else$ - /** - * Creates a builder that never grows. Prefer this over {@link #newVectorBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#new$Type$VectorFixedBuilder} - */ - @Deprecated - static FixedBuilder newVectorFixedBuilder(int size, BlockFactory blockFactory) { - return blockFactory.new$Type$VectorFixedBuilder(size); - } -$endif$ - $if(int)$ /** Create a vector for a range of ints. */ static IntVector range(int startInclusive, int endExclusive, BlockFactory blockFactory) { @@ -197,7 +157,11 @@ $endif$ /** * A builder that grows as needed. */ +$if(BytesRef)$ sealed interface Builder extends Vector.Builder permits $Type$VectorBuilder { +$else$ + sealed interface Builder extends Vector.Builder permits $Type$VectorBuilder, FixedBuilder { +$endif$ /** * Appends a $type$ to the current entry. */ @@ -212,14 +176,12 @@ $else$ /** * A builder that never grows. */ - sealed interface FixedBuilder extends Vector.Builder permits $Type$VectorFixedBuilder { + sealed interface FixedBuilder extends Builder permits $Type$VectorFixedBuilder { /** * Appends a $type$ to the current entry. 
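* The backing array is preallocated, so at most the declared number of values may be appended.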
*/ - FixedBuilder append$Type$($type$ value); - @Override - $Type$Vector build(); + FixedBuilder append$Type$($type$ value); } $endif$ } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st index 8772e633ff14b..4bc3c66b65743 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st @@ -13,7 +13,7 @@ $endif$ import org.elasticsearch.core.Releasables; /** - * Block view of a $Type$Vector. + * Block view of a {@link $Type$Vector}. Cannot represent multi-values or nulls. * This class is generated. Do not edit it. */ public final class $Type$VectorBlock extends AbstractVectorBlock implements $Type$Block { @@ -81,11 +81,6 @@ $endif$ return getClass().getSimpleName() + "[vector=" + vector + "]"; } - @Override - public boolean isReleased() { - return super.isReleased() || vector.isReleased(); - } - @Override public void closeInternal() { assert (vector.isReleased() == false) : "can't release block [" + this + "] containing already released vector"; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBuilder.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBuilder.java.st index 3241a372b7d54..da074c75f7c4d 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBuilder.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBuilder.java.st @@ -97,7 +97,7 @@ $if(BytesRef)$ * still technically be open, meaning the calling code should close it * which will return all used memory to the breaker. */ - blockFactory.adjustBreaker(vector.ramBytesUsed(), false); + blockFactory.adjustBreaker(vector.ramBytesUsed()); Releasables.closeExpectNoException(values); } else { vector = new $Type$ArrayVector(values, valueCount, blockFactory); @@ -109,7 +109,7 @@ $if(BytesRef)$ * still technically be open, meaning the calling code should close it * which will return all used memory to the breaker. 
*/ - blockFactory.adjustBreaker(vector.ramBytesUsed() - values.bigArraysRamBytesUsed(), false); + blockFactory.adjustBreaker(vector.ramBytesUsed() - values.bigArraysRamBytesUsed()); } values = null; $else$ diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorFixedBuilder.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorFixedBuilder.java.st index dfe5bb7622b2a..43401d59095f4 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorFixedBuilder.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorFixedBuilder.java.st @@ -27,7 +27,7 @@ final class $Type$VectorFixedBuilder implements $Type$Vector.FixedBuilder { $Type$VectorFixedBuilder(int size, BlockFactory blockFactory) { preAdjustedBytes = ramBytesUsed(size); - blockFactory.adjustBreaker(preAdjustedBytes, false); + blockFactory.adjustBreaker(preAdjustedBytes); this.blockFactory = blockFactory; this.values = new $type$[size]; } @@ -70,7 +70,7 @@ final class $Type$VectorFixedBuilder implements $Type$Vector.FixedBuilder { if (nextIndex >= 0) { // If nextIndex < 0 we've already built the vector nextIndex = -1; - blockFactory.adjustBreaker(-preAdjustedBytes, false); + blockFactory.adjustBreaker(-preAdjustedBytes); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/BlockReaderFactories.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/BlockReaderFactories.java index 967111a09f564..95b3ee9c10ff0 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/BlockReaderFactories.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/BlockReaderFactories.java @@ -9,13 +9,11 @@ import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.index.mapper.BlockLoader; +import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.lookup.SearchLookup; -import java.util.ArrayList; -import java.util.List; import java.util.Set; /** @@ -26,56 +24,51 @@ private BlockReaderFactories() {} /** * Resolves *how* ESQL loads field values. - * @param searchContexts a search context per search index we're loading - * field from + * @param ctx a search context for the index we're loading the field from * @param fieldName the name of the field to load * @param asUnsupportedSource should the field be loaded as "unsupported"?
* These will always have {@code null} values */ - public static List loaders(List searchContexts, String fieldName, boolean asUnsupportedSource) { - List loaders = new ArrayList<>(searchContexts.size()); - - for (SearchContext searchContext : searchContexts) { - SearchExecutionContext ctx = searchContext.getSearchExecutionContext(); - if (asUnsupportedSource) { - loaders.add(BlockLoader.CONSTANT_NULLS); - continue; + public static BlockLoader loader(SearchExecutionContext ctx, String fieldName, boolean asUnsupportedSource) { + if (asUnsupportedSource) { + return BlockLoader.CONSTANT_NULLS; + } + MappedFieldType fieldType = ctx.getFieldType(fieldName); + if (fieldType == null) { + // the field does not exist in this context + return BlockLoader.CONSTANT_NULLS; + } + BlockLoader loader = fieldType.blockLoader(new MappedFieldType.BlockLoaderContext() { + @Override + public String indexName() { + return ctx.getFullyQualifiedIndex().getName(); } - MappedFieldType fieldType = ctx.getFieldType(fieldName); - if (fieldType == null) { - // the field does not exist in this context - loaders.add(BlockLoader.CONSTANT_NULLS); - continue; + + @Override + public SearchLookup lookup() { + return ctx.lookup(); } - BlockLoader loader = fieldType.blockLoader(new MappedFieldType.BlockLoaderContext() { - @Override - public String indexName() { - return ctx.getFullyQualifiedIndex().getName(); - } - @Override - public SearchLookup lookup() { - return ctx.lookup(); - } + @Override + public Set sourcePaths(String name) { + return ctx.sourcePath(name); + } - @Override - public Set sourcePaths(String name) { - return ctx.sourcePath(name); - } + @Override + public String parentField(String field) { + return ctx.parentPath(field); + } - @Override - public String parentField(String field) { - return ctx.parentPath(field); - } - }); - if (loader == null) { - HeaderWarning.addWarning("Field [{}] cannot be retrieved, it is unsupported or not indexed; returning null", fieldName); - loaders.add(BlockLoader.CONSTANT_NULLS); - continue; + @Override + public FieldNamesFieldMapper.FieldNamesFieldType fieldNames() { + return (FieldNamesFieldMapper.FieldNamesFieldType) ctx.lookup().fieldType(FieldNamesFieldMapper.NAME); } - loaders.add(loader); + }); + if (loader == null) { + HeaderWarning.addWarning("Field [{}] cannot be retrieved, it is unsupported or not indexed; returning null", fieldName); + return BlockLoader.CONSTANT_NULLS; } - return loaders; + return loader; } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/IdFieldIndexFieldData.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/IdFieldIndexFieldData.java deleted file mode 100644 index d91c758ab3bd9..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/IdFieldIndexFieldData.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.compute.lucene; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.SortField; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.fielddata.IndexFieldData; -import org.elasticsearch.index.fielddata.LeafFieldData; -import org.elasticsearch.index.fielddata.SortedBinaryDocValues; -import org.elasticsearch.index.fieldvisitor.LeafStoredFieldLoader; -import org.elasticsearch.index.fieldvisitor.StoredFieldLoader; -import org.elasticsearch.index.mapper.IdFieldMapper; -import org.elasticsearch.script.field.DocValuesScriptFieldFactory; -import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.MultiValueMode; -import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.sort.BucketedSort; -import org.elasticsearch.search.sort.SortOrder; - -import java.io.IOException; -import java.util.Set; - -public class IdFieldIndexFieldData implements IndexFieldData { - - private static final String FIELD_NAME = IdFieldMapper.NAME; - private final ValuesSourceType valuesSourceType; - private final StoredFieldLoader loader; - - protected IdFieldIndexFieldData(ValuesSourceType valuesSourceType) { - this.valuesSourceType = valuesSourceType; - this.loader = StoredFieldLoader.create(false, Set.of(FIELD_NAME)); - } - - @Override - public String getFieldName() { - return FIELD_NAME; - } - - @Override - public ValuesSourceType getValuesSourceType() { - return valuesSourceType; - } - - @Override - public final IdFieldLeafFieldData load(LeafReaderContext context) { - try { - return loadDirect(context); - } catch (Exception e) { - throw ExceptionsHelper.convertToElastic(e); - } - } - - @Override - public final IdFieldLeafFieldData loadDirect(LeafReaderContext context) throws Exception { - return new IdFieldLeafFieldData(loader.getLoader(context, null)); - } - - @Override - public SortField sortField(Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) { - throw new IllegalArgumentException("not supported for stored field fallback"); - } - - @Override - public BucketedSort newBucketedSort( - BigArrays bigArrays, - Object missingValue, - MultiValueMode sortMode, - XFieldComparatorSource.Nested nested, - SortOrder sortOrder, - DocValueFormat format, - int bucketSize, - BucketedSort.ExtraData extra - ) { - throw new IllegalArgumentException("not supported for stored field fallback"); - } - - class IdFieldLeafFieldData implements LeafFieldData { - private final LeafStoredFieldLoader loader; - - protected IdFieldLeafFieldData(LeafStoredFieldLoader loader) { - this.loader = loader; - } - - @Override - public DocValuesScriptFieldFactory getScriptFieldFactory(String name) { - throw new IllegalArgumentException("not supported for _id field"); - } - - @Override - public long ramBytesUsed() { - return 0L; - } - - @Override - public void close() {} - - @Override - public SortedBinaryDocValues getBytesValues() { - return new SortedBinaryDocValues() { - private String id; - - @Override - public boolean advanceExact(int doc) throws IOException { - loader.advanceTo(doc); - id = loader.id(); - return id != null; - } - - @Override - public int docValueCount() { - return 1; - } - - @Override - public BytesRef nextValue() throws IOException { - return new BytesRef(id); - } - }; - } - } -} diff --git 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java index 75bd230638928..4ed32d6552497 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java @@ -160,8 +160,8 @@ public Page getOutput() { LongBlock count = null; BooleanBlock seen = null; try { - count = LongBlock.newConstantBlockWith(totalHits, PAGE_SIZE, blockFactory); - seen = BooleanBlock.newConstantBlockWith(true, PAGE_SIZE, blockFactory); + count = blockFactory.newConstantLongBlockWith(totalHits, PAGE_SIZE); + seen = blockFactory.newConstantBooleanBlockWith(true, PAGE_SIZE); page = new Page(PAGE_SIZE, count, seen); } finally { if (page == null) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java index 6536b08cd2419..21b2a4cfaeb0b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java @@ -16,6 +16,7 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; @@ -30,8 +31,13 @@ import java.io.IOException; import java.io.UncheckedIOException; +import java.util.Collections; +import java.util.HashSet; import java.util.Objects; +import java.util.Set; +import java.util.TreeSet; import java.util.function.Function; +import java.util.stream.Collectors; public abstract class LuceneOperator extends SourceOperator { private static final Logger logger = LogManager.getLogger(LuceneOperator.class); @@ -40,10 +46,16 @@ public abstract class LuceneOperator extends SourceOperator { protected final BlockFactory blockFactory; - private int processSlices; + /** + * Count of the number of slices processed. 
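The new `processedQueries` and `processedShards` sets travel over the wire in `Status`, and the (de)serialization a few hunks below gates them on a transport version so mixed-version clusters keep working. A minimal sketch of that symmetric pattern, as a fragment with `NEW_VERSION` standing in for the real `TransportVersions` constant:

```java
// Write side: only emit the new fields to nodes that understand them.
if (out.getTransportVersion().onOrAfter(NEW_VERSION)) {
    out.writeCollection(processedQueries, StreamOutput::writeString);
    out.writeCollection(processedShards, StreamOutput::writeString);
}
// Read side: default to empty sets when the sender is older.
Set<String> processedQueries = in.getTransportVersion().onOrAfter(NEW_VERSION)
    ? in.readCollectionAsSet(StreamInput::readString)
    : Collections.emptySet();
```

The same version check must guard both sides; otherwise the stream desynchronizes.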
+ */ + private int processedSlices; final int maxPageSize; private final LuceneSliceQueue sliceQueue; + private final Set processedQueries = new HashSet<>(); + private final Set processedShards = new HashSet<>(); + private LuceneSlice currentSlice; private int sliceIndex; @@ -52,7 +64,7 @@ public abstract class LuceneOperator extends SourceOperator { int pagesEmitted; boolean doneCollecting; - public LuceneOperator(BlockFactory blockFactory, int maxPageSize, LuceneSliceQueue sliceQueue) { + protected LuceneOperator(BlockFactory blockFactory, int maxPageSize, LuceneSliceQueue sliceQueue) { this.blockFactory = blockFactory; this.maxPageSize = maxPageSize; this.sliceQueue = sliceQueue; @@ -73,18 +85,23 @@ LuceneScorer getCurrentOrLoadNextScorer() { if (currentSlice == null) { doneCollecting = true; return null; - } else { - processSlices++; } if (currentSlice.numLeaves() == 0) { continue; } + processedSlices++; + processedShards.add( + currentSlice.searchContext().getSearchExecutionContext().getFullyQualifiedIndex().getName() + + ":" + + currentSlice.searchContext().getSearchExecutionContext().getShardId() + ); } final PartialLeafReaderContext partialLeaf = currentSlice.getLeaf(sliceIndex++); logger.trace("Starting {}", partialLeaf); final LeafReaderContext leaf = partialLeaf.leafReaderContext(); if (currentScorer == null || currentScorer.leafReaderContext() != leaf) { final Weight weight = currentSlice.weight().get(); + processedQueries.add(weight.getQuery()); currentScorer = new LuceneScorer(currentSlice.shardIndex(), currentSlice.searchContext(), weight, leaf); } assert currentScorer.maxPosition <= partialLeaf.maxDoc() : currentScorer.maxPosition + ">" + partialLeaf.maxDoc(); @@ -190,6 +207,8 @@ public static class Status implements Operator.Status { ); private final int processedSlices; + private final Set processedQueries; + private final Set processedShards; private final int totalSlices; private final int pagesEmitted; private final int sliceIndex; @@ -198,7 +217,9 @@ public static class Status implements Operator.Status { private final int current; private Status(LuceneOperator operator) { - processedSlices = operator.processSlices; + processedSlices = operator.processedSlices; + processedQueries = operator.processedQueries.stream().map(Query::toString).collect(Collectors.toCollection(TreeSet::new)); + processedShards = new TreeSet<>(operator.processedShards); sliceIndex = operator.sliceIndex; totalSlices = operator.sliceQueue.totalSlices(); LuceneSlice slice = operator.currentSlice; @@ -219,8 +240,20 @@ private Status(LuceneOperator operator) { pagesEmitted = operator.pagesEmitted; } - Status(int processedSlices, int sliceIndex, int totalSlices, int pagesEmitted, int sliceMin, int sliceMax, int current) { + Status( + int processedSlices, + Set processedQueries, + Set processedShards, + int sliceIndex, + int totalSlices, + int pagesEmitted, + int sliceMin, + int sliceMax, + int current + ) { this.processedSlices = processedSlices; + this.processedQueries = processedQueries; + this.processedShards = processedShards; this.sliceIndex = sliceIndex; this.totalSlices = totalSlices; this.pagesEmitted = pagesEmitted; @@ -231,6 +264,13 @@ private Status(LuceneOperator operator) { Status(StreamInput in) throws IOException { processedSlices = in.readVInt(); + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_STATUS_INCLUDE_LUCENE_QUERIES)) { + processedQueries = in.readCollectionAsSet(StreamInput::readString); + processedShards = in.readCollectionAsSet(StreamInput::readString); + } 
else { + processedQueries = Collections.emptySet(); + processedShards = Collections.emptySet(); + } sliceIndex = in.readVInt(); totalSlices = in.readVInt(); pagesEmitted = in.readVInt(); @@ -242,6 +282,10 @@ private Status(LuceneOperator operator) { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(processedSlices); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_STATUS_INCLUDE_LUCENE_QUERIES)) { + out.writeCollection(processedQueries, StreamOutput::writeString); + out.writeCollection(processedShards, StreamOutput::writeString); + } out.writeVInt(sliceIndex); out.writeVInt(totalSlices); out.writeVInt(pagesEmitted); @@ -259,6 +303,14 @@ public int processedSlices() { return processedSlices; } + public Set processedQueries() { + return processedQueries; + } + + public Set processedShards() { + return processedShards; + } + public int sliceIndex() { return sliceIndex; } @@ -287,6 +339,8 @@ public int current() { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field("processed_slices", processedSlices); + builder.field("processed_queries", processedQueries); + builder.field("processed_shards", processedShards); builder.field("slice_index", sliceIndex); builder.field("total_slices", totalSlices); builder.field("pages_emitted", pagesEmitted); @@ -302,6 +356,8 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; Status status = (Status) o; return processedSlices == status.processedSlices + && processedQueries.equals(status.processedQueries) + && processedShards.equals(status.processedShards) && sliceIndex == status.sliceIndex && totalSlices == status.totalSlices && pagesEmitted == status.pagesEmitted diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java index 7b2b276a619c6..b636e4aba8a5e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java @@ -95,7 +95,7 @@ public LuceneSourceOperator(BlockFactory blockFactory, int maxPageSize, LuceneSl super(blockFactory, maxPageSize, sliceQueue); this.minPageSize = Math.max(1, maxPageSize / 2); this.remainingDocs = limit; - this.docsBuilder = IntVector.newVectorBuilder(Math.min(limit, maxPageSize), blockFactory); + this.docsBuilder = blockFactory.newIntVectorBuilder(Math.min(limit, maxPageSize)); this.leafCollector = new LeafCollector() { @Override public void setScorer(Scorable scorer) { @@ -149,10 +149,10 @@ public Page getOutput() { IntBlock leaf = null; IntVector docs = null; try { - shard = IntBlock.newConstantBlockWith(scorer.shardIndex(), currentPagePos, blockFactory); - leaf = IntBlock.newConstantBlockWith(scorer.leafReaderContext().ord, currentPagePos, blockFactory); + shard = blockFactory.newConstantIntBlockWith(scorer.shardIndex(), currentPagePos); + leaf = blockFactory.newConstantIntBlockWith(scorer.leafReaderContext().ord, currentPagePos); docs = docsBuilder.build(); - docsBuilder = IntVector.newVectorBuilder(Math.min(remainingDocs, maxPageSize), blockFactory); + docsBuilder = blockFactory.newIntVectorBuilder(Math.min(remainingDocs, maxPageSize)); page = new Page(currentPagePos, new DocVector(shard.asVector(), leaf.asVector(), docs, true).asBlock()); } finally 
{ if (page == null) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java index 9624fa48ef20d..7f08c8ca66821 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java @@ -200,8 +200,8 @@ private Page emit(boolean startEmitting) { IntVector docs = null; Page page = null; try ( - IntVector.Builder currentSegmentBuilder = IntVector.newVectorBuilder(size, blockFactory); - IntVector.Builder currentDocsBuilder = IntVector.newVectorBuilder(size, blockFactory) + IntVector.Builder currentSegmentBuilder = blockFactory.newIntVectorFixedBuilder(size); + IntVector.Builder currentDocsBuilder = blockFactory.newIntVectorFixedBuilder(size) ) { int start = offset; offset += size; @@ -213,7 +213,7 @@ private Page emit(boolean startEmitting) { currentDocsBuilder.appendInt(doc - leafContexts.get(segment).docBase); // the offset inside the segment } - shard = IntBlock.newConstantBlockWith(perShardCollector.shardIndex, size, blockFactory); + shard = blockFactory.newConstantIntBlockWith(perShardCollector.shardIndex, size); segments = currentSegmentBuilder.build(); docs = currentDocsBuilder.build(); page = new Page(size, new DocVector(shard.asVector(), segments, docs, null).asBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TextValueSource.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TextValueSource.java deleted file mode 100644 index 04dbcd91c18c8..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TextValueSource.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.lucene; - -import org.apache.lucene.index.LeafReaderContext; -import org.elasticsearch.index.fielddata.IndexFieldData; -import org.elasticsearch.index.fielddata.LeafFieldData; -import org.elasticsearch.index.fielddata.SortedBinaryDocValues; -import org.elasticsearch.script.field.TextDocValuesField; -import org.elasticsearch.search.aggregations.support.ValuesSource; - -public class TextValueSource extends ValuesSource.Bytes { - - private final IndexFieldData indexFieldData; - - public TextValueSource(IndexFieldData indexFieldData) { - this.indexFieldData = indexFieldData; - } - - @Override - public SortedBinaryDocValues bytesValues(LeafReaderContext leafReaderContext) { - String fieldName = indexFieldData.getFieldName(); - LeafFieldData fieldData = indexFieldData.load(leafReaderContext); - return ((TextDocValuesFieldWrapper) fieldData.getScriptFieldFactory(fieldName)).bytesValues(); - } - - /** Wrapper around TextDocValuesField that provides access to the SortedBinaryDocValues. 
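On the TopN hunk above, which swaps `newIntVectorBuilder` for `newIntVectorFixedBuilder`: the emitted page size is known exactly at that point, so the fixed builder can pre-reserve its memory in one breaker adjustment (see the `$Type$VectorFixedBuilder` change at the top of this section) instead of growing incrementally. Roughly, assuming only the two factory methods shown in this patch:

```java
// Growable: final size is an estimate; breaker accounting tracks growth.
try (IntVector.Builder growable = blockFactory.newIntVectorBuilder(estimatedSize)) {
    // appendInt(...) as values arrive
}
// Fixed: exact size known up front; one reservation covers the whole vector.
try (IntVector.Builder fixed = blockFactory.newIntVectorFixedBuilder(exactSize)) {
    // appendInt(...) exactly exactSize times
}
```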
*/ - static final class TextDocValuesFieldWrapper extends TextDocValuesField { - TextDocValuesFieldWrapper(SortedBinaryDocValues input, String name) { - super(input, name); - } - - SortedBinaryDocValues bytesValues() { - return input; - } - } -} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java index 826c25f3e7828..b9be899cec4f3 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java @@ -27,6 +27,7 @@ import org.elasticsearch.compute.operator.AbstractPageMappingOperator; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.core.Assertions; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.index.fieldvisitor.StoredFieldLoader; @@ -43,6 +44,7 @@ import java.util.Map; import java.util.Objects; import java.util.TreeMap; +import java.util.function.IntFunction; import java.util.function.Supplier; /** @@ -95,22 +97,25 @@ public String describe() { } } + /** + * Configuration for a field to load. + * + * {@code blockLoader} maps shard index to the {@link BlockLoader}s + * which load the actual blocks. + */ + public record FieldInfo(String name, ElementType type, IntFunction blockLoader) {} + public record ShardContext(IndexReader reader, Supplier newSourceLoader) {} - private final List fields; + private final FieldWork[] fields; private final List shardContexts; private final int docChannel; private final BlockFactory blockFactory; private final Map readersBuilt = new TreeMap<>(); - /** - * Configuration for a field to load. - * - * {@code blockLoaders} is a list, one entry per shard, of - * {@link BlockLoader}s which load the actual blocks. 
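The relocated `FieldInfo` record above replaces its per-shard list of loaders with an expected `ElementType` plus an `IntFunction` keyed by shard index, so loaders are resolved lazily and only for shards actually touched. A hedged construction sketch (the `searchExecutionContexts` list is illustrative):

```java
ValuesSourceReaderOperator.FieldInfo field = new ValuesSourceReaderOperator.FieldInfo(
    "message",
    ElementType.BYTES_REF,
    shardIdx -> BlockReaderFactories.loader(searchExecutionContexts.get(shardIdx), "message", false)
);
```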
- */ - public record FieldInfo(String name, List blockLoaders) {} + int lastShard = -1; + int lastSegment = -1; /** * Creates a new extractor @@ -118,7 +123,7 @@ public record FieldInfo(String name, List blockLoaders) {} * @param docChannel the channel containing the shard, leaf/segment and doc id */ public ValuesSourceReaderOperator(BlockFactory blockFactory, List fields, List shardContexts, int docChannel) { - this.fields = fields.stream().map(f -> new FieldWork(f)).toList(); + this.fields = fields.stream().map(f -> new FieldWork(f)).toArray(FieldWork[]::new); this.shardContexts = shardContexts; this.docChannel = docChannel; this.blockFactory = blockFactory; @@ -128,13 +133,21 @@ public ValuesSourceReaderOperator(BlockFactory blockFactory, List fie protected Page process(Page page) { DocVector docVector = page.getBlock(docChannel).asVector(); - Block[] blocks = new Block[fields.size()]; + Block[] blocks = new Block[fields.length]; boolean success = false; try { if (docVector.singleSegmentNonDecreasing()) { loadFromSingleLeaf(blocks, docVector); } else { - loadFromManyLeaves(blocks, docVector); + try (LoadFromMany many = new LoadFromMany(blocks, docVector)) { + many.run(); + } + } + if (Assertions.ENABLED) { + for (int f = 0; f < fields.length; f++) { + assert blocks[f].elementType() == ElementType.NULL || blocks[f].elementType() == fields[f].info.type + : blocks[f].elementType() + " NOT IN (NULL, " + fields[f].info.type + ")"; + } } success = true; } catch (IOException e) { @@ -147,10 +160,51 @@ protected Page process(Page page) { return page.appendBlocks(blocks); } + private void positionFieldWork(int shard, int segment, int firstDoc) { + if (lastShard == shard) { + if (lastSegment == segment) { + for (FieldWork w : fields) { + w.sameSegment(firstDoc); + } + return; + } + lastSegment = segment; + for (FieldWork w : fields) { + w.sameShardNewSegment(); + } + return; + } + lastShard = shard; + lastSegment = segment; + for (FieldWork w : fields) { + w.newShard(shard); + } + } + + private boolean positionFieldWorkDocGuarteedAscending(int shard, int segment) { + if (lastShard == shard) { + if (lastSegment == segment) { + return false; + } + lastSegment = segment; + for (FieldWork w : fields) { + w.sameShardNewSegment(); + } + return true; + } + lastShard = shard; + lastSegment = segment; + for (FieldWork w : fields) { + w.newShard(shard); + } + return true; + } + private void loadFromSingleLeaf(Block[] blocks, DocVector docVector) throws IOException { int shard = docVector.shards().getInt(0); int segment = docVector.segments().getInt(0); int firstDoc = docVector.docs().getInt(0); + positionFieldWork(shard, segment, firstDoc); IntVector docs = docVector.docs(); BlockLoader.Docs loaderDocs = new BlockLoader.Docs() { @Override @@ -164,24 +218,24 @@ public int get(int i) { } }; StoredFieldsSpec storedFieldsSpec = StoredFieldsSpec.NO_REQUIREMENTS; - List rowStrideReaders = new ArrayList<>(fields.size()); + List rowStrideReaders = new ArrayList<>(fields.length); ComputeBlockLoaderFactory loaderBlockFactory = new ComputeBlockLoaderFactory(blockFactory, docs.getPositionCount()); + LeafReaderContext ctx = ctx(shard, segment); try { - for (int b = 0; b < fields.size(); b++) { - FieldWork field = fields.get(b); - BlockLoader.ColumnAtATimeReader columnAtATime = field.columnAtATime.reader(shard, segment, firstDoc); + for (int f = 0; f < fields.length; f++) { + FieldWork field = fields[f]; + BlockLoader.ColumnAtATimeReader columnAtATime = field.columnAtATime(ctx); if (columnAtATime != null) { - blocks[b] 
= (Block) columnAtATime.read(loaderBlockFactory, loaderDocs); + } else { - BlockLoader.RowStrideReader rowStride = field.rowStride.reader(shard, segment, firstDoc); rowStrideReaders.add( new RowStrideReaderWork( - rowStride, - (Block.Builder) field.info.blockLoaders.get(shard).builder(loaderBlockFactory, docs.getPositionCount()), - b + field.rowStride(ctx), + (Block.Builder) field.loader.builder(loaderBlockFactory, docs.getPositionCount()), + f ) ); - storedFieldsSpec = storedFieldsSpec.merge(field.info.blockLoaders.get(shard).rowStrideStoredFieldSpec()); + storedFieldsSpec = storedFieldsSpec.merge(field.loader.rowStrideStoredFieldSpec()); } } @@ -193,7 +247,6 @@ public int get(int i) { "found row stride readers [" + rowStrideReaders + "] without stored fields [" + storedFieldsSpec + "]" ); } - LeafReaderContext ctx = ctx(shard, segment); StoredFieldLoader storedFieldLoader; if (useSequentialStoredFieldsReader(docVector.docs())) { storedFieldLoader = StoredFieldLoader.fromSpecSequential(storedFieldsSpec); @@ -203,7 +256,6 @@ public int get(int i) { trackStoredFields(storedFieldsSpec, false); } BlockLoaderStoredFieldsFromLeafLoader storedFields = new BlockLoaderStoredFieldsFromLeafLoader( - // TODO enable the optimization by passing non-null to docs if correct storedFieldLoader.getLoader(ctx, null), storedFieldsSpec.requiresSource() ? shardContexts.get(shard).newSourceLoader.get().leaf(ctx.reader(), null) : null ); @@ -226,50 +278,91 @@ public int get(int i) { } } - private void loadFromManyLeaves(Block[] blocks, DocVector docVector) throws IOException { - IntVector shards = docVector.shards(); - IntVector segments = docVector.segments(); - IntVector docs = docVector.docs(); - Block.Builder[] builders = new Block.Builder[blocks.length]; - int[] forwards = docVector.shardSegmentDocMapForwards(); - ComputeBlockLoaderFactory loaderBlockFactory = new ComputeBlockLoaderFactory(blockFactory, docs.getPositionCount()); - try { - for (int b = 0; b < fields.size(); b++) { - FieldWork field = fields.get(b); - builders[b] = builderFromFirstNonNull(loaderBlockFactory, field, docs.getPositionCount()); + private class LoadFromMany implements Releasable { + private final Block[] target; + private final IntVector shards; + private final IntVector segments; + private final IntVector docs; + private final int[] forwards; + private final int[] backwards; + private final Block.Builder[] builders; + private final BlockLoader.RowStrideReader[] rowStride; + + BlockLoaderStoredFieldsFromLeafLoader storedFields; + + LoadFromMany(Block[] target, DocVector docVector) { + this.target = target; + shards = docVector.shards(); + segments = docVector.segments(); + docs = docVector.docs(); + forwards = docVector.shardSegmentDocMapForwards(); + backwards = docVector.shardSegmentDocMapBackwards(); + builders = new Block.Builder[target.length]; + rowStride = new BlockLoader.RowStrideReader[target.length]; + } + + void run() throws IOException { + for (int f = 0; f < fields.length; f++) { + /* + * Important note: each block loader can build an optimized block + * builder, but we have *many* fields and some of those builders + * may not be compatible with each other. So we take the least + * common denominator: the builder for the field's expected element type.
+ */ + builders[f] = fields[f].info.type.newBlockBuilder(docs.getPositionCount(), blockFactory); } - int lastShard = -1; - int lastSegment = -1; - BlockLoaderStoredFieldsFromLeafLoader storedFields = null; - for (int i = 0; i < forwards.length; i++) { - int p = forwards[i]; - int shard = shards.getInt(p); - int segment = segments.getInt(p); - int doc = docs.getInt(p); - if (shard != lastShard || segment != lastSegment) { - lastShard = shard; - lastSegment = segment; - StoredFieldsSpec storedFieldsSpec = storedFieldsSpecForShard(shard); - LeafReaderContext ctx = ctx(shard, segment); - storedFields = new BlockLoaderStoredFieldsFromLeafLoader( - StoredFieldLoader.fromSpec(storedFieldsSpec).getLoader(ctx, null), - storedFieldsSpec.requiresSource() ? shardContexts.get(shard).newSourceLoader.get().leaf(ctx.reader(), null) : null - ); - if (false == storedFieldsSpec.equals(StoredFieldsSpec.NO_REQUIREMENTS)) { - trackStoredFields(storedFieldsSpec, false); - } + int p = forwards[0]; + int shard = shards.getInt(p); + int segment = segments.getInt(p); + int firstDoc = docs.getInt(p); + positionFieldWork(shard, segment, firstDoc); + LeafReaderContext ctx = ctx(shard, segment); + fieldsMoved(ctx, shard); + read(firstDoc); + for (int i = 1; i < forwards.length; i++) { + p = forwards[i]; + shard = shards.getInt(p); + segment = segments.getInt(p); + boolean changedSegment = positionFieldWorkDocGuarteedAscending(shard, segment); + if (changedSegment) { + ctx = ctx(shard, segment); + fieldsMoved(ctx, shard); } - storedFields.advanceTo(doc); - for (int r = 0; r < blocks.length; r++) { - fields.get(r).rowStride.reader(shard, segment, doc).read(doc, storedFields, builders[r]); + read(docs.getInt(p)); + } + for (int f = 0; f < builders.length; f++) { + try (Block orig = builders[f].build()) { + target[f] = orig.filter(backwards); } } - for (int r = 0; r < blocks.length; r++) { - try (Block orig = builders[r].build()) { - blocks[r] = orig.filter(docVector.shardSegmentDocMapBackwards()); + } + + private void fieldsMoved(LeafReaderContext ctx, int shard) throws IOException { + StoredFieldsSpec storedFieldsSpec = StoredFieldsSpec.NO_REQUIREMENTS; + for (int f = 0; f < fields.length; f++) { + FieldWork field = fields[f]; + rowStride[f] = field.rowStride(ctx); + storedFieldsSpec = storedFieldsSpec.merge(field.loader.rowStrideStoredFieldSpec()); + storedFields = new BlockLoaderStoredFieldsFromLeafLoader( + StoredFieldLoader.fromSpec(storedFieldsSpec).getLoader(ctx, null), + storedFieldsSpec.requiresSource() ? shardContexts.get(shard).newSourceLoader.get().leaf(ctx.reader(), null) : null + ); + if (false == storedFieldsSpec.equals(StoredFieldsSpec.NO_REQUIREMENTS)) { + trackStoredFields(storedFieldsSpec, false); } } - } finally { + } + + private void read(int doc) throws IOException { + storedFields.advanceTo(doc); + for (int f = 0; f < builders.length; f++) { + rowStride[f].read(doc, storedFields, builders[f]); + } + } + + @Override + public void close() { Releasables.closeExpectNoException(builders); } } @@ -298,83 +391,55 @@ private void trackStoredFields(StoredFieldsSpec spec, boolean sequential) { ); } - /** - * Returns a builder from the first non - {@link BlockLoader#CONSTANT_NULLS} loader - * in the list. If they are all the null loader then returns a null builder. 
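To make the `forwards`/`backwards` bookkeeping in `LoadFromMany.run()` above concrete: rows are read in ascending shard/segment/doc order via `forwards`, and `orig.filter(backwards)` then restores the page's original row order. A small worked example with illustrative values:

```java
// Page arrives with docs in this order:
int[] docs = { 9, 2, 7 };
// forwards: visiting positions 1, 2, 0 reads docs 2, 7, 9 in ascending order.
int[] forwards = { 1, 2, 0 };
// backwards is the inverse permutation:
int[] backwards = new int[forwards.length];
for (int i = 0; i < forwards.length; i++) {
    backwards[forwards[i]] = i; // yields { 2, 0, 1 }
}
// filter(backwards) maps the sorted rows [2, 7, 9] back to [9, 2, 7].
```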
- */ - private Block.Builder builderFromFirstNonNull(BlockLoader.BlockFactory loaderBlockFactory, FieldWork field, int positionCount) { - for (BlockLoader loader : field.info.blockLoaders) { - if (loader != BlockLoader.CONSTANT_NULLS) { - return (Block.Builder) loader.builder(loaderBlockFactory, positionCount); - } - } - // All null, just let the first one build the null block loader. - return (Block.Builder) field.info.blockLoaders.get(0).builder(loaderBlockFactory, positionCount); - } - - private StoredFieldsSpec storedFieldsSpecForShard(int shard) { - StoredFieldsSpec storedFieldsSpec = StoredFieldsSpec.NO_REQUIREMENTS; - for (int b = 0; b < fields.size(); b++) { - FieldWork field = fields.get(b); - storedFieldsSpec = storedFieldsSpec.merge(field.info.blockLoaders.get(shard).rowStrideStoredFieldSpec()); - } - return storedFieldsSpec; - } - private class FieldWork { final FieldInfo info; - final GuardedReader columnAtATime = new GuardedReader<>() { - @Override - BlockLoader.ColumnAtATimeReader build(BlockLoader loader, LeafReaderContext ctx) throws IOException { - return loader.columnAtATimeReader(ctx); - } - @Override - String type() { - return "column_at_a_time"; - } - }; + BlockLoader loader; + BlockLoader.ColumnAtATimeReader columnAtATime; + BlockLoader.RowStrideReader rowStride; - final GuardedReader rowStride = new GuardedReader<>() { - @Override - BlockLoader.RowStrideReader build(BlockLoader loader, LeafReaderContext ctx) throws IOException { - return loader.rowStrideReader(ctx); - } + FieldWork(FieldInfo info) { + this.info = info; + } - @Override - String type() { - return "row_stride"; + void sameSegment(int firstDoc) { + if (columnAtATime != null && columnAtATime.canReuse(firstDoc) == false) { + columnAtATime = null; } - }; + if (rowStride != null && rowStride.canReuse(firstDoc) == false) { + rowStride = null; + } + } - FieldWork(FieldInfo info) { - this.info = info; + void sameShardNewSegment() { + columnAtATime = null; + rowStride = null; } - private abstract class GuardedReader { - private int lastShard = -1; - private int lastSegment = -1; - V lastReader; + void newShard(int shard) { + loader = info.blockLoader.apply(shard); + columnAtATime = null; + rowStride = null; + } - V reader(int shard, int segment, int startingDocId) throws IOException { - if (lastShard == shard && lastSegment == segment) { - if (lastReader == null) { - return null; - } - if (lastReader.canReuse(startingDocId)) { - return lastReader; - } - } - lastShard = shard; - lastSegment = segment; - lastReader = build(info.blockLoaders.get(shard), ctx(shard, segment)); - readersBuilt.merge(info.name + ":" + type() + ":" + lastReader, 1, (prev, one) -> prev + one); - return lastReader; + BlockLoader.ColumnAtATimeReader columnAtATime(LeafReaderContext ctx) throws IOException { + if (columnAtATime == null) { + columnAtATime = loader.columnAtATimeReader(ctx); + trackReader("column_at_a_time", this.columnAtATime); } + return columnAtATime; + } - abstract V build(BlockLoader loader, LeafReaderContext ctx) throws IOException; + BlockLoader.RowStrideReader rowStride(LeafReaderContext ctx) throws IOException { + if (rowStride == null) { + rowStride = loader.rowStrideReader(ctx); + trackReader("row_stride", this.rowStride); + } + return rowStride; + } - abstract String type(); + private void trackReader(String type, BlockLoader.Reader reader) { + readersBuilt.merge(info.name + ":" + type + ":" + reader, 1, (prev, one) -> prev + one); } } @@ -393,7 +458,7 @@ private LeafReaderContext ctx(int shard, int segment) { 
public String toString() { StringBuilder sb = new StringBuilder(); sb.append("ValuesSourceReaderOperator[fields = ["); - if (fields.size() < 10) { + if (fields.length < 10) { boolean first = true; for (FieldWork f : fields) { if (first) { @@ -404,7 +469,7 @@ public String toString() { sb.append(f.info.name); } } else { - sb.append(fields.size()).append(" fields"); + sb.append(fields.length).append(" fields"); } return sb.append("]]").toString(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java index 98ba37e3f32d1..bcab6a39496fd 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java @@ -181,8 +181,12 @@ public void finish() { @Override public boolean isFinished() { - checkFailure(); - return finished && checkpoint.getPersistedCheckpoint() == checkpoint.getMaxSeqNo(); + if (finished && checkpoint.getPersistedCheckpoint() == checkpoint.getMaxSeqNo()) { + checkFailure(); + return true; + } else { + return false; + } } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverTaskRunner.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverTaskRunner.java index 1293118680824..38d879f8f7ad4 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverTaskRunner.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverTaskRunner.java @@ -11,9 +11,9 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; +import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.compute.OwningChannelActionListener; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -117,7 +117,7 @@ public Status getStatus() { private record DriverRequestHandler(TransportService transportService) implements TransportRequestHandler { @Override public void messageReceived(DriverRequest request, TransportChannel channel, Task task) { - var listener = new OwningChannelActionListener(channel); + var listener = new ChannelActionListener(channel); Driver.start( transportService.getThreadPool().getThreadContext(), request.executor, diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/EvalOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/EvalOperator.java index 2a6a3c9b6210b..10f23ed29094f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/EvalOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/EvalOperator.java @@ -75,27 +75,22 @@ interface Factory { public static final ExpressionEvaluator.Factory CONSTANT_NULL_FACTORY = new ExpressionEvaluator.Factory() { @Override public ExpressionEvaluator get(DriverContext driverContext) { - return CONSTANT_NULL; - } + return new ExpressionEvaluator() { + @Override + public Block eval(Page page) { + return 
driverContext.blockFactory().newConstantNullBlock(page.getPositionCount()); + } - @Override - public String toString() { - return CONSTANT_NULL.toString(); - } - }; + @Override + public void close() { - public static final ExpressionEvaluator CONSTANT_NULL = new ExpressionEvaluator() { - @Override - public Block eval(Page page) { - return Block.constantNullBlock(page.getPositionCount()); + } + }; } @Override public String toString() { return "ConstantNull"; } - - @Override - public void close() {} }; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java index 39068787f3c9e..ad3dce98e34d9 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.operator; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.Describable; import org.elasticsearch.compute.aggregation.GroupingAggregator; import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; @@ -31,12 +30,9 @@ public class HashAggregationOperator implements Operator { public record GroupSpec(int channel, ElementType elementType) {} - public record HashAggregationOperatorFactory( - List groups, - List aggregators, - int maxPageSize, - BigArrays bigArrays - ) implements OperatorFactory { + public record HashAggregationOperatorFactory(List groups, List aggregators, int maxPageSize) + implements + OperatorFactory { @Override public Operator get(DriverContext driverContext) { return new HashAggregationOperator( diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java index f3570bf7b853b..d6a908306e2f4 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java @@ -46,7 +46,7 @@ public BooleanBlock dedupeToBlock(BlockFactory blockFactory) { block.incRef(); return block; } - try (BooleanBlock.Builder builder = BooleanBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (BooleanBlock.Builder builder = blockFactory.newBooleanBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java index 4fb90ddb57e25..c3a26cedf5bbe 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java @@ -42,6 +42,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.IntFunction; import java.util.function.Supplier; import static java.util.Objects.requireNonNull; @@ -52,14 +53,13 @@ */ public class OrdinalsGroupingOperator implements Operator { 
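On the `CONSTANT_NULL_FACTORY` rewrite above: the shared singleton evaluator is gone, and each driver now builds its own evaluator, so the constant-null blocks it emits are allocated (and accounted) through that driver's block factory. A hedged usage sketch:

```java
try (
    EvalOperator.ExpressionEvaluator nulls = EvalOperator.CONSTANT_NULL_FACTORY.get(driverContext);
    Block block = nulls.eval(page)
) {
    // A constant-null block with page.getPositionCount() positions,
    // charged to driverContext.blockFactory().
}
```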
public record OrdinalsGroupingOperatorFactory( - List blockLoaders, + IntFunction blockLoaders, List shardContexts, ElementType groupingElementType, int docChannel, String groupingField, List aggregators, - int maxPageSize, - BigArrays bigArrays + int maxPageSize ) implements OperatorFactory { @Override @@ -72,7 +72,6 @@ public Operator get(DriverContext driverContext) { groupingField, aggregators, maxPageSize, - bigArrays, driverContext ); } @@ -83,7 +82,7 @@ public String describe() { } } - private final List blockLoaders; + private final IntFunction blockLoaders; private final List shardContexts; private final int docChannel; private final String groupingField; @@ -91,7 +90,6 @@ public String describe() { private final List aggregatorFactories; private final ElementType groupingElementType; private final Map ordinalAggregators; - private final BigArrays bigArrays; private final DriverContext driverContext; @@ -102,14 +100,13 @@ public String describe() { private ValuesAggregator valuesAggregator; public OrdinalsGroupingOperator( - List blockLoaders, + IntFunction blockLoaders, List shardContexts, ElementType groupingElementType, int docChannel, String groupingField, List aggregatorFactories, int maxPageSize, - BigArrays bigArrays, DriverContext driverContext ) { Objects.requireNonNull(aggregatorFactories); @@ -121,7 +118,6 @@ public OrdinalsGroupingOperator( this.aggregatorFactories = aggregatorFactories; this.ordinalAggregators = new HashMap<>(); this.maxPageSize = maxPageSize; - this.bigArrays = bigArrays; this.driverContext = driverContext; } @@ -136,7 +132,7 @@ public void addInput(Page page) { requireNonNull(page, "page is null"); DocVector docVector = page.getBlock(docChannel).asVector(); final int shardIndex = docVector.shards().getInt(0); - final var blockLoader = blockLoaders.get(shardIndex); + final var blockLoader = blockLoaders.apply(shardIndex); boolean pagePassed = false; try { if (docVector.singleSegmentNonDecreasing() && blockLoader.supportsOrdinals()) { @@ -150,7 +146,7 @@ public void addInput(Page page) { driverContext.blockFactory(), this::createGroupingAggregators, () -> blockLoader.ordinals(shardContexts.get(k.shardIndex).reader().leaves().get(k.segmentIndex)), - bigArrays + driverContext.bigArrays() ); } catch (IOException e) { throw new UncheckedIOException(e); @@ -464,7 +460,7 @@ private static class ValuesAggregator implements Releasable { private final HashAggregationOperator aggregator; ValuesAggregator( - List blockLoaders, + IntFunction blockLoaders, List shardContexts, ElementType groupingElementType, int docChannel, @@ -475,8 +471,8 @@ private static class ValuesAggregator implements Releasable { DriverContext driverContext ) { this.extractor = new ValuesSourceReaderOperator( - BlockFactory.getNonBreakingInstance(), - List.of(new ValuesSourceReaderOperator.FieldInfo(groupingField, blockLoaders)), + driverContext.blockFactory(), + List.of(new ValuesSourceReaderOperator.FieldInfo(groupingField, groupingElementType, blockLoaders)), shardContexts, docChannel ); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/RowOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/RowOperator.java index ff124021ea3ad..4b4379eb6a4d8 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/RowOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/RowOperator.java @@ -22,8 +22,7 @@ public record RowOperatorFactory(List objects) 
implements SourceOperator @Override public SourceOperator get(DriverContext driverContext) { - // We aren't yet ready to use the read block factory - return new RowOperator(BlockFactory.getNonBreakingInstance(), objects); + return new RowOperator(driverContext.blockFactory(), objects); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/StringExtractOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/StringExtractOperator.java index 4ffa530bc5d3a..ec61408954219 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/StringExtractOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/StringExtractOperator.java @@ -65,7 +65,7 @@ protected Page process(Page page) { BytesRefBlock.Builder[] blockBuilders = new BytesRefBlock.Builder[fieldNames.length]; try { for (int i = 0; i < fieldNames.length; i++) { - blockBuilders[i] = BytesRefBlock.newBlockBuilder(rowsCount, driverContext.blockFactory()); + blockBuilders[i] = driverContext.blockFactory().newBytesRefBlockBuilder(rowsCount); } try (BytesRefBlock input = (BytesRefBlock) inputEvaluator.eval(page)) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ThrowingDriverContext.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ThrowingDriverContext.java index 313ec0b682602..05bda58b34a6b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ThrowingDriverContext.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ThrowingDriverContext.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.operator; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ByteArray; import org.elasticsearch.common.util.DoubleArray; @@ -16,24 +17,37 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.core.Releasable; -public class ThrowingDriverContext extends DriverContext { - public ThrowingDriverContext() { - super(new ThrowingBigArrays(), BlockFactory.getNonBreakingInstance()); +/** + * A driver context that doesn't support any interaction. Consider it a placeholder wherever a dummy driver context is needed.
+ */ +final class ThrowingDriverContext extends DriverContext { + ThrowingDriverContext() { + super(new ThrowingBigArrays(), BlockFactory.getInstance(new NoopCircuitBreaker("throwing-context"), new ThrowingBigArrays())); } @Override public BigArrays bigArrays() { - throw new AssertionError("should not reach here"); + throw unsupported(); } @Override public BlockFactory blockFactory() { - throw new AssertionError("should not reach here"); + throw unsupported(); } @Override public boolean addReleasable(Releasable releasable) { - throw new AssertionError("should not reach here"); + throw unsupported(); + } + + @Override + public void addAsyncAction() { + throw unsupported(); + } + + static UnsupportedOperationException unsupported() { + assert false : "ThrowingDriverContext doesn't support any interaction"; + throw new UnsupportedOperationException("ThrowingDriverContext doesn't support any interaction"); } static class ThrowingBigArrays extends BigArrays { @@ -44,27 +58,27 @@ static class ThrowingBigArrays extends BigArrays { @Override public ByteArray newByteArray(long size, boolean clearOnResize) { - throw new AssertionError("should not reach here"); + throw unsupported(); } @Override public IntArray newIntArray(long size, boolean clearOnResize) { - throw new AssertionError("should not reach here"); + throw unsupported(); } @Override public LongArray newLongArray(long size, boolean clearOnResize) { - throw new AssertionError("should not reach here"); + throw unsupported(); } @Override public FloatArray newFloatArray(long size, boolean clearOnResize) { - throw new AssertionError("should not reach here"); + throw unsupported(); } @Override public DoubleArray newDoubleArray(long size, boolean clearOnResize) { - throw new AssertionError("should not reach here"); + throw unsupported(); } } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st index 169e7aa427717..d55f1c4cb43ec 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st @@ -16,20 +16,16 @@ import org.elasticsearch.common.util.LongHash; $endif$ import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; import org.elasticsearch.compute.aggregation.blockhash.BlockHash; -$if(int)$ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; +$if(int)$ import org.elasticsearch.compute.data.IntBlock; $elseif(long)$ -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; $else$ -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.$Type$Block; import org.elasticsearch.compute.data.IntBlock; @@ -50,9 +46,7 @@ $if(BytesRef)$ private static final int ALWAYS_COPY_MISSING = 20; // TODO BytesRef should try adding to the hash *first* and then comparing. 
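A note for readers new to the `.java.st` files in these hunks: they are string templates expanded once per element type, so the template changes just below, collapsing duplicate `$elseif$` branches into `$else$` and routing builders through `blockFactory.new$Type$BlockBuilder(...)`, rewrite every generated variant at once. Illustratively, the assumed `int` expansion of that builder line (generated source, not hand-written):

```java
// Hypothetical generated code for $Type$ = Int:
try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(block.getPositionCount())) {
    // ... copy each position's deduplicated values ...
}
```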
$elseif(double)$ private static final int ALWAYS_COPY_MISSING = 110; -$elseif(int)$ - private static final int ALWAYS_COPY_MISSING = 300; -$elseif(long)$ +$else$ private static final int ALWAYS_COPY_MISSING = 300; $endif$ @@ -77,7 +71,7 @@ $endif$ block.incRef(); return block; } - try ($Type$Block.Builder builder = $Type$Block.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try ($Type$Block.Builder builder = blockFactory.new$Type$BlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -131,7 +125,7 @@ $endif$ block.incRef(); return block; } - try ($Type$Block.Builder builder = $Type$Block.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try ($Type$Block.Builder builder = blockFactory.new$Type$BlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -165,7 +159,7 @@ $endif$ block.incRef(); return block; } - try ($Type$Block.Builder builder = $Type$Block.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try ($Type$Block.Builder builder = blockFactory.new$Type$BlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java index 8fb38ccf907d6..3173b716467be 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java @@ -13,7 +13,7 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; -import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.io.stream.StreamInput; @@ -21,14 +21,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractAsyncTask; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.compute.OwningChannelActionListener; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockStreamInput; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; @@ -43,7 +41,7 @@ /** * {@link ExchangeService} is responsible for exchanging pages between exchange sinks and sources on the same or different nodes. - * It holds a map of {@link ExchangeSourceHandler} and {@link ExchangeSinkHandler} instances for each node in the cluster. 
+ * It holds a map of {@link ExchangeSinkHandler} instances for each node in the cluster to serve {@link ExchangeRequest}s * To connect exchange sources to exchange sinks, use the {@link ExchangeSourceHandler#addRemoteSink(RemoteSink, int)} method. */ public final class ExchangeService extends AbstractLifecycleComponent { @@ -66,7 +64,6 @@ public final class ExchangeService extends AbstractLifecycleComponent { private final BlockFactory blockFactory; private final Map sinks = ConcurrentCollections.newConcurrentMap(); - private final Map sources = ConcurrentCollections.newConcurrentMap(); private final InactiveSinksReaper inactiveSinksReaper; @@ -125,35 +122,22 @@ public void finishSinkHandler(String exchangeId, Exception failure) { } } - /** - * Creates an {@link ExchangeSourceHandler} for the specified exchange id. - * - * @throws IllegalStateException if a source handler for the given id already exists - */ - public ExchangeSourceHandler createSourceHandler(String exchangeId, int maxBufferSize, String fetchExecutor) { - ExchangeSourceHandler sourceHandler = new ExchangeSourceHandler(maxBufferSize, threadPool.executor(fetchExecutor)); - if (sources.putIfAbsent(exchangeId, sourceHandler) != null) { - throw new IllegalStateException("source exchanger for id [" + exchangeId + "] already exists"); - } - sourceHandler.addCompletionListener(ActionListener.releasing(() -> sources.remove(exchangeId))); - return sourceHandler; - } - /** * Opens a remote sink handler on the remote node for the given session ID. */ public static void openExchange( TransportService transportService, - DiscoveryNode targetNode, + Transport.Connection connection, String sessionId, int exchangeBuffer, Executor responseExecutor, ActionListener listener ) { transportService.sendRequest( - targetNode, + connection, OPEN_EXCHANGE_ACTION_NAME, new OpenExchangeRequest(sessionId, exchangeBuffer), + TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener.map(unused -> null), in -> TransportResponse.Empty.INSTANCE, responseExecutor) ); } @@ -193,15 +177,11 @@ private class ExchangeTransportAction implements TransportRequestHandler listener = new OwningChannelActionListener<>(channel); + ActionListener listener = new ChannelActionListener<>(channel); final ExchangeSinkHandler sinkHandler = sinks.get(exchangeId); if (sinkHandler == null) { listener.onResponse(new ExchangeResponse(null, true)); } else { - // the data-node request hasn't arrived yet; use the task framework to cancel the request if needed. 
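The exchange plumbing above now addresses the remote side as a `Transport.Connection` instead of a `DiscoveryNode`, so callers can hand in whatever connection they resolved (plausibly including proxy connections to remote clusters). A hedged sketch of the open-then-attach flow, with illustrative variable names:

```java
ExchangeService.openExchange(
    transportService,
    connection,        // Transport.Connection resolved by the caller
    sessionId,
    exchangeBuffer,
    responseExecutor,
    ActionListener.wrap(ignored -> {
        RemoteSink sink = exchangeService.newRemoteSink(parentTask, sessionId, transportService, connection);
        // Attach the sink; the int argument's semantics are defined by ExchangeSourceHandler.
        sourceHandler.addRemoteSink(sink, 1);
    }, e -> { /* fail the surrounding computation */ })
);
```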
- if (sinkHandler.hasData() == false) { - ((CancellableTask) task).addListener(() -> sinkHandler.onFailure(new TaskCancelledException("task cancelled"))); - } sinkHandler.fetchPageAsync(request.sourcesFinished(), listener); } } @@ -251,16 +231,16 @@ protected void runInternal() { * @param parentTask the parent task that initialized the ESQL request * @param exchangeId the exchange ID * @param transportService the transport service - * @param remoteNode the node where the remote exchange sink is located + * @param conn the connection to the remote node where the remote exchange sink is located */ - public RemoteSink newRemoteSink(Task parentTask, String exchangeId, TransportService transportService, DiscoveryNode remoteNode) { - return new TransportRemoteSink(transportService, blockFactory, remoteNode, parentTask, exchangeId, executor); + public RemoteSink newRemoteSink(Task parentTask, String exchangeId, TransportService transportService, Transport.Connection conn) { + return new TransportRemoteSink(transportService, blockFactory, conn, parentTask, exchangeId, executor); } record TransportRemoteSink( TransportService transportService, BlockFactory blockFactory, - DiscoveryNode node, + Transport.Connection connection, Task parentTask, String exchangeId, Executor responseExecutor @@ -269,7 +249,7 @@ record TransportRemoteSink( @Override public void fetchPageAsync(boolean allSourcesFinished, ActionListener listener) { transportService.sendChildRequest( - node, + connection, EXCHANGE_ACTION_NAME, new ExchangeRequest(exchangeId, allSourcesFinished), parentTask, @@ -285,7 +265,7 @@ public void fetchPageAsync(boolean allSourcesFinished, ActionListener docIds = new ArrayList<>(positionCount); + for (int i = 0; i < positionCount; i++) { + shardsBuilder.appendInt(docVector.shards().getInt(i)); + segmentsBuilder.appendInt(docVector.segments().getInt(i)); + docIds.add(docVector.docs().getInt(i)); } - } - IntVector segments = docVector.segments(); - if (randomBoolean()) { - try (IntVector.Builder builder = IntVector.newVectorBuilder(positionCount)) { - for (int i = 0; i < positionCount; i++) { - builder.appendInt(segments.getInt(i)); - } - segments.close(); - segments = builder.build(); + shards = shardsBuilder.build(); + segments = segmentsBuilder.build(); + Collections.shuffle(docIds, random()); + for (Integer d : docIds) { + docsBuilder.appendInt(d); } - } - IntVector docs = docVector.docs(); - if (randomBoolean()) { - List ids = new ArrayList<>(positionCount); - for (int i = 0; i < positionCount; i++) { - ids.add(docs.getInt(i)); + docs = docsBuilder.build(); + } finally { + if (docs == null) { + Releasables.closeExpectNoException(docVector, shards, segments); + } else { + Releasables.closeExpectNoException(docVector); } - Collections.shuffle(ids, random()); - docs.close(); - docs = blockFactory.newIntArrayVector(ids.stream().mapToInt(n -> n).toArray(), positionCount); } Block[] blocks = new Block[page.getBlockCount()]; blocks[0] = new DocVector(shards, segments, docs, false).asBlock(); @@ -222,7 +218,7 @@ public String toString() { List.of(shuffleDocsOperator, new AbstractPageMappingOperator() { @Override protected Page process(Page page) { - return page.appendBlock(IntBlock.newConstantBlockWith(1, page.getPositionCount())); + return page.appendBlock(driverContext.blockFactory().newConstantIntBlockWith(1, page.getPositionCount())); } @Override @@ -231,18 +227,17 @@ public String toString() { } }, new OrdinalsGroupingOperator( - List.of(new 
KeywordFieldMapper.KeywordFieldType("g").blockLoader(null)), + shardIdx -> new KeywordFieldMapper.KeywordFieldType("g").blockLoader(null), List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), ElementType.BYTES_REF, 0, gField, - List.of(CountAggregatorFunction.supplier(bigArrays, List.of(1)).groupingAggregatorFactory(INITIAL)), + List.of(CountAggregatorFunction.supplier(List.of(1)).groupingAggregatorFactory(INITIAL)), randomPageSize(), - bigArrays, driverContext ), new HashAggregationOperator( - List.of(CountAggregatorFunction.supplier(bigArrays, List.of(1, 2)).groupingAggregatorFactory(FINAL)), + List.of(CountAggregatorFunction.supplier(List.of(1, 2)).groupingAggregatorFactory(FINAL)), () -> BlockHash.build( List.of(new HashAggregationOperator.GroupSpec(0, ElementType.BYTES_REF)), driverContext, @@ -350,7 +345,7 @@ public static void assertDriverContext(DriverContext driverContext) { } static LuceneOperator.Factory luceneOperatorFactory(IndexReader reader, Query query, int limit) { - final SearchContext searchContext = mockSearchContext(reader); + final SearchContext searchContext = mockSearchContext(reader, 0); return new LuceneSourceOperator.Factory( List.of(searchContext), ctx -> query, diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/AggregatorFunctionTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/AggregatorFunctionTestCase.java index 894b94476c08d..c41b7a8475066 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/AggregatorFunctionTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/AggregatorFunctionTestCase.java @@ -8,8 +8,6 @@ package org.elasticsearch.compute.aggregation; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockTestUtils; @@ -21,6 +19,7 @@ import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.compute.operator.AggregationOperator; import org.elasticsearch.compute.operator.CannedSourceOperator; import org.elasticsearch.compute.operator.Driver; @@ -43,10 +42,10 @@ import static org.hamcrest.Matchers.hasSize; public abstract class AggregatorFunctionTestCase extends ForkingOperatorTestCase { - protected abstract AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels); + protected abstract AggregatorFunctionSupplier aggregatorFunction(List inputChannels); protected final int aggregatorIntermediateBlockCount() { - try (var agg = aggregatorFunction(nonBreakingBigArrays(), List.of()).aggregator(driverContext())) { + try (var agg = aggregatorFunction(List.of()).aggregator(driverContext())) { return agg.intermediateBlockCount(); } } @@ -56,12 +55,9 @@ protected final int aggregatorIntermediateBlockCount() { protected abstract void assertSimpleOutput(List input, Block result); @Override - protected Operator.OperatorFactory simpleWithMode(BigArrays bigArrays, AggregatorMode mode) { + protected Operator.OperatorFactory simpleWithMode(AggregatorMode mode) { List channels = mode.isInputPartial() ? 
range(0, aggregatorIntermediateBlockCount()).boxed().toList() : List.of(0); - return new AggregationOperator.AggregationOperatorFactory( - List.of(aggregatorFunction(bigArrays, channels).aggregatorFactory(mode)), - mode - ); + return new AggregationOperator.AggregationOperatorFactory(List.of(aggregatorFunction(channels).aggregatorFactory(mode)), mode); } @Override @@ -85,25 +81,19 @@ protected final void assertSimpleOutput(List input, List results) { assertSimpleOutput(input.stream().map(p -> p.getBlock(0)).toList(), result); } - @Override - protected final ByteSizeValue smallEnoughToCircuitBreak() { - assumeTrue("doesn't use big array so never breaks", false); - return null; - } - public final void testIgnoresNulls() { int end = between(1_000, 100_000); List results = new ArrayList<>(); DriverContext driverContext = driverContext(); BlockFactory blockFactory = driverContext.blockFactory(); List input = CannedSourceOperator.collectPages(simpleInput(blockFactory, end)); - List origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); try ( Driver d = new Driver( driverContext, new NullInsertingSourceOperator(new CannedSourceOperator(input.iterator()), blockFactory), - List.of(simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext)), + List.of(simple().get(driverContext)), new TestResultPageSinkOperator(results::add), () -> {} ) @@ -120,8 +110,8 @@ public final void testMultivalued() { List input = CannedSourceOperator.collectPages( new PositionMergingSourceOperator(simpleInput(driverContext.blockFactory(), end), blockFactory) ); - List origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); - assertSimpleOutput(origInput, drive(simple(BigArrays.NON_RECYCLING_INSTANCE).get(driverContext), input.iterator(), driverContext)); + List origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); + assertSimpleOutput(origInput, drive(simple().get(driverContext), input.iterator(), driverContext)); } public final void testMultivaluedWithNulls() { @@ -134,17 +124,13 @@ public final void testMultivaluedWithNulls() { blockFactory ) ); - List origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); - assertSimpleOutput(origInput, drive(simple(BigArrays.NON_RECYCLING_INSTANCE).get(driverContext), input.iterator(), driverContext)); + List origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); + assertSimpleOutput(origInput, drive(simple().get(driverContext), input.iterator(), driverContext)); } public final void testEmptyInput() { DriverContext driverContext = driverContext(); - List results = drive( - simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext), - List.of().iterator(), - driverContext - ); + List results = drive(simple().get(driverContext), List.of().iterator(), driverContext); assertThat(results, hasSize(1)); } @@ -152,8 +138,8 @@ public final void testEmptyInput() { public final void testEmptyInputInitialFinal() { DriverContext driverContext = driverContext(); var operators = List.of( - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.INITIAL).get(driverContext), - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.FINAL).get(driverContext) + simpleWithMode(AggregatorMode.INITIAL).get(driverContext), + simpleWithMode(AggregatorMode.FINAL).get(driverContext) ); List 
results = drive(operators, List.of().iterator(), driverContext); assertThat(results, hasSize(1)); @@ -162,9 +148,9 @@ public final void testEmptyInputInitialFinal() { public final void testEmptyInputInitialIntermediateFinal() { DriverContext driverContext = driverContext(); var operators = List.of( - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.INITIAL).get(driverContext), - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.INTERMEDIATE).get(driverContext), - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.FINAL).get(driverContext) + simpleWithMode(AggregatorMode.INITIAL).get(driverContext), + simpleWithMode(AggregatorMode.INTERMEDIATE).get(driverContext), + simpleWithMode(AggregatorMode.FINAL).get(driverContext) ); List results = drive(operators, List.of().iterator(), driverContext); @@ -175,7 +161,7 @@ public final void testEmptyInputInitialIntermediateFinal() { // Returns an intermediate state that is equivalent to what the local execution planner will emit // if it determines that certain shards have no relevant data. final List nullIntermediateState(BlockFactory blockFactory) { - try (var agg = aggregatorFunction(nonBreakingBigArrays(), List.of()).aggregator(driverContext())) { + try (var agg = aggregatorFunction(List.of()).aggregator(driverContext())) { var method = agg.getClass().getMethod("intermediateStateDesc"); @SuppressWarnings("unchecked") List intermediateStateDescs = (List) method.invoke(null); @@ -197,8 +183,8 @@ public final void testNullIntermediateFinal() { BlockFactory blockFactory = driverContext.blockFactory(); List input = nullIntermediateState(blockFactory); var operators = List.of( - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.INTERMEDIATE).get(driverContext), - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.FINAL).get(driverContext) + simpleWithMode(AggregatorMode.INTERMEDIATE).get(driverContext), + simpleWithMode(AggregatorMode.FINAL).get(driverContext) ); List results = drive(operators, input.iterator(), driverContext); assertThat(results, hasSize(1)); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountAggregatorFunctionTests.java index 623de7fdd1fff..a81ce65c5360b 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.BasicBlockTests; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; @@ -27,8 +26,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels) { - return CountAggregatorFunction.supplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return CountAggregatorFunction.supplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunctionTests.java 
b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunctionTests.java index b44eb0ba1bd3f..84ab9d787aec1 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.BasicBlockTests; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; @@ -27,8 +26,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels) { - return new CountDistinctBooleanAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new CountDistinctBooleanAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunctionTests.java index 169ca964482bf..66ecbb6eb1130 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.LongBlock; @@ -24,8 +23,8 @@ public class CountDistinctBooleanGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels) { - return new CountDistinctBooleanAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new CountDistinctBooleanAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunctionTests.java index 5277eef577997..8872e65054fa9 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunctionTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.compute.aggregation; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.BasicBlockTests; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; @@ -33,8 +32,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { } @Override - protected 
AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels) { - return new CountDistinctBytesRefAggregatorFunctionSupplier(bigArrays, inputChannels, 40000); + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new CountDistinctBytesRefAggregatorFunctionSupplier(inputChannels, 40000); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunctionTests.java index 82190775a7e4c..cbc2a5227d9ea 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunctionTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.compute.aggregation; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.LongBlock; @@ -26,8 +25,8 @@ public class CountDistinctBytesRefGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels) { - return new CountDistinctBytesRefAggregatorFunctionSupplier(bigArrays, inputChannels, 40000); + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new CountDistinctBytesRefAggregatorFunctionSupplier(inputChannels, 40000); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunctionTests.java index d65d7a9f8b88a..9b6bc6be23e62 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.BasicBlockTests; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; @@ -29,8 +28,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels) { - return new CountDistinctDoubleAggregatorFunctionSupplier(bigArrays, inputChannels, 40000); + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new CountDistinctDoubleAggregatorFunctionSupplier(inputChannels, 40000); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunctionTests.java index 71ea63554a05e..56a0d863038bc 100644 --- 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.LongBlock; @@ -25,8 +24,8 @@ public class CountDistinctDoubleGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels) { - return new CountDistinctDoubleAggregatorFunctionSupplier(bigArrays, inputChannels, 40000); + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new CountDistinctDoubleAggregatorFunctionSupplier(inputChannels, 40000); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunctionTests.java index 2f48210a32245..7bfbb2f70a5a4 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunctionTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.compute.aggregation; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.BasicBlockTests; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; @@ -35,8 +34,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels) { - return new CountDistinctIntAggregatorFunctionSupplier(bigArrays, inputChannels, 40000); + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new CountDistinctIntAggregatorFunctionSupplier(inputChannels, 40000); } @Override @@ -68,7 +67,7 @@ public void testRejectsDouble() { Driver d = new Driver( driverContext, new CannedSourceOperator(Iterators.single(new Page(blockFactory.newDoubleArrayVector(new double[] { 1.0 }, 1).asBlock()))), - List.of(simple(nonBreakingBigArrays()).get(driverContext)), + List.of(simple().get(driverContext)), new PageConsumerOperator(page -> fail("shouldn't have made it this far")), () -> {} ) diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunctionTests.java index 858b4ec71d58d..229ec49bcffa8 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import 
org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.LongBlock; @@ -25,8 +24,8 @@ public class CountDistinctIntGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels) { - return new CountDistinctIntAggregatorFunctionSupplier(bigArrays, inputChannels, 40000); + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new CountDistinctIntAggregatorFunctionSupplier(inputChannels, 40000); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunctionTests.java index 704b5c649f744..4df611a41a8dc 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunctionTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.compute.aggregation; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.BasicBlockTests; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; @@ -36,8 +35,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels) { - return new CountDistinctLongAggregatorFunctionSupplier(bigArrays, inputChannels, 40000); + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new CountDistinctLongAggregatorFunctionSupplier(inputChannels, 40000); } @Override @@ -69,7 +68,7 @@ public void testRejectsDouble() { Driver d = new Driver( driverContext, new CannedSourceOperator(Iterators.single(new Page(blockFactory.newDoubleArrayVector(new double[] { 1.0 }, 1).asBlock()))), - List.of(simple(nonBreakingBigArrays()).get(driverContext)), + List.of(simple().get(driverContext)), new PageConsumerOperator(page -> fail("shouldn't have made it this far")), () -> {} ) diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunctionTests.java index 4282adaba595e..539ef35390663 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.LongBlock; @@ -24,8 +23,8 @@ public class CountDistinctLongGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels) { - return new CountDistinctLongAggregatorFunctionSupplier(bigArrays, inputChannels, 40000); + protected 
AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new CountDistinctLongAggregatorFunctionSupplier(inputChannels, 40000); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountGroupingAggregatorFunctionTests.java index e90bab093abc2..1d658f80c4e29 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/CountGroupingAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.LongBlock; @@ -24,8 +23,8 @@ public class CountGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels) { - return CountAggregatorFunction.supplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return CountAggregatorFunction.supplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java index 6afd285987696..a6e88234dc25b 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java @@ -8,8 +8,6 @@ package org.elasticsearch.compute.aggregation; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BitArray; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; @@ -23,6 +21,7 @@ import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.LongVector; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.compute.operator.CannedSourceOperator; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.ForkingOperatorTestCase; @@ -48,10 +47,10 @@ import static org.hamcrest.Matchers.hasSize; public abstract class GroupingAggregatorFunctionTestCase extends ForkingOperatorTestCase { - protected abstract AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels); + protected abstract AggregatorFunctionSupplier aggregatorFunction(List inputChannels); protected final int aggregatorIntermediateBlockCount() { - try (var agg = aggregatorFunction(nonBreakingBigArrays(), List.of()).aggregator(driverContext())) { + try (var agg = aggregatorFunction(List.of()).aggregator(driverContext())) { return agg.intermediateBlockCount(); } } @@ -61,19 +60,18 @@ protected final int aggregatorIntermediateBlockCount() { protected abstract void assertSimpleGroup(List input, Block result, int position, Long group); @Override - protected final 
Operator.OperatorFactory simpleWithMode(BigArrays bigArrays, AggregatorMode mode) { + protected final Operator.OperatorFactory simpleWithMode(AggregatorMode mode) { List channels = mode.isInputPartial() ? range(1, 1 + aggregatorIntermediateBlockCount()).boxed().toList() : List.of(1); int emitChunkSize = between(100, 200); - AggregatorFunctionSupplier supplier = aggregatorFunction(bigArrays, channels); + AggregatorFunctionSupplier supplier = aggregatorFunction(channels); if (randomBoolean()) { supplier = chunkGroups(emitChunkSize, supplier); } return new HashAggregationOperator.HashAggregationOperatorFactory( List.of(new HashAggregationOperator.GroupSpec(0, ElementType.LONG)), List.of(supplier.groupingAggregatorFactory(mode)), - randomPageSize(), - bigArrays + randomPageSize() ); } @@ -145,11 +143,6 @@ protected final void assertSimpleOutput(List input, List results) { } } - @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - return ByteSizeValue.ofBytes(between(1, 32)); - } - public final void testNullGroupsAndValues() { DriverContext driverContext = driverContext(); BlockFactory blockFactory = driverContext.blockFactory(); @@ -157,12 +150,8 @@ public final void testNullGroupsAndValues() { List input = CannedSourceOperator.collectPages( new NullInsertingSourceOperator(simpleInput(driverContext.blockFactory(), end), blockFactory) ); - List origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); - List results = drive( - simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext), - input.iterator(), - driverContext - ); + List origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); + List results = drive(simple().get(driverContext), input.iterator(), driverContext); assertSimpleOutput(origInput, results); } @@ -171,12 +160,8 @@ public final void testNullGroups() { BlockFactory blockFactory = driverContext.blockFactory(); int end = between(50, 60); List input = CannedSourceOperator.collectPages(nullGroups(simpleInput(blockFactory, end), blockFactory)); - List origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); - List results = drive( - simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext), - input.iterator(), - driverContext - ); + List origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); + List results = drive(simple().get(driverContext), input.iterator(), driverContext); assertSimpleOutput(origInput, results); } @@ -189,7 +174,7 @@ public void testAllKeyNulls() { input.add(p); } else { Block[] blocks = new Block[p.getBlockCount()]; - blocks[0] = Block.constantNullBlock(p.getPositionCount(), blockFactory); + blocks[0] = blockFactory.newConstantNullBlock(p.getPositionCount()); for (int i = 1; i < blocks.length; i++) { blocks[i] = p.getBlock(i); } @@ -197,12 +182,8 @@ public void testAllKeyNulls() { input.add(new Page(blocks)); } } - List origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); - List results = drive( - simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext), - input.iterator(), - driverContext - ); + List origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); + List results = drive(simple().get(driverContext), input.iterator(), driverContext); assertSimpleOutput(origInput, results); } @@ -232,12 +213,8 @@ public final void testNullValues() { BlockFactory blockFactory = driverContext.blockFactory(); int end = between(50, 60); 
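The hunks in this file, and in the per-aggregator test files below, all follow one API shift: AggregatorFunctionSupplier no longer takes a BigArrays, so the aggregator's memory is accounted by the BlockFactory inside the DriverContext handed to aggregator(). A sketch of the new contract, shaped as a hypothetical test in one of these test-case subclasses (the method name and assertion are illustrative; the calls are the ones used in the hunks above, and driverContext() is the existing test fixture method):

```java
public void testSupplierTakesOnlyChannels() {
    // New contract: the supplier declares its input channels and nothing else.
    AggregatorFunctionSupplier supplier = CountAggregatorFunction.supplier(List.of(1));
    try (var aggregator = supplier.aggregator(driverContext())) {
        // Allocations are tracked by driverContext().blockFactory()'s circuit breaker,
        // which is why the smallEnoughToCircuitBreak()/nonBreakingBigArrays() plumbing
        // could be deleted from these tests.
        assertThat(aggregator.intermediateBlockCount(), greaterThan(0)); // greaterThan from Hamcrest
    }
}
```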
List input = CannedSourceOperator.collectPages(nullValues(simpleInput(driverContext.blockFactory(), end), blockFactory)); - List origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); - List results = drive( - simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext), - input.iterator(), - driverContext - ); + List origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); + List results = drive(simple().get(driverContext), input.iterator(), driverContext); assertSimpleOutput(origInput, results); } @@ -246,12 +223,12 @@ public final void testNullValuesInitialIntermediateFinal() { BlockFactory blockFactory = driverContext.blockFactory(); int end = between(50, 60); List input = CannedSourceOperator.collectPages(nullValues(simpleInput(driverContext.blockFactory(), end), blockFactory)); - List origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); List results = drive( List.of( - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.INITIAL).get(driverContext), - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.INTERMEDIATE).get(driverContext), - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.FINAL).get(driverContext) + simpleWithMode(AggregatorMode.INITIAL).get(driverContext), + simpleWithMode(AggregatorMode.INTERMEDIATE).get(driverContext), + simpleWithMode(AggregatorMode.FINAL).get(driverContext) ), input.iterator(), driverContext @@ -278,12 +255,8 @@ public final void testMultivalued() { List input = CannedSourceOperator.collectPages( mergeValues(simpleInput(driverContext.blockFactory(), end), driverContext.blockFactory()) ); - List origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); - List results = drive( - simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext), - input.iterator(), - driverContext - ); + List origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); + List results = drive(simple().get(driverContext), input.iterator(), driverContext); assertSimpleOutput(origInput, results); } @@ -294,12 +267,8 @@ public final void testMulitvaluedNullGroupsAndValues() { List input = CannedSourceOperator.collectPages( new NullInsertingSourceOperator(mergeValues(simpleInput(driverContext.blockFactory(), end), blockFactory), blockFactory) ); - List origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); - List results = drive( - simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext), - input.iterator(), - driverContext - ); + List origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); + List results = drive(simple().get(driverContext), input.iterator(), driverContext); assertSimpleOutput(origInput, results); } @@ -309,12 +278,8 @@ public void testMulitvaluedNullGroup() { int end = between(1, 2); // TODO revert var inputOperator = nullGroups(mergeValues(simpleInput(driverContext.blockFactory(), end), blockFactory), blockFactory); List input = CannedSourceOperator.collectPages(inputOperator); - List origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); - List results = drive( - simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext), - input.iterator(), - driverContext - ); + List origInput = 
BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); + List results = drive(simple().get(driverContext), input.iterator(), driverContext); assertSimpleOutput(origInput, results); } @@ -325,27 +290,20 @@ public final void testMulitvaluedNullValues() { List input = CannedSourceOperator.collectPages( nullValues(mergeValues(simpleInput(blockFactory, end), blockFactory), blockFactory) ); - List origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); - List results = drive( - simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext), - input.iterator(), - driverContext - ); + List origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); + List results = drive(simple().get(driverContext), input.iterator(), driverContext); assertSimpleOutput(origInput, results); } public final void testNullOnly() { DriverContext driverContext = driverContext(); - assertNullOnly(List.of(simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext)), driverContext); + assertNullOnly(List.of(simple().get(driverContext)), driverContext); } public final void testNullOnlyInputInitialFinal() { DriverContext driverContext = driverContext(); assertNullOnly( - List.of( - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.INITIAL).get(driverContext), - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.FINAL).get(driverContext) - ), + List.of(simpleWithMode(AggregatorMode.INITIAL).get(driverContext), simpleWithMode(AggregatorMode.FINAL).get(driverContext)), driverContext ); } @@ -354,9 +312,9 @@ public final void testNullOnlyInputInitialIntermediateFinal() { DriverContext driverContext = driverContext(); assertNullOnly( List.of( - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.INITIAL).get(driverContext), - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.INTERMEDIATE).get(driverContext), - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.FINAL).get(driverContext) + simpleWithMode(AggregatorMode.INITIAL).get(driverContext), + simpleWithMode(AggregatorMode.INTERMEDIATE).get(driverContext), + simpleWithMode(AggregatorMode.FINAL).get(driverContext) ), driverContext ); @@ -366,33 +324,32 @@ public final void testNullOnlyInputInitialIntermediateFinal() { * Run the aggregation passing only null values. 
*/ private void assertNullOnly(List operators, DriverContext driverContext) { - LongBlock.Builder groupBuilder = LongBlock.newBlockBuilder(1); - if (randomBoolean()) { - groupBuilder.appendLong(1); - } else { - groupBuilder.appendNull(); - } - List source = List.of(new Page(groupBuilder.build(), Block.constantNullBlock(1))); - List results = drive(operators, source.iterator(), driverContext); + BlockFactory blockFactory = driverContext.blockFactory(); + try (var groupBuilder = blockFactory.newLongBlockBuilder(1)) { + if (randomBoolean()) { + groupBuilder.appendLong(1); + } else { + groupBuilder.appendNull(); + } + List source = List.of(new Page(groupBuilder.build(), blockFactory.newConstantNullBlock(1))); + List results = drive(operators, source.iterator(), driverContext); - assertThat(results, hasSize(1)); - Block resultBlock = results.get(0).getBlock(1); - assertOutputFromNullOnly(resultBlock, 0); + assertThat(results, hasSize(1)); + Block resultBlock = results.get(0).getBlock(1); + assertOutputFromNullOnly(resultBlock, 0); + } } public final void testNullSome() { DriverContext driverContext = driverContext(); - assertNullSome(driverContext, List.of(simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext))); + assertNullSome(driverContext, List.of(simple().get(driverContext))); } public final void testNullSomeInitialFinal() { DriverContext driverContext = driverContext(); assertNullSome( driverContext, - List.of( - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.INITIAL).get(driverContext), - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.FINAL).get(driverContext) - ) + List.of(simpleWithMode(AggregatorMode.INITIAL).get(driverContext), simpleWithMode(AggregatorMode.FINAL).get(driverContext)) ); } @@ -401,9 +358,9 @@ public final void testNullSomeInitialIntermediateFinal() { assertNullSome( driverContext, List.of( - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.INITIAL).get(driverContext), - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.INTERMEDIATE).get(driverContext), - simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.FINAL).get(driverContext) + simpleWithMode(AggregatorMode.INITIAL).get(driverContext), + simpleWithMode(AggregatorMode.INTERMEDIATE).get(driverContext), + simpleWithMode(AggregatorMode.FINAL).get(driverContext) ) ); } @@ -465,7 +422,7 @@ protected Block merge(int blockIndex, Block block) { if (blockIndex != 0) { return super.merge(blockIndex, block); } - Block.Builder builder = block.elementType().newBlockBuilder(block.getPositionCount() / 2); + Block.Builder builder = block.elementType().newBlockBuilder(block.getPositionCount() / 2, blockFactory); for (int p = 0; p + 1 < block.getPositionCount(); p += 2) { builder.copyFrom(block, p, p + 1); } @@ -565,7 +522,7 @@ public AddInput prepareProcessPage(SeenGroupIds ignoredSeenGroupIds, Page page) @Override public void add(int positionOffset, IntBlock groupIds) { for (int offset = 0; offset < groupIds.getPositionCount(); offset += emitChunkSize) { - IntBlock.Builder builder = IntBlock.newBlockBuilder(emitChunkSize); + IntBlock.Builder builder = blockFactory().newIntBlockBuilder(emitChunkSize); int endP = Math.min(groupIds.getPositionCount(), offset + emitChunkSize); for (int p = offset; p < endP; p++) { int start = groupIds.getFirstValueIndex(p); @@ -603,7 +560,7 @@ public void add(int positionOffset, IntVector groupIds) { seenGroupIds.set(group); chunk[count++] = group; } 
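The assertNullOnly rewrite above also shows the second recurring pattern in this patch: static builders such as LongBlock.newBlockBuilder and Block.constantNullBlock give way to instance methods on the driver's BlockFactory, with the builder held in try-with-resources so breaker-tracked memory is released even if the test fails midway. A condensed sketch of that shape, assuming a driverContext variable in scope as in the tests above (illustrative, not an excerpt from the patch):

```java
BlockFactory blockFactory = driverContext.blockFactory();
try (LongBlock.Builder groups = blockFactory.newLongBlockBuilder(1)) {
    groups.appendLong(1); // or groups.appendNull() for the null-key case
    // build() transfers ownership of the block to the Page; the builder itself
    // is released by the enclosing try-with-resources.
    Page source = new Page(groups.build(), blockFactory.newConstantNullBlock(1));
    // ... drive the operators over `source` ...
}
```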
- BlockFactory blockFactory = BlockFactory.getNonBreakingInstance(); // TODO: just for compile + BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); // TODO: just for compile delegateAddInput.add(positionOffset + offset, blockFactory.newIntArrayVector(chunk, count)); } } @@ -618,7 +575,7 @@ public void addIntermediateInput(int positionOffset, IntVector groupIds, Page pa for (int i = offset; i < Math.min(groupIds.getPositionCount(), offset + emitChunkSize); i++) { chunk[count++] = groupIds.getInt(i); } - BlockFactory blockFactory = BlockFactory.getNonBreakingInstance(); // TODO: just for compile + BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); // TODO: just for compile delegate.addIntermediateInput(positionOffset + offset, blockFactory.newIntArrayVector(chunk, count), page); } } @@ -645,7 +602,7 @@ public int intermediateBlockCount() { @Override public void close() { - Releasables.close(delegate::close, seenGroupIds); + Releasables.close(delegate, seenGroupIds); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunctionTests.java index 1e4bbc62b1926..9d638fae4e822 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; @@ -27,8 +26,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels) { - return new MaxDoubleAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MaxDoubleAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxDoubleGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxDoubleGroupingAggregatorFunctionTests.java index 2b374545e3023..18aec87a9d07b 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxDoubleGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxDoubleGroupingAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; @@ -33,8 +32,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels) { - return new MaxDoubleAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MaxDoubleAggregatorFunctionSupplier(inputChannels); } @Override diff --git 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunctionTests.java index 2a1ce21530128..af198e3aec9d5 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.IntBlock; @@ -26,8 +25,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels) { - return new MaxIntAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MaxIntAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxIntGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxIntGroupingAggregatorFunctionTests.java index 982e46eec2e0e..372015ebd767c 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxIntGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxIntGroupingAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.IntBlock; @@ -24,8 +23,8 @@ public class MaxIntGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels) { - return new MaxIntAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MaxIntAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunctionTests.java index a51aa98f7a5a8..8b9c9c1c39b8b 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.LongBlock; @@ -27,8 +26,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels) { - return new MaxLongAggregatorFunctionSupplier(bigArrays, inputChannels); + protected 
AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MaxLongAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxLongGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxLongGroupingAggregatorFunctionTests.java index a1f44e128c2e1..1bf7cd9eea27d 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxLongGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MaxLongGroupingAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.LongBlock; @@ -24,8 +23,8 @@ public class MaxLongGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels) { - return new MaxLongAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MaxLongAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunctionTests.java index cf6efe48e33ea..1d105430ce1db 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunctionTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.compute.aggregation; import org.elasticsearch.common.Randomness; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; @@ -30,8 +29,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List inputChannels) { - return new MedianAbsoluteDeviationDoubleAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new MedianAbsoluteDeviationDoubleAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunctionTests.java index 0799f9bc4736f..8eba1842d688d 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunctionTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.compute.aggregation; import 
org.elasticsearch.common.Randomness; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; @@ -47,8 +46,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new MedianAbsoluteDeviationDoubleAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new MedianAbsoluteDeviationDoubleAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunctionTests.java index 681aef76f75ba..fa396d7dcf7a6 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunctionTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.compute.aggregation; import org.elasticsearch.common.Randomness; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; @@ -30,8 +29,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new MedianAbsoluteDeviationIntAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new MedianAbsoluteDeviationIntAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunctionTests.java index 42664cc14d7e2..8a8b051528195 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunctionTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.compute.aggregation; import org.elasticsearch.common.Randomness; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; @@ -47,8 +46,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new MedianAbsoluteDeviationIntAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new MedianAbsoluteDeviationIntAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunctionTests.java index 0ba6dc6eb4812..a3db32955d28c 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunctionTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.compute.aggregation; import org.elasticsearch.common.Randomness; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; @@ -30,8 +29,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new MedianAbsoluteDeviationLongAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new MedianAbsoluteDeviationLongAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunctionTests.java index b53fab2567499..818150d3234aa 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunctionTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.compute.aggregation; import org.elasticsearch.common.Randomness; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; @@ -47,8 +46,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new MedianAbsoluteDeviationLongAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new MedianAbsoluteDeviationLongAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunctionTests.java index 2dd16c396723d..e92b98ebf91d0 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; @@ -27,8 +26,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new MinDoubleAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new MinDoubleAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinDoubleGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinDoubleGroupingAggregatorFunctionTests.java index 4e0b591b86b6b..62cf954a1909e 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinDoubleGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinDoubleGroupingAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; @@ -32,8 +31,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new MinDoubleAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new MinDoubleAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinIntAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinIntAggregatorFunctionTests.java index 2eb58e7b0b987..ffa2189f96b66 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinIntAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinIntAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.IntBlock; @@ -26,8 +25,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new MinIntAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new MinIntAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinIntGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinIntGroupingAggregatorFunctionTests.java index a7a71b073ceed..a7644c8bb26a9 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinIntGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinIntGroupingAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.IntBlock; @@ -24,8 +23,8 @@ public class MinIntGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new MinIntAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new MinIntAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinLongAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinLongAggregatorFunctionTests.java index 91feb141ac74b..00920be73117a 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinLongAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinLongAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.LongBlock; @@ -27,8 +26,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new MinLongAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new MinLongAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinLongGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinLongGroupingAggregatorFunctionTests.java index 02dda3fe3c236..5591fb57a8f2d 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinLongGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MinLongGroupingAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.LongBlock; @@ -24,8 +23,8 @@ public class MinLongGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new MinLongAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new MinLongAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunctionTests.java index 7d252a2e39a6f..be3db1f6b9517 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; @@ -32,8 +31,8 @@ public void initParameters() { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new PercentileDoubleAggregatorFunctionSupplier(bigArrays, inputChannels, percentile); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new PercentileDoubleAggregatorFunctionSupplier(inputChannels, percentile); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunctionTests.java index 9c1c81d67e014..8160de83691ad 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; @@ -33,8 +32,8 @@ public void initParameters() { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new PercentileDoubleAggregatorFunctionSupplier(bigArrays, inputChannels, percentile); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new PercentileDoubleAggregatorFunctionSupplier(inputChannels, percentile); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunctionTests.java index 0549170c2c432..21cbcf51a310d 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; @@ -31,8 +30,8 @@ public void initParameters() { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new PercentileIntAggregatorFunctionSupplier(bigArrays, inputChannels, percentile); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new PercentileIntAggregatorFunctionSupplier(inputChannels, percentile); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunctionTests.java index db8064feafd25..5d08be0ba2c6e 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; @@ -33,8 +32,8 @@ public void initParameters() { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new PercentileIntAggregatorFunctionSupplier(bigArrays, inputChannels, percentile); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new PercentileIntAggregatorFunctionSupplier(inputChannels, percentile); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunctionTests.java index eb32dac18ea80..ca982ded01498 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; @@ -31,8 +30,8 @@ public void initParameters() { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new PercentileLongAggregatorFunctionSupplier(bigArrays, inputChannels, percentile); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new PercentileLongAggregatorFunctionSupplier(inputChannels, percentile); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunctionTests.java index 6360be8595ff8..258041a9d9552 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; @@ -33,8 +32,8 @@ public void initParameters() { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new PercentileLongAggregatorFunctionSupplier(bigArrays, inputChannels, percentile); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new PercentileLongAggregatorFunctionSupplier(inputChannels, percentile); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunctionTests.java index ea428d7d87cad..3f292ff0f81a6 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; @@ -34,8 +33,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new SumDoubleAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new SumDoubleAggregatorFunctionSupplier(inputChannels); } @Override @@ -56,7 +55,7 @@ public void testOverflowSucceeds() { Driver d = new Driver( driverContext, new SequenceDoubleBlockSourceOperator(driverContext.blockFactory(), DoubleStream.of(Double.MAX_VALUE - 1, 2)), - List.of(simple(nonBreakingBigArrays()).get(driverContext)), + List.of(simple().get(driverContext)), new TestResultPageSinkOperator(results::add), () -> {} ) @@ -77,7 +76,7 @@ public void testSummationAccuracy() { driverContext.blockFactory(), DoubleStream.of(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7) ), - List.of(simple(nonBreakingBigArrays()).get(driverContext)), + List.of(simple().get(driverContext)), new TestResultPageSinkOperator(results::add), () -> {} ) @@ -103,7 +102,7 @@ public void testSummationAccuracy() { Driver d = new Driver( driverContext, new SequenceDoubleBlockSourceOperator(driverContext.blockFactory(), DoubleStream.of(values)), - List.of(simple(nonBreakingBigArrays()).get(driverContext)), + List.of(simple().get(driverContext)), new TestResultPageSinkOperator(results::add), () -> {} ) @@ -125,7 +124,7 @@ public void testSummationAccuracy() { Driver d = new Driver( driverContext, new SequenceDoubleBlockSourceOperator(driverContext.blockFactory(), DoubleStream.of(largeValues)), - List.of(simple(nonBreakingBigArrays()).get(driverContext)), + List.of(simple().get(driverContext)), new TestResultPageSinkOperator(results::add), () -> {} ) @@ -144,7 +143,7 @@ public void testSummationAccuracy() { Driver d = new Driver( driverContext, new SequenceDoubleBlockSourceOperator(driverContext.blockFactory(), DoubleStream.of(largeValues)), - List.of(simple(nonBreakingBigArrays()).get(driverContext)), + List.of(simple().get(driverContext)), new TestResultPageSinkOperator(results::add), () -> {} ) diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunctionTests.java index 307d57fa9ddcc..f982ee6cd58d6 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; @@ -32,8 +31,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new SumDoubleAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new SumDoubleAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumIntAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumIntAggregatorFunctionTests.java index 28fbeb9d590e7..cf364943e1d11 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumIntAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumIntAggregatorFunctionTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.compute.aggregation; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.LongBlock; @@ -33,8 +32,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new SumIntAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new SumIntAggregatorFunctionSupplier(inputChannels); } @Override @@ -55,7 +54,7 @@ public void testRejectsDouble() { Driver d = new Driver( driverContext, new CannedSourceOperator(Iterators.single(new Page(blockFactory.newDoubleArrayVector(new double[] { 1.0 }, 1).asBlock()))), - List.of(simple(nonBreakingBigArrays()).get(driverContext)), + List.of(simple().get(driverContext)), new PageConsumerOperator(page -> fail("shouldn't have made it this far")), () -> {} ) diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumIntGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumIntGroupingAggregatorFunctionTests.java index c8d584bf12dbd..3dfa4e9332a08 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumIntGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumIntGroupingAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.LongBlock; @@ -23,8 +22,8 @@ public class SumIntGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new SumIntAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new SumIntAggregatorFunctionSupplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumLongAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumLongAggregatorFunctionTests.java index e9523c5583cd4..7fd3cabb2c91e 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumLongAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumLongAggregatorFunctionTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.compute.aggregation; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.LongBlock; @@ -33,8 +32,8 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { } @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new SumLongAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new SumLongAggregatorFunctionSupplier(inputChannels); } @Override @@ -54,7 +53,7 @@ public void testOverflowFails() { Driver d = new Driver( driverContext, new SequenceLongBlockSourceOperator(driverContext.blockFactory(), LongStream.of(Long.MAX_VALUE - 1, 2)), - List.of(simple(nonBreakingBigArrays()).get(driverContext)), + List.of(simple().get(driverContext)), new PageConsumerOperator(page -> fail("shouldn't have made it this far")), () -> {} ) @@ -71,7 +70,7 @@ public void testRejectsDouble() { Driver d = new Driver( driverContext, new CannedSourceOperator(Iterators.single(new Page(blockFactory.newDoubleArrayVector(new double[] { 1.0 }, 1).asBlock()))), - List.of(simple(nonBreakingBigArrays()).get(driverContext)), + List.of(simple().get(driverContext)), new PageConsumerOperator(page -> fail("shouldn't have made it this far")), () -> {} ) diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumLongGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumLongGroupingAggregatorFunctionTests.java index 827dc06a4f542..f41a5cbef94fb 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumLongGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/SumLongGroupingAggregatorFunctionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.aggregation; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.LongBlock; @@ -23,8 +22,8 @@ public class SumLongGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { @Override - protected AggregatorFunctionSupplier aggregatorFunction(BigArrays bigArrays, List<Integer> inputChannels) { - return new SumLongAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier aggregatorFunction(List<Integer> inputChannels) { + return new SumLongAggregatorFunctionSupplier(inputChannels); } @Override
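The same two-line change repeats in every aggregator test file above: `AggregatorFunctionSupplier` subclasses are now constructed from the input channels alone, with no `BigArrays` handed over up front. A minimal sketch of the new shape, using the `SumLong` supplier that appears in these hunks (the wrapper class and the channel list passed in are illustrative, not part of the patch):

```java
// Sketch only: the supplier-construction change repeated in the tests above.
// The channel list is hypothetical; the test base class supplies the real one.
import java.util.List;

import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier;
import org.elasticsearch.compute.aggregation.SumLongAggregatorFunctionSupplier;

class SupplierMigrationSketch {
    // Before: new SumLongAggregatorFunctionSupplier(bigArrays, inputChannels)
    // After: memory is wired in when the aggregator is created, not here.
    AggregatorFunctionSupplier supplier(List<Integer> inputChannels) {
        return new SumLongAggregatorFunctionSupplier(inputChannels);
    }
}
```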
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java index 567f58d0dee75..0ccf2d3af04d9 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java @@ -18,7 +18,6 @@ import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.DoubleBlock; @@ -27,6 +26,7 @@ import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.MockBlockFactory; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.HashAggregationOperator; import org.elasticsearch.core.Releasables; @@ -498,12 +498,12 @@ public void testBooleanHashTrueOnly() { assertThat(ordsAndKeys.description, startsWith("PackedValuesBlockHash{groups=[0:BOOLEAN], entries=1, size=")); assertOrds(ordsAndKeys.ords, 0, 0, 0, 0); assertKeys(ordsAndKeys.keys, true); - assertThat(ordsAndKeys.nonEmpty, equalTo(IntVector.newVectorBuilder(1).appendInt(0).build())); + assertThat(ordsAndKeys.nonEmpty, equalTo(TestBlockFactory.getNonBreakingInstance().newConstantIntVector(0, 1))); } else { assertThat(ordsAndKeys.description, equalTo("BooleanBlockHash{channel=0, seenFalse=false, seenTrue=true, seenNull=false}")); assertOrds(ordsAndKeys.ords, 2, 2, 2, 2); assertKeys(ordsAndKeys.keys, true); - assertThat(ordsAndKeys.nonEmpty, equalTo(IntVector.newVectorBuilder(1).appendInt(2).build())); + assertThat(ordsAndKeys.nonEmpty, equalTo(TestBlockFactory.getNonBreakingInstance().newConstantIntVector(2, 1))); } }, blockFactory.newBooleanArrayVector(values, values.length).asBlock()); } @@ -514,11 +514,11 @@ public void testBooleanHashFalseOnly() { if (forcePackedHash) { assertThat(ordsAndKeys.description, startsWith("PackedValuesBlockHash{groups=[0:BOOLEAN], entries=1, size=")); assertOrds(ordsAndKeys.ords, 0, 0, 0, 0); - assertThat(ordsAndKeys.nonEmpty, equalTo(IntVector.newVectorBuilder(1).appendInt(0).build())); + assertThat(ordsAndKeys.nonEmpty, equalTo(TestBlockFactory.getNonBreakingInstance().newConstantIntVector(0, 1))); } else { assertThat(ordsAndKeys.description, equalTo("BooleanBlockHash{channel=0, seenFalse=true, seenTrue=false, seenNull=false}")); assertOrds(ordsAndKeys.ords, 1, 1, 1, 1); - assertThat(ordsAndKeys.nonEmpty, equalTo(IntVector.newVectorBuilder(1).appendInt(1).build())); + assertThat(ordsAndKeys.nonEmpty, equalTo(TestBlockFactory.getNonBreakingInstance().newConstantIntVector(1, 1))); } assertKeys(ordsAndKeys.keys, false); }, blockFactory.newBooleanArrayVector(values, values.length).asBlock()); @@ -1262,6 +1262,6 @@ static CircuitBreakerService mockBreakerService(CircuitBreaker breaker) { } IntVector intRange(int startInclusive, int endExclusive) { - return IntVector.range(startInclusive, endExclusive, BlockFactory.getNonBreakingInstance()); + return IntVector.range(startInclusive, endExclusive, TestBlockFactory.getNonBreakingInstance()); } }
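In the `BlockHashTests` hunks above, hand-built one-entry vectors (`IntVector.newVectorBuilder(1).appendInt(n).build()`) give way to constant vectors from the shared non-breaking test factory. A sketch of the pattern, using only the two calls shown in the diff (the helper method itself is illustrative):

```java
// Sketch only: the replacement assertion value used in the hunks above.
// A one-position constant vector stands in for a builder that appended one
// int; the non-breaking factory keeps fixture allocations off the breaker.
import org.elasticsearch.compute.data.IntVector;
import org.elasticsearch.compute.data.TestBlockFactory;

class NonEmptySketch {
    IntVector expectedNonEmpty(int group) {
        // "group" repeated for exactly 1 position
        return TestBlockFactory.getNonBreakingInstance().newConstantIntVector(group, 1);
    }
}
```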
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java index 2a49feeab9a30..7681b147824a5 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java @@ -11,10 +11,19 @@ import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; import org.elasticsearch.common.util.BytesRefArray; +import org.elasticsearch.common.util.DoubleArray; +import org.elasticsearch.common.util.IntArray; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; +import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.geo.ShapeTestUtils; +import org.elasticsearch.geometry.Point; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.junit.After; @@ -25,9 +34,12 @@ import java.util.BitSet; import java.util.List; import java.util.function.BiConsumer; +import java.util.function.Supplier; import java.util.stream.IntStream; import java.util.stream.LongStream; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -54,53 +66,59 @@ public void testEmpty() { } void testEmpty(BlockFactory bf) { - assertZeroPositionsAndRelease(bf.newIntArrayBlock(new int[] {}, 0, new int[] {}, new BitSet(), randomOrdering())); - assertZeroPositionsAndRelease(IntBlock.newBlockBuilder(0, bf).build()); + assertZeroPositionsAndRelease(bf.newIntArrayBlock(new int[] {}, 0, new int[] { 0 }, new BitSet(), randomOrdering())); + assertZeroPositionsAndRelease(bf.newIntBlockBuilder(0).build()); assertZeroPositionsAndRelease(bf.newIntArrayVector(new int[] {}, 0)); - assertZeroPositionsAndRelease(IntVector.newVectorBuilder(0, bf).build()); - assertZeroPositionsAndRelease(bf.newLongArrayBlock(new long[] {}, 0, new int[] {}, new BitSet(), randomOrdering())); - assertZeroPositionsAndRelease(LongBlock.newBlockBuilder(0, bf).build()); + assertZeroPositionsAndRelease(bf.newIntVectorBuilder(0).build()); + assertZeroPositionsAndRelease(bf.newLongArrayBlock(new long[] {}, 0, new int[] { 0 }, new BitSet(), randomOrdering())); + assertZeroPositionsAndRelease(bf.newLongBlockBuilder(0).build()); assertZeroPositionsAndRelease(bf.newLongArrayVector(new long[] {}, 0)); - assertZeroPositionsAndRelease(LongVector.newVectorBuilder(0, bf).build()); - assertZeroPositionsAndRelease(bf.newDoubleArrayBlock(new double[] {}, 0, new int[] {}, new BitSet(), randomOrdering())); - assertZeroPositionsAndRelease(DoubleBlock.newBlockBuilder(0, bf).build()); + assertZeroPositionsAndRelease(bf.newDoubleArrayBlock(new double[] {}, 0, new int[] { 0 }, new BitSet(), randomOrdering())); + assertZeroPositionsAndRelease(bf.newDoubleBlockBuilder(0).build()); assertZeroPositionsAndRelease(bf.newDoubleArrayVector(new double[] {}, 0)); - assertZeroPositionsAndRelease(DoubleVector.newVectorBuilder(0, bf).build()); + assertZeroPositionsAndRelease(bf.newDoubleVectorBuilder(0).build()); assertZeroPositionsAndRelease( - bf.newBytesRefArrayBlock(new BytesRefArray(0, bf.bigArrays()), 0, new int[] {}, new BitSet(), randomOrdering()) + bf.newBytesRefArrayBlock(new BytesRefArray(0, bf.bigArrays()), 0, new int[] { 0 }, new BitSet(), randomOrdering()) ); - assertZeroPositionsAndRelease(BytesRefBlock.newBlockBuilder(0, bf).build()); + assertZeroPositionsAndRelease(bf.newBytesRefBlockBuilder(0).build()); assertZeroPositionsAndRelease(bf.newBytesRefArrayVector(new BytesRefArray(0, bf.bigArrays()), 0)); - assertZeroPositionsAndRelease(BytesRefVector.newVectorBuilder(0, bf).build()); - assertZeroPositionsAndRelease(bf.newBooleanArrayBlock(new boolean[] {}, 0, new int[] {}, new BitSet(), randomOrdering())); - assertZeroPositionsAndRelease(BooleanBlock.newBlockBuilder(0, bf).build()); + assertZeroPositionsAndRelease(bf.newBytesRefVectorBuilder(0).build()); + assertZeroPositionsAndRelease(bf.newBooleanArrayBlock(new boolean[] {}, 0, new int[] { 0 }, new BitSet(), randomOrdering())); + assertZeroPositionsAndRelease(bf.newBooleanBlockBuilder(0).build()); assertZeroPositionsAndRelease(bf.newBooleanArrayVector(new boolean[] {}, 0)); - assertZeroPositionsAndRelease(BooleanVector.newVectorBuilder(0, bf).build()); + assertZeroPositionsAndRelease(bf.newBooleanVectorBuilder(0).build()); } public void testSmallSingleValueDenseGrowthInt() { for (int initialSize : List.of(0, 1, 2, 3, 4, 5)) { - try (var blockBuilder = IntBlock.newBlockBuilder(initialSize)) { + try (var blockBuilder = blockFactory.newIntBlockBuilder(initialSize)) { IntStream.range(0, 10).forEach(blockBuilder::appendInt); - assertSingleValueDenseBlock(blockBuilder.build()); + IntBlock block = blockBuilder.build(); + assertSingleValueDenseBlock(block); + block.close(); } } } public void testSmallSingleValueDenseGrowthLong() { for (int initialSize : List.of(0, 1, 2, 3, 4, 5)) { - try (var blockBuilder = LongBlock.newBlockBuilder(initialSize)) { + try (var blockBuilder = blockFactory.newLongBlockBuilder(initialSize)) { IntStream.range(0, 10).forEach(blockBuilder::appendLong); - assertSingleValueDenseBlock(blockBuilder.build()); + LongBlock block = blockBuilder.build(); + assertSingleValueDenseBlock(block); + block.close(); } } } public void testSmallSingleValueDenseGrowthDouble() { for (int initialSize : List.of(0, 1, 2, 3, 4, 5)) { - try (var blockBuilder = DoubleBlock.newBlockBuilder(initialSize)) { + try (var blockBuilder = blockFactory.newDoubleBlockBuilder(initialSize)) { IntStream.range(0, 10).forEach(blockBuilder::appendDouble); - assertSingleValueDenseBlock(blockBuilder.build()); + DoubleBlock block = blockBuilder.build(); + assertSingleValueDenseBlock(block); + block.close(); } } }
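The hunks above consistently replace the static `newBlockBuilder` entry points with builders obtained from the test's `blockFactory`, then close the built block once assertions are done, so the factory's breaker accounting returns to zero. A sketch of that lifecycle under those assumptions (the method name and sizes are illustrative):

```java
// Sketch only: the builder lifecycle the migrated tests follow. The factory
// tracks the builder's memory, try-with-resources frees the builder, and the
// built block is closed separately because it owns its memory once built.
import java.util.stream.IntStream;

import org.elasticsearch.compute.data.BlockFactory;
import org.elasticsearch.compute.data.IntBlock;

class BuilderLifecycleSketch {
    void buildAndRelease(BlockFactory blockFactory) {
        try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(5)) {
            IntStream.range(0, 10).forEach(builder::appendInt);
            IntBlock block = builder.build();
            // ... assertions on the block would run here ...
            block.close();
        }
    }
}
```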
@@ -108,18 +126,22 @@ public void testSmallSingleValueDenseGrowthDouble() { public void testSmallSingleValueDenseGrowthBytesRef() { final BytesRef NULL_VALUE = new BytesRef(); for (int initialSize : List.of(0, 1, 2, 3, 4, 5)) { - try (var blockBuilder = BytesRefBlock.newBlockBuilder(initialSize)) { + try (var blockBuilder = blockFactory.newBytesRefBlockBuilder(initialSize)) { IntStream.range(0, 10).mapToObj(i -> NULL_VALUE).forEach(blockBuilder::appendBytesRef); - assertSingleValueDenseBlock(blockBuilder.build()); + BytesRefBlock block = blockBuilder.build(); + assertSingleValueDenseBlock(block); + block.close(); } } } public void testSmallSingleValueDenseGrowthBoolean() { for (int initialSize : List.of(0, 1, 2, 3, 4, 5)) { - try (var blockBuilder = BooleanBlock.newBlockBuilder(initialSize)) { + try (var blockBuilder = blockFactory.newBooleanBlockBuilder(initialSize)) { IntStream.range(0, 10).forEach(i -> blockBuilder.appendBoolean(i % 3 == 0)); - assertSingleValueDenseBlock(blockBuilder.build()); + BooleanBlock block = blockBuilder.build(); + assertSingleValueDenseBlock(block); + block.close(); } } } @@ -156,7 +178,7 @@ public void testIntBlock() { IntBlock block; if (randomBoolean()) { final int builderEstimateSize = randomBoolean() ? randomIntBetween(1, positionCount) : positionCount; - try (IntBlock.Builder blockBuilder = IntBlock.newBlockBuilder(builderEstimateSize, blockFactory)) { + try (IntBlock.Builder blockBuilder = blockFactory.newIntBlockBuilder(builderEstimateSize)) { IntStream.range(0, positionCount).forEach(blockBuilder::appendInt); block = blockBuilder.build(); } @@ -171,7 +193,7 @@ assertThat(pos, is(block.getInt(pos))); assertSingleValueDenseBlock(block); - try (IntBlock.Builder blockBuilder = IntBlock.newBlockBuilder(1, blockFactory)) { + try (IntBlock.Builder blockBuilder = blockFactory.newIntBlockBuilder(1)) { IntBlock copy = blockBuilder.copyFrom(block, 0, block.getPositionCount()).build(); assertThat(copy, equalTo(block)); releaseAndAssertBreaker(block, copy); @@ -180,20 +202,19 @@ if (positionCount > 1) { assertNullValues( positionCount, - size -> IntBlock.newBlockBuilder(size, blockFactory), - (bb, value) -> bb.appendInt(value), + blockFactory::newIntBlockBuilder, + IntBlock.Builder::appendInt, position -> position, IntBlock.Builder::build, (randomNonNullPosition, b) -> { - assertThat((int) randomNonNullPosition, is(b.getInt(randomNonNullPosition.intValue()))); + assertThat(randomNonNullPosition, is(b.getInt(randomNonNullPosition.intValue()))); } ); } try ( - IntVector.Builder vectorBuilder = IntVector.newVectorBuilder( - randomBoolean() ? randomIntBetween(1, positionCount) : positionCount, - blockFactory + IntVector.Builder vectorBuilder = blockFactory.newIntVectorBuilder( + randomBoolean() ?
randomIntBetween(1, positionCount) : positionCount ) ) { IntStream.range(0, positionCount).forEach(vectorBuilder::appendInt); @@ -209,12 +230,7 @@ public void testConstantIntBlock() { assertThat(breaker.getUsed(), is(0L)); int positionCount = randomIntBetween(1, 16 * 1024); int value = randomInt(); - IntBlock block; - if (randomBoolean()) { - block = IntBlock.newConstantBlockWith(value, positionCount, blockFactory); - } else { - block = blockFactory.newConstantIntBlockWith(value, positionCount); - } + IntBlock block = blockFactory.newConstantIntBlockWith(value, positionCount); assertThat(positionCount, is(block.getPositionCount())); assertThat(value, is(block.getInt(0))); assertThat(value, is(block.getInt(positionCount - 1))); @@ -255,8 +271,8 @@ public void testLongBlock() { if (positionCount > 1) { assertNullValues( positionCount, - size -> LongBlock.newBlockBuilder(size, blockFactory), - (bb, value) -> bb.appendLong(value), + blockFactory::newLongBlockBuilder, + LongBlock.Builder::appendLong, position -> (long) position, LongBlock.Builder::build, (randomNonNullPosition, b) -> { @@ -280,12 +296,7 @@ public void testConstantLongBlock() { assertThat(breaker.getUsed(), is(0L)); int positionCount = randomIntBetween(1, 16 * 1024); long value = randomLong(); - LongBlock block; - if (randomBoolean()) { - block = LongBlock.newConstantBlockWith(value, positionCount, blockFactory); - } else { - block = blockFactory.newConstantLongBlockWith(value, positionCount); - } + LongBlock block = blockFactory.newConstantLongBlockWith(value, positionCount); assertThat(positionCount, is(block.getPositionCount())); assertThat(value, is(block.getLong(0))); assertThat(value, is(block.getLong(positionCount - 1))); @@ -318,7 +329,7 @@ public void testDoubleBlock() { assertThat((double) pos, is(block.getDouble(pos))); assertSingleValueDenseBlock(block); - try (DoubleBlock.Builder blockBuilder = DoubleBlock.newBlockBuilder(1)) { + try (DoubleBlock.Builder blockBuilder = blockFactory.newDoubleBlockBuilder(1)) { DoubleBlock copy = blockBuilder.copyFrom(block, 0, block.getPositionCount()).build(); assertThat(copy, equalTo(block)); releaseAndAssertBreaker(block, copy); @@ -327,8 +338,8 @@ if (positionCount > 1) { assertNullValues( positionCount, - size -> DoubleBlock.newBlockBuilder(size, blockFactory), - (bb, value) -> bb.appendDouble(value), + blockFactory::newDoubleBlockBuilder, + DoubleBlock.Builder::appendDouble, position -> (double) position, DoubleBlock.Builder::build, (randomNonNullPosition, b) -> { @@ -354,12 +365,7 @@ public void testConstantDoubleBlock() { for (int i = 0; i < 1000; i++) { int positionCount = randomIntBetween(1, 16 * 1024); double value = randomDouble(); - DoubleBlock block; - if (randomBoolean()) { - block = DoubleBlock.newConstantBlockWith(value, positionCount, blockFactory); - } else { - block = blockFactory.newConstantDoubleBlockWith(value, positionCount); - } + DoubleBlock block = blockFactory.newConstantDoubleBlockWith(value, positionCount); assertThat(positionCount, is(block.getPositionCount())); assertThat(value, is(block.getDouble(0))); assertThat(value, is(block.getDouble(positionCount - 1))); @@ -369,12 +375,12 @@ } } - public void testBytesRefBlock() { + private void testBytesRefBlock(Supplier<BytesRef> byteArraySupplier, boolean chomp, org.mockito.ThrowingConsumer<BytesRef> assertions) { int positionCount = randomIntBetween(1, 16 * 1024); BytesRef[] values = new BytesRef[positionCount]; for (int i = 0; i < positionCount; i++) { -
BytesRef bytesRef = new BytesRef(randomByteArrayOfLength(between(1, 20))); - if (bytesRef.length > 0 && randomBoolean()) { + BytesRef bytesRef = byteArraySupplier.get(); + if (chomp && bytesRef.length > 0 && randomBoolean()) { bytesRef.offset = randomIntBetween(0, bytesRef.length - 1); bytesRef.length = randomIntBetween(0, bytesRef.length - bytesRef.offset); } @@ -400,10 +406,11 @@ public void testBytesRefBlock() { int pos = randomIntBetween(0, positionCount - 1); bytes = block.getBytesRef(pos, bytes); assertThat(bytes, equalTo(values[pos])); + assertions.accept(bytes); } assertSingleValueDenseBlock(block); - try (BytesRefBlock.Builder blockBuilder = BytesRefBlock.newBlockBuilder(1)) { + try (BytesRefBlock.Builder blockBuilder = blockFactory.newBytesRefBlockBuilder(1)) { BytesRefBlock copy = blockBuilder.copyFrom(block, 0, block.getPositionCount()).build(); assertThat(copy, equalTo(block)); releaseAndAssertBreaker(block, copy); @@ -412,8 +419,8 @@ public void testBytesRefBlock() { if (positionCount > 1) { assertNullValues( positionCount, - size -> BytesRefBlock.newBlockBuilder(size, blockFactory), - (bb, value) -> bb.appendBytesRef(value), + blockFactory::newBytesRefBlockBuilder, + BytesRefBlock.Builder::appendBytesRef, position -> values[position], BytesRefBlock.Builder::build, (randomNonNullPosition, b) -> assertThat( @@ -435,6 +442,18 @@ public void testBytesRefBlock() { } } + public void testBytesRefBlock() { + testBytesRefBlock(() -> new BytesRef(randomByteArrayOfLength(between(1, 20))), true, b -> {}); + } + + public void testBytesRefBlockOnGeoPoints() { + testBytesRefBlock(() -> GEO.pointAsWKB(GeometryTestUtils.randomPoint()), false, GEO::wkbAsString); + } + + public void testBytesRefBlockOnCartesianPoints() { + testBytesRefBlock(() -> CARTESIAN.pointAsWKB(ShapeTestUtils.randomPoint()), false, CARTESIAN::wkbAsString); + } + public void testBytesRefBlockBuilderWithNulls() { int positionCount = randomIntBetween(0, 16 * 1024); final int builderEstimateSize = randomBoolean() ? 
randomIntBetween(1, positionCount) : positionCount; @@ -481,12 +500,7 @@ public void testConstantBytesRefBlock() { for (int i = 0; i < 1000; i++) { int positionCount = randomIntBetween(1, 16 * 1024); BytesRef value = new BytesRef(randomByteArrayOfLength(between(1, 20))); - BytesRefBlock block; - if (randomBoolean()) { - block = BytesRefBlock.newConstantBlockWith(value, positionCount, blockFactory); - } else { - block = blockFactory.newConstantBytesRefBlockWith(value, positionCount); - } + BytesRefBlock block = blockFactory.newConstantBytesRefBlockWith(value, positionCount); assertThat(block.getPositionCount(), is(positionCount)); BytesRef bytes = new BytesRef(); @@ -524,7 +538,7 @@ public void testBooleanBlock() { assertThat(block.getBoolean(positionCount - 1), is((positionCount - 1) % 10 == 0)); assertSingleValueDenseBlock(block); - try (BooleanBlock.Builder blockBuilder = BooleanBlock.newBlockBuilder(1)) { + try (BooleanBlock.Builder blockBuilder = blockFactory.newBooleanBlockBuilder(1)) { BooleanBlock copy = blockBuilder.copyFrom(block, 0, block.getPositionCount()).build(); assertThat(copy, equalTo(block)); releaseAndAssertBreaker(block, copy); @@ -533,7 +547,7 @@ public void testBooleanBlock() { if (positionCount > 1) { assertNullValues( positionCount, - size -> BooleanBlock.newBlockBuilder(size, blockFactory), + size -> blockFactory.newBooleanBlockBuilder(size), (bb, value) -> bb.appendBoolean(value), position -> position % 10 == 0, BooleanBlock.Builder::build, @@ -557,12 +571,7 @@ public void testConstantBooleanBlock() { for (int i = 0; i < 1000; i++) { int positionCount = randomIntBetween(1, 16 * 1024); boolean value = randomBoolean(); - BooleanBlock block; - if (randomBoolean()) { - block = BooleanBlock.newConstantBlockWith(value, positionCount, blockFactory); - } else { - block = blockFactory.newConstantBooleanBlockWith(value, positionCount); - } + BooleanBlock block = blockFactory.newConstantBooleanBlockWith(value, positionCount); assertThat(positionCount, is(block.getPositionCount())); assertThat(block.getBoolean(0), is(value)); assertThat(block.getBoolean(positionCount - 1), is(value)); @@ -576,7 +585,7 @@ public void testConstantNullBlock() { for (int i = 0; i < 100; i++) { assertThat(breaker.getUsed(), is(0L)); int positionCount = randomIntBetween(1, 16 * 1024); - Block block = Block.constantNullBlock(positionCount, blockFactory); + Block block = blockFactory.newConstantNullBlock(positionCount); assertTrue(block.areAllValuesNull()); assertThat(block, instanceOf(BooleanBlock.class)); assertThat(block, instanceOf(IntBlock.class)); @@ -599,7 +608,7 @@ public void testConstantNullBlock() { public void testSingleValueSparseInt() { int positionCount = randomIntBetween(2, 16 * 1024); final int builderEstimateSize = randomBoolean() ? randomIntBetween(1, positionCount) : positionCount; - try (var blockBuilder = IntBlock.newBlockBuilder(builderEstimateSize)) { + try (var blockBuilder = blockFactory.newIntBlockBuilder(builderEstimateSize)) { int actualValueCount = 0; int[] values = new int[positionCount]; @@ -627,13 +636,14 @@ public void testSingleValueSparseInt() { } assertThat(block.nullValuesCount(), is(nullCount)); assertThat(block.asVector(), nullCount > 0 ? is(nullValue()) : is(notNullValue())); + block.close(); } } public void testSingleValueSparseLong() { int positionCount = randomIntBetween(2, 16 * 1024); final int builderEstimateSize = randomBoolean() ? 
randomIntBetween(1, positionCount) : positionCount; - try (var blockBuilder = LongBlock.newBlockBuilder(builderEstimateSize)) { + try (var blockBuilder = blockFactory.newLongBlockBuilder(builderEstimateSize)) { int actualValueCount = 0; long[] values = new long[positionCount]; @@ -660,13 +670,14 @@ public void testSingleValueSparseLong() { } assertThat(block.nullValuesCount(), is(nullCount)); assertThat(block.asVector(), nullCount > 0 ? is(nullValue()) : is(notNullValue())); + block.close(); } } public void testSingleValueSparseDouble() { int positionCount = randomIntBetween(2, 16 * 1024); final int builderEstimateSize = randomBoolean() ? randomIntBetween(1, positionCount) : positionCount; - try (var blockBuilder = DoubleBlock.newBlockBuilder(builderEstimateSize)) { + try (var blockBuilder = blockFactory.newDoubleBlockBuilder(builderEstimateSize)) { int actualValueCount = 0; double[] values = new double[positionCount]; @@ -693,13 +704,14 @@ public void testSingleValueSparseDouble() { } assertThat(block.nullValuesCount(), is(nullCount)); assertThat(block.asVector(), nullCount > 0 ? is(nullValue()) : is(notNullValue())); + block.close(); } } public void testSingleValueSparseBoolean() { int positionCount = randomIntBetween(2, 16 * 1024); final int builderEstimateSize = randomBoolean() ? randomIntBetween(1, positionCount) : positionCount; - try (var blockBuilder = BooleanBlock.newBlockBuilder(builderEstimateSize)) { + try (var blockBuilder = blockFactory.newBooleanBlockBuilder(builderEstimateSize)) { boolean[] values = new boolean[positionCount]; int actualValueCount = 0; @@ -726,6 +738,7 @@ public void testSingleValueSparseBoolean() { } assertThat(block.nullValuesCount(), is(nullCount)); assertThat(block.asVector(), nullCount > 0 ? is(nullValue()) : is(notNullValue())); + block.close(); } } @@ -733,8 +746,8 @@ public void testToStringSmall() { final int estimatedSize = randomIntBetween(1024, 4096); try ( - var boolBlock = BooleanBlock.newBlockBuilder(estimatedSize).appendBoolean(true).appendBoolean(false).build(); - var boolVector = BooleanVector.newVectorBuilder(estimatedSize).appendBoolean(true).appendBoolean(false).build() + var boolBlock = blockFactory.newBooleanBlockBuilder(estimatedSize).appendBoolean(true).appendBoolean(false).build(); + var boolVector = blockFactory.newBooleanVectorBuilder(estimatedSize).appendBoolean(true).appendBoolean(false).build() ) { for (Object obj : List.of(boolVector, boolBlock, boolBlock.asVector())) { String s = obj.toString(); @@ -744,8 +757,8 @@ public void testToStringSmall() { } try ( - var intBlock = IntBlock.newBlockBuilder(estimatedSize).appendInt(1).appendInt(2).build(); - var intVector = IntVector.newVectorBuilder(estimatedSize).appendInt(1).appendInt(2).build() + var intBlock = blockFactory.newIntBlockBuilder(estimatedSize).appendInt(1).appendInt(2).build(); + var intVector = blockFactory.newIntVectorBuilder(estimatedSize).appendInt(1).appendInt(2).build() ) { for (Object obj : List.of(intVector, intBlock, intBlock.asVector())) { String s = obj.toString(); @@ -753,25 +766,38 @@ public void testToStringSmall() { assertThat(s, containsString("positions=2")); } for (IntBlock block : List.of(intBlock, intVector.asBlock())) { - assertThat(block.filter(0).toString(), containsString("IntVectorBlock[vector=ConstantIntVector[positions=1, value=1]]")); - assertThat(block.filter(1).toString(), containsString("IntVectorBlock[vector=ConstantIntVector[positions=1, value=2]]")); - assertThat( - block.filter(0, 1).toString(), - 
containsString("IntVectorBlock[vector=IntArrayVector[positions=2, values=[1, 2]]]") - ); - assertThat(block.filter().toString(), containsString("IntVectorBlock[vector=IntArrayVector[positions=0, values=[]]]")); + try (var filter = block.filter(0)) { + assertThat(filter.toString(), containsString("IntVectorBlock[vector=ConstantIntVector[positions=1, value=1]]")); + } + try (var filter = block.filter(1)) { + assertThat(filter.toString(), containsString("IntVectorBlock[vector=ConstantIntVector[positions=1, value=2]]")); + } + try (var filter = block.filter(0, 1)) { + assertThat(filter.toString(), containsString("IntVectorBlock[vector=IntArrayVector[positions=2, values=[1, 2]]]")); + } + try (var filter = block.filter()) { + assertThat(filter.toString(), containsString("IntVectorBlock[vector=IntArrayVector[positions=0, values=[]]]")); + } } for (IntVector vector : List.of(intVector, intBlock.asVector())) { - assertThat(vector.filter(0).toString(), containsString("ConstantIntVector[positions=1, value=1]")); - assertThat(vector.filter(1).toString(), containsString("ConstantIntVector[positions=1, value=2]")); - assertThat(vector.filter(0, 1).toString(), containsString("IntArrayVector[positions=2, values=[1, 2]]")); - assertThat(vector.filter().toString(), containsString("IntArrayVector[positions=0, values=[]]")); + try (var filter = vector.filter(0)) { + assertThat(filter.toString(), containsString("ConstantIntVector[positions=1, value=1]")); + } + try (IntVector filter = vector.filter(1)) { + assertThat(filter.toString(), containsString("ConstantIntVector[positions=1, value=2]")); + } + try (IntVector filter = vector.filter(0, 1)) { + assertThat(filter.toString(), containsString("IntArrayVector[positions=2, values=[1, 2]]")); + } + try (IntVector filter = vector.filter()) { + assertThat(filter.toString(), containsString("IntArrayVector[positions=0, values=[]]")); + } } } try ( - var longBlock = LongBlock.newBlockBuilder(estimatedSize).appendLong(10L).appendLong(20L).build(); - var longVector = LongVector.newVectorBuilder(estimatedSize).appendLong(10L).appendLong(20L).build() + var longBlock = blockFactory.newLongBlockBuilder(estimatedSize).appendLong(10L).appendLong(20L).build(); + var longVector = blockFactory.newLongVectorBuilder(estimatedSize).appendLong(10L).appendLong(20L).build() ) { for (Object obj : List.of(longVector, longBlock, longBlock.asVector())) { String s = obj.toString(); @@ -781,8 +807,8 @@ public void testToStringSmall() { } try ( - var doubleBlock = DoubleBlock.newBlockBuilder(estimatedSize).appendDouble(3.3).appendDouble(4.4).build(); - var doubleVector = DoubleVector.newVectorBuilder(estimatedSize).appendDouble(3.3).appendDouble(4.4).build() + var doubleBlock = blockFactory.newDoubleBlockBuilder(estimatedSize).appendDouble(3.3).appendDouble(4.4).build(); + var doubleVector = blockFactory.newDoubleVectorBuilder(estimatedSize).appendDouble(3.3).appendDouble(4.4).build() ) { for (Object obj : List.of(doubleVector, doubleBlock, doubleBlock.asVector())) { String s = obj.toString(); @@ -793,8 +819,8 @@ public void testToStringSmall() { assert new BytesRef("1a").toString().equals("[31 61]") && new BytesRef("2b").toString().equals("[32 62]"); try ( - var blockBuilder = BytesRefBlock.newBlockBuilder(estimatedSize); - var vectorBuilder = BytesRefVector.newVectorBuilder(estimatedSize) + var blockBuilder = blockFactory.newBytesRefBlockBuilder(estimatedSize); + var vectorBuilder = blockFactory.newBytesRefVectorBuilder(estimatedSize) ) { var bytesRefBlock = 
blockBuilder.appendBytesRef(new BytesRef("1a")).appendBytesRef(new BytesRef("2b")).build(); var bytesRefVector = vectorBuilder.appendBytesRef(new BytesRef("1a")).appendBytesRef(new BytesRef("2b")).build(); @@ -802,6 +828,7 @@ public void testToStringSmall() { String s = obj.toString(); assertThat(s, containsString("positions=2")); } + Releasables.close(bytesRefBlock, bytesRefVector); } } @@ -846,7 +873,7 @@ public static RandomBlock randomBlock( int maxDupsPerPosition ) { return randomBlock( - BlockFactory.getNonBreakingInstance(), + TestBlockFactory.getNonBreakingInstance(), elementType, positionCount, nullAllowed, @@ -869,6 +896,8 @@ public static RandomBlock randomBlock( ) { List> values = new ArrayList<>(); try (var builder = elementType.newBlockBuilder(positionCount, blockFactory)) { + boolean bytesRefFromPoints = randomBoolean(); + Supplier pointSupplier = randomBoolean() ? GeometryTestUtils::randomPoint : ShapeTestUtils::randomPoint; for (int p = 0; p < positionCount; p++) { int valueCount = between(minValuesPerPosition, maxValuesPerPosition); if (valueCount == 0 || nullAllowed && randomBoolean()) { @@ -900,7 +929,9 @@ public static RandomBlock randomBlock( ((DoubleBlock.Builder) builder).appendDouble(d); } case BYTES_REF -> { - BytesRef b = new BytesRef(randomRealisticUnicodeOfLength(4)); + BytesRef b = bytesRefFromPoints + ? GEO.pointAsWKB(pointSupplier.get()) + : new BytesRef(randomRealisticUnicodeOfLength(4)); valuesAtPosition.add(b); ((BytesRefBlock.Builder) builder).appendBytesRef(b); } @@ -1000,13 +1031,7 @@ void releaseAndAssertBreaker(Vector vector) { static void assertCannotDoubleRelease(Block block) { var ex = expectThrows(IllegalStateException.class, () -> block.close()); - assertThat(ex.getMessage(), containsString("can't release already released block")); - } - - static void assertCannotReleaseIfVectorAlreadyReleased(Block block) { - var ex = expectThrows(IllegalStateException.class, () -> block.close()); - assertThat(ex.getMessage(), containsString("can't release block")); - assertThat(ex.getMessage(), containsString("containing already released vector")); + assertThat(ex.getMessage(), containsString("can't release already released object")); } static void assertCannotReadFromPage(Page page) { @@ -1041,6 +1066,13 @@ public void testRefCountingArrayBlock() { assertThat(breaker.getUsed(), is(0L)); } + public void testRefCountingBigArrayBlock() { + Block block = randomBigArrayBlock(); + assertThat(breaker.getUsed(), greaterThan(0L)); + assertRefCountingBehavior(block); + assertThat(breaker.getUsed(), is(0L)); + } + public void testRefCountingConstantNullBlock() { Block block = blockFactory.newConstantNullBlock(10); assertThat(breaker.getUsed(), greaterThan(0L)); @@ -1057,83 +1089,165 @@ public void testRefCountingDocBlock() { } public void testRefCountingVectorBlock() { - Block block = randomNonDocVector().asBlock(); + Block block = randomConstantVector().asBlock(); assertThat(breaker.getUsed(), greaterThan(0L)); assertRefCountingBehavior(block); assertThat(breaker.getUsed(), is(0L)); } - // Take a block with exactly 1 reference and assert that ref counting works fine. 
- // Take a block with exactly 1 reference and assert that ref counting works fine. - static void assertRefCountingBehavior(Block b) { - assertTrue(b.hasReferences()); + public void testRefCountingArrayVector() { + Vector vector = randomArrayVector(); + assertThat(breaker.getUsed(), greaterThan(0L)); + assertRefCountingBehavior(vector); + assertThat(breaker.getUsed(), is(0L)); + } + + public void testRefCountingBigArrayVector() { + Vector vector = randomBigArrayVector(); + assertThat(breaker.getUsed(), greaterThan(0L)); + assertRefCountingBehavior(vector); + assertThat(breaker.getUsed(), is(0L)); + } + + public void testRefCountingConstantVector() { + Vector vector = randomConstantVector(); + assertThat(breaker.getUsed(), greaterThan(0L)); + assertRefCountingBehavior(vector); + assertThat(breaker.getUsed(), is(0L)); + } + + public void testRefCountingDocVector() { + int positionCount = randomIntBetween(0, 100); + DocVector vector = new DocVector(intVector(positionCount), intVector(positionCount), intVector(positionCount), true); + assertThat(breaker.getUsed(), greaterThan(0L)); + assertRefCountingBehavior(vector); + assertThat(breaker.getUsed(), is(0L)); + } + + /** + * Take an object with exactly 1 reference and assert that ref counting works fine. + * Assumes that {@link Releasable#close()} and {@link RefCounted#decRef()} are equivalent. + */ + static <T extends Releasable & RefCounted> void assertRefCountingBehavior(T object) { + assertTrue(object.hasReferences()); int numShallowCopies = randomIntBetween(0, 15); for (int i = 0; i < numShallowCopies; i++) { if (randomBoolean()) { - b.incRef(); + object.incRef(); } else { - assertTrue(b.tryIncRef()); + assertTrue(object.tryIncRef()); } } for (int i = 0; i < numShallowCopies; i++) { if (randomBoolean()) { - b.close(); + object.close(); } else { // closing and decRef'ing must be equivalent - assertFalse(b.decRef()); + assertFalse(object.decRef()); } - assertTrue(b.hasReferences()); + assertTrue(object.hasReferences()); } if (randomBoolean()) { - b.close(); + object.close(); } else { - assertTrue(b.decRef()); + assertTrue(object.decRef()); } - assertFalse(b.hasReferences()); - assertFalse(b.tryIncRef()); + assertFalse(object.hasReferences()); + assertFalse(object.tryIncRef()); - expectThrows(IllegalStateException.class, b::close); - expectThrows(IllegalStateException.class, b::incRef); + expectThrows(IllegalStateException.class, object::close); + expectThrows(IllegalStateException.class, object::incRef); } - public void testReleasedVectorInvalidatesBlockState() { - Vector vector = randomNonDocVector(); - Block block = vector.asBlock(); - - int numRefs = randomIntBetween(1, 10); - for (int i = 0; i < numRefs - 1; i++) { - block.incRef(); - } - - vector.close(); - assertEquals(false, block.tryIncRef()); - expectThrows(IllegalStateException.class, block::close); - expectThrows(IllegalStateException.class, block::incRef); + private IntVector intVector(int positionCount) { + return blockFactory.newIntArrayVector(IntStream.range(0, positionCount).toArray(), positionCount); }
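For context on the DocVector cases around this hunk: a DocVector bundles three parallel IntVectors plus a flag describing their ordering, and releasing it must release the children too, which is why the breaker drops back to zero in testRefCountingDocVector above. A rough sketch using the intVector helper defined here; the shard/segment/doc reading of the three arguments is the conventional interpretation, not spelled out in this hunk:

```java
// Three parallel id vectors; `true` marks them single-segment and non-decreasing.
DocVector docs = new DocVector(intVector(8), intVector(8), intVector(8), true);
docs.close(); // releasing the DocVector releases the child id vectors as well
```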
- public void testReleasedDocVectorInvalidatesBlockState() { + private Vector randomArrayVector() { int positionCount = randomIntBetween(0, 100); - DocVector vector = new DocVector(intVector(positionCount), intVector(positionCount), intVector(positionCount), true); - DocBlock block = vector.asBlock(); + int vectorType = randomIntBetween(0, 4); - int numRefs = randomIntBetween(1, 10); - for (int i = 0; i < numRefs - 1; i++) { - block.incRef(); - } + return switch (vectorType) { + case 0 -> { + boolean[] values = new boolean[positionCount]; + Arrays.fill(values, randomBoolean()); + yield blockFactory.newBooleanArrayVector(values, positionCount); + } + case 1 -> { + BytesRefArray values = new BytesRefArray(positionCount, BigArrays.NON_RECYCLING_INSTANCE); + for (int i = 0; i < positionCount; i++) { + values.append(new BytesRef(randomByteArrayOfLength(between(1, 20)))); + } + + yield blockFactory.newBytesRefArrayVector(values, positionCount); + } + case 2 -> { + double[] values = new double[positionCount]; + Arrays.fill(values, 1.0); + + yield blockFactory.newDoubleArrayVector(values, positionCount); + } + case 3 -> { + int[] values = new int[positionCount]; + Arrays.fill(values, 1); + + yield blockFactory.newIntArrayVector(values, positionCount); + } + default -> { + long[] values = new long[positionCount]; + Arrays.fill(values, 1L); - vector.close(); - assertEquals(false, block.tryIncRef()); - expectThrows(IllegalStateException.class, block::close); - expectThrows(IllegalStateException.class, block::incRef); + yield blockFactory.newLongArrayVector(values, positionCount); + } + }; } - private IntVector intVector(int positionCount) { - return blockFactory.newIntArrayVector(IntStream.range(0, positionCount).toArray(), positionCount); + private Vector randomBigArrayVector() { + int positionCount = randomIntBetween(0, 10000); + int arrayType = randomIntBetween(0, 3); + + return switch (arrayType) { + case 0 -> { + BitArray values = new BitArray(positionCount, blockFactory.bigArrays()); + for (int i = 0; i < positionCount; i++) { + if (randomBoolean()) { + values.set(i); + } + } + + yield new BooleanBigArrayVector(values, positionCount, blockFactory); + } + case 1 -> { + DoubleArray values = blockFactory.bigArrays().newDoubleArray(positionCount, false); + for (int i = 0; i < positionCount; i++) { + values.set(i, randomDouble()); + } + + yield new DoubleBigArrayVector(values, positionCount, blockFactory); + } + case 2 -> { + IntArray values = blockFactory.bigArrays().newIntArray(positionCount, false); + for (int i = 0; i < positionCount; i++) { + values.set(i, randomInt()); + } + + yield new IntBigArrayVector(values, positionCount, blockFactory); + } + default -> { + LongArray values = blockFactory.bigArrays().newLongArray(positionCount, false); + for (int i = 0; i < positionCount; i++) { + values.set(i, randomLong()); + } + + yield new LongBigArrayVector(values, positionCount, blockFactory); + } + }; } - private Vector randomNonDocVector() { + private Vector randomConstantVector() { int positionCount = randomIntBetween(0, 100); int vectorType = randomIntBetween(0, 4); @@ -1149,13 +1263,14 @@ private Vector randomNonDocVector() { private Block randomArrayBlock() { int positionCount = randomIntBetween(0, 100); int arrayType = randomIntBetween(0, 4); + int[] firstValueIndexes = IntStream.range(0, positionCount + 1).toArray(); return switch (arrayType) { case 0 -> { boolean[] values = new boolean[positionCount]; - Arrays.fill(values, true); + Arrays.fill(values, randomBoolean()); - yield blockFactory.newBooleanArrayBlock(values, positionCount, new int[] {}, new BitSet(), randomOrdering()); + yield blockFactory.newBooleanArrayBlock(values, positionCount, firstValueIndexes, new BitSet(), randomOrdering()); } case 1 -> { BytesRefArray values = new BytesRefArray(positionCount, BigArrays.NON_RECYCLING_INSTANCE); @@ -1163,25 +1278,67 @@ private Block randomArrayBlock() { values.append(new BytesRef(randomByteArrayOfLength(between(1, 20)))); } - yield blockFactory.newBytesRefArrayBlock(values, positionCount, new int[] {}, new BitSet(), randomOrdering()); + yield blockFactory.newBytesRefArrayBlock(values, positionCount, firstValueIndexes, new BitSet(), randomOrdering()); } case 2 -> { double[] values = new double[positionCount]; Arrays.fill(values, 1.0); - yield blockFactory.newDoubleArrayBlock(values, positionCount, new int[] {}, new BitSet(), randomOrdering()); + yield blockFactory.newDoubleArrayBlock(values, positionCount, firstValueIndexes, new BitSet(), randomOrdering()); } case 3 -> { int[] values = new int[positionCount]; Arrays.fill(values, 1); - yield blockFactory.newIntArrayBlock(values, positionCount, new int[] {}, new BitSet(), randomOrdering()); + yield blockFactory.newIntArrayBlock(values, positionCount, firstValueIndexes, new BitSet(), randomOrdering()); } default -> { long[] values = new long[positionCount]; Arrays.fill(values, 1L); - yield blockFactory.newLongArrayBlock(values, positionCount, new int[] {}, new BitSet(), randomOrdering()); + yield blockFactory.newLongArrayBlock(values, positionCount, firstValueIndexes, new BitSet(), randomOrdering()); + } + }; + }
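randomArrayBlock above now builds a real firstValueIndexes array instead of passing an empty one: entry p is the index of position p's first value, and a final entry closes the last position, so IntStream.range(0, positionCount + 1) encodes exactly one value per position. A small illustration of the encoding, with hypothetical values:

```java
// values:            [10, 20, 30, 40]
// firstValueIndexes: [0, 1, 3, 4] -> position 0 = {10}, position 1 = {20, 30}, position 2 = {40}
// value count of position p is firstValueIndexes[p + 1] - firstValueIndexes[p]
int positionCount = 3;
int[] singleValued = IntStream.range(0, positionCount + 1).toArray(); // [0, 1, 2, 3]
```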
+ + private Block randomBigArrayBlock() { + int positionCount = randomIntBetween(0, 10000); + int arrayType = randomIntBetween(0, 3); + + return switch (arrayType) { + case 0 -> { + BitArray values = new BitArray(positionCount, blockFactory.bigArrays()); + for (int i = 0; i < positionCount; i++) { + if (randomBoolean()) { + values.set(i); + } + } + + yield new BooleanBigArrayBlock(values, positionCount, null, new BitSet(), Block.MvOrdering.UNORDERED, blockFactory); + } + case 1 -> { + DoubleArray values = blockFactory.bigArrays().newDoubleArray(positionCount, false); + for (int i = 0; i < positionCount; i++) { + values.set(i, randomDouble()); + } + + yield new DoubleBigArrayBlock(values, positionCount, null, new BitSet(), Block.MvOrdering.UNORDERED, blockFactory); + } + case 2 -> { + IntArray values = blockFactory.bigArrays().newIntArray(positionCount, false); + for (int i = 0; i < positionCount; i++) { + values.set(i, randomInt()); + } + + yield new IntBigArrayBlock(values, positionCount, null, new BitSet(), Block.MvOrdering.UNORDERED, blockFactory); + } + default -> { + LongArray values = blockFactory.bigArrays().newLongArray(positionCount, false); + for (int i = 0; i < positionCount; i++) { + values.set(i, randomLong()); + } + + yield new LongBigArrayBlock(values, positionCount, null, new BitSet(), Block.MvOrdering.UNORDERED, blockFactory); } }; } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java index 25cd9ed5b9fe5..f76ff0708120b 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java @@ -41,41 +41,72 @@ public void testExceptions() { } public void testEqualityAndHashCodeSmallInput() { + Page in = new Page(0); EqualsHashCodeTestUtils.checkEqualsAndHashCode( - new Page(0, new Block[] {}), - page -> new Page(0, new Block[] {}), - page -> new Page(1, IntBlock.newConstantBlockWith(1, 1)) + in, + page -> new Page(0), + page -> new Page(1, blockFactory.newConstantIntBlockWith(1, 1)), + Page::releaseBlocks ); + in.releaseBlocks(); + + in = new Page(blockFactory.newIntArrayVector(new int[] {}, 0).asBlock()); EqualsHashCodeTestUtils.checkEqualsAndHashCode( - new Page(new IntArrayVector(new int[] {}, 0).asBlock()), - page -> new Page(new IntArrayVector(new int[] {}, 
0).asBlock()), - page -> new Page(new IntArrayVector(new int[] { 1 }, 1).asBlock()) + in, + page -> new Page(blockFactory.newIntArrayVector(new int[] {}, 0).asBlock()), + page -> new Page(blockFactory.newIntArrayVector(new int[] { 1 }, 1).asBlock()), + Page::releaseBlocks ); + in.releaseBlocks(); + + in = new Page(blockFactory.newIntArrayVector(new int[] { 1 }, 0).asBlock()); EqualsHashCodeTestUtils.checkEqualsAndHashCode( - new Page(new IntArrayVector(new int[] { 1 }, 0).asBlock()), - page -> new Page(new IntArrayVector(new int[] { 1 }, 0).asBlock()), - page -> new Page(new IntArrayVector(new int[] { 1 }, 1).asBlock()) + in, + page -> new Page(blockFactory.newIntArrayVector(new int[] { 1 }, 0).asBlock()), + page -> new Page(blockFactory.newIntArrayVector(new int[] { 1 }, 1).asBlock()), + Page::releaseBlocks ); + in.releaseBlocks(); + + in = new Page(blockFactory.newIntArrayVector(new int[] { 1, 1, 1 }, 3).asBlock()); EqualsHashCodeTestUtils.checkEqualsAndHashCode( - new Page(new IntArrayVector(new int[] { 1, 1, 1 }, 3).asBlock()), - page -> new Page(IntBlock.newConstantBlockWith(1, 3)), - page -> new Page(IntBlock.newConstantBlockWith(1, 2)) + in, + page -> new Page(blockFactory.newConstantIntBlockWith(1, 3)), + page -> new Page(blockFactory.newConstantIntBlockWith(1, 2)), + Page::releaseBlocks ); + in.releaseBlocks(); + + in = new Page(blockFactory.newIntArrayVector(IntStream.range(0, 10).toArray(), 10).asBlock()); EqualsHashCodeTestUtils.checkEqualsAndHashCode( - new Page(new IntArrayVector(IntStream.range(0, 10).toArray(), 10).asBlock()), - page -> new Page(new IntArrayVector(IntStream.range(0, 10).toArray(), 10).asBlock()), - page -> new Page(new IntArrayVector(IntStream.range(0, 10).toArray(), 9).asBlock()) + in, + page -> new Page(blockFactory.newIntArrayVector(IntStream.range(0, 10).toArray(), 10).asBlock()), + page -> new Page(blockFactory.newIntArrayVector(IntStream.range(0, 10).toArray(), 9).asBlock()), + Page::releaseBlocks ); + in.releaseBlocks(); + + in = new Page(blockFactory.newIntArrayVector(IntStream.range(0, 100).toArray(), 100).asBlock()); EqualsHashCodeTestUtils.checkEqualsAndHashCode( - new Page(new IntArrayVector(IntStream.range(0, 100).toArray(), 100).asBlock()), - page -> new Page(new IntArrayVector(IntStream.range(0, 100).toArray(), 100).asBlock()), - page -> new Page(new LongArrayVector(LongStream.range(0, 100).toArray(), 100).asBlock()) + in, + page -> new Page(blockFactory.newIntArrayVector(IntStream.range(0, 100).toArray(), 100).asBlock()), + page -> new Page(blockFactory.newLongArrayVector(LongStream.range(0, 100).toArray(), 100).asBlock()), + Page::releaseBlocks ); - EqualsHashCodeTestUtils.checkEqualsAndHashCode( - new Page(new IntArrayVector(new int[] { 1 }, 1).asBlock()), - page -> new Page(1, page.getBlock(0)), - page -> new Page(new IntArrayVector(new int[] { 1 }, 1).asBlock(), new IntArrayVector(new int[] { 1 }, 1).asBlock()) + in.releaseBlocks(); + + in = new Page(blockFactory.newIntArrayVector(new int[] { 1 }, 1).asBlock()); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(in, page -> { + page.getBlock(0).incRef(); + return new Page(1, page.getBlock(0)); + }, + page -> new Page( + blockFactory.newIntArrayVector(new int[] { 1 }, 1).asBlock(), + blockFactory.newIntArrayVector(new int[] { 1 }, 1).asBlock() + ), + Page::releaseBlocks ); + in.releaseBlocks(); } public void testEqualityAndHashCode() throws IOException { @@ -93,7 +124,10 @@ public void testEqualityAndHashCode() throws IOException { int positions = randomInt(page.getPositionCount() - 1); for 
(int blockIndex = 0; blockIndex < blocks.length; blockIndex++) { Block block = page.getBlock(blockIndex); - blocks[blockIndex] = block.elementType().newBlockBuilder(positions).copyFrom(block, 0, page.getPositionCount() - 1).build(); + blocks[blockIndex] = block.elementType() + .newBlockBuilder(positions, TestBlockFactory.getNonBreakingInstance()) + .copyFrom(block, 0, page.getPositionCount() - 1) + .build(); } return new Page(blocks); }; @@ -103,13 +137,13 @@ public void testEqualityAndHashCode() throws IOException { Block[] blocks = new Block[blockCount]; for (int blockIndex = 0; blockIndex < blockCount; blockIndex++) { blocks[blockIndex] = switch (randomInt(6)) { - case 0 -> new IntArrayVector(randomInts(positions).toArray(), positions).asBlock(); - case 1 -> new LongArrayVector(randomLongs(positions).toArray(), positions).asBlock(); - case 2 -> new DoubleArrayVector(randomDoubles(positions).toArray(), positions).asBlock(); - case 3 -> IntBlock.newConstantBlockWith(randomInt(), positions); - case 4 -> LongBlock.newConstantBlockWith(randomLong(), positions); - case 5 -> DoubleBlock.newConstantBlockWith(randomDouble(), positions); - case 6 -> BytesRefBlock.newConstantBlockWith(new BytesRef(Integer.toHexString(randomInt())), positions); + case 0 -> blockFactory.newIntArrayVector(randomInts(positions).toArray(), positions).asBlock(); + case 1 -> blockFactory.newLongArrayVector(randomLongs(positions).toArray(), positions).asBlock(); + case 2 -> blockFactory.newDoubleArrayVector(randomDoubles(positions).toArray(), positions).asBlock(); + case 3 -> blockFactory.newConstantIntBlockWith(randomInt(), positions); + case 4 -> blockFactory.newConstantLongBlockWith(randomLong(), positions); + case 5 -> blockFactory.newConstantDoubleBlockWith(randomDouble(), positions); + case 6 -> blockFactory.newConstantBytesRefBlockWith(new BytesRef(Integer.toHexString(randomInt())), positions); default -> throw new AssertionError(); }; } @@ -125,36 +159,40 @@ public void testEqualityAndHashCode() throws IOException { public void testBasic() { int positions = randomInt(1024); - Page page = new Page(new IntArrayVector(IntStream.range(0, positions).toArray(), positions).asBlock()); + Page page = new Page(blockFactory.newIntArrayVector(IntStream.range(0, positions).toArray(), positions).asBlock()); assertThat(1, is(page.getBlockCount())); assertThat(positions, is(page.getPositionCount())); IntBlock block = page.getBlock(0); IntStream.range(0, positions).forEach(i -> assertThat(i, is(block.getInt(i)))); + page.releaseBlocks(); } public void testAppend() { - Page page1 = new Page(new IntArrayVector(IntStream.range(0, 10).toArray(), 10).asBlock()); - Page page2 = page1.appendBlock(new LongArrayVector(LongStream.range(0, 10).toArray(), 10).asBlock()); + Page page1 = new Page(blockFactory.newIntArrayVector(IntStream.range(0, 10).toArray(), 10).asBlock()); + Page page2 = page1.appendBlock(blockFactory.newLongArrayVector(LongStream.range(0, 10).toArray(), 10).asBlock()); assertThat(1, is(page1.getBlockCount())); assertThat(2, is(page2.getBlockCount())); IntBlock block1 = page2.getBlock(0); IntStream.range(0, 10).forEach(i -> assertThat(i, is(block1.getInt(i)))); LongBlock block2 = page2.getBlock(1); IntStream.range(0, 10).forEach(i -> assertThat((long) i, is(block2.getLong(i)))); + page2.releaseBlocks(); } public void testPageSerializationSimple() throws IOException { + IntVector toFilter = blockFactory.newIntArrayVector(IntStream.range(0, 20).toArray(), 20); Page origPage = new Page( - new IntArrayVector(IntStream.range(0, 
10).toArray(), 10).asBlock(), - new LongArrayVector(LongStream.range(10, 20).toArray(), 10).asBlock(), - new DoubleArrayVector(LongStream.range(30, 40).mapToDouble(i -> i).toArray(), 10).asBlock(), - new BytesRefArrayVector(bytesRefArrayOf("0a", "1b", "2c", "3d", "4e", "5f", "6g", "7h", "8i", "9j"), 10).asBlock(), - IntBlock.newConstantBlockWith(randomInt(), 10), - LongBlock.newConstantBlockWith(randomInt(), 10), - DoubleBlock.newConstantBlockWith(randomInt(), 10), - BytesRefBlock.newConstantBlockWith(new BytesRef(Integer.toHexString(randomInt())), 10), - new IntArrayVector(IntStream.range(0, 20).toArray(), 20).filter(5, 6, 7, 8, 9, 10, 11, 12, 13, 14).asBlock() + blockFactory.newIntArrayVector(IntStream.range(0, 10).toArray(), 10).asBlock(), + blockFactory.newLongArrayVector(LongStream.range(10, 20).toArray(), 10).asBlock(), + blockFactory.newDoubleArrayVector(LongStream.range(30, 40).mapToDouble(i -> i).toArray(), 10).asBlock(), + blockFactory.newBytesRefArrayVector(bytesRefArrayOf("0a", "1b", "2c", "3d", "4e", "5f", "6g", "7h", "8i", "9j"), 10).asBlock(), + blockFactory.newConstantIntBlockWith(randomInt(), 10), + blockFactory.newConstantLongBlockWith(randomLong(), 10), + blockFactory.newConstantDoubleBlockWith(randomDouble(), 10), + blockFactory.newConstantBytesRefBlockWith(new BytesRef(Integer.toHexString(randomInt())), 10), + toFilter.filter(5, 6, 7, 8, 9, 10, 11, 12, 13, 14).asBlock() ); + toFilter.close(); try { Page deserPage = serializeDeserializePage(origPage); try { @@ -177,12 +215,12 @@ public void testPageSerializationSimple() throws IOException { public void testSerializationListPages() throws IOException { final int positions = randomIntBetween(1, 64); List<Page> origPages = List.of( - new Page(new IntArrayVector(randomInts(positions).toArray(), positions).asBlock()), + new Page(blockFactory.newIntArrayVector(randomInts(positions).toArray(), positions).asBlock()), new Page( - new LongArrayVector(randomLongs(positions).toArray(), positions).asBlock(), - DoubleBlock.newConstantBlockWith(randomInt(), positions) + blockFactory.newLongArrayVector(randomLongs(positions).toArray(), positions).asBlock(), + blockFactory.newConstantDoubleBlockWith(randomInt(), positions) ), - new Page(BytesRefBlock.newConstantBlockWith(new BytesRef("Hello World"), positions)) + new Page(blockFactory.newConstantBytesRefBlockWith(new BytesRef("Hello World"), positions)) ); try { EqualsHashCodeTestUtils.checkEqualsAndHashCode(origPages, page -> { @@ -198,7 +236,7 @@ public void testSerializationListPages() throws IOException { public void testPageMultiRelease() { int positions = randomInt(1024); - var block = new IntArrayVector(IntStream.range(0, positions).toArray(), positions).asBlock(); + var block = blockFactory.newIntArrayVector(IntStream.range(0, positions).toArray(), positions).asBlock(); Page page = new Page(block); page.releaseBlocks(); assertThat(block.isReleased(), is(true)); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayBlockBuilderTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayBlockBuilderTests.java new file mode 100644 index 0000000000000..0fd78fd3cb9bf --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayBlockBuilderTests.java @@ -0,0 +1,260 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.unit.ByteSizeValue; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class BigArrayBlockBuilderTests extends SerializationTestCase { + + static ByteSizeValue estimateArraySize(long elementSize, long numElements) { + long bytes = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + RamUsageEstimator.alignObjectSize(elementSize * numElements); + return ByteSizeValue.ofBytes(bytes); + } + + public void testLongVector() throws IOException { + int maxPrimitiveElements = randomIntBetween(100, 1000); + var maxPrimitiveSize = estimateArraySize(Long.BYTES, maxPrimitiveElements); + blockFactory = new BlockFactory(blockFactory.breaker(), blockFactory.bigArrays(), maxPrimitiveSize); + int numElements = between(2, maxPrimitiveElements / 2); + try (var builder = blockFactory.newLongBlockBuilder(between(1, maxPrimitiveElements / 2))) { + long[] elements = new long[numElements]; + for (int i = 0; i < numElements; i++) { + elements[i] = randomLong(); + builder.appendLong(elements[i]); + } + try (LongBlock block = builder.build()) { + assertThat(block, instanceOf(LongVectorBlock.class)); + assertThat(block.asVector(), instanceOf(LongArrayVector.class)); + assertThat(block.getPositionCount(), equalTo(numElements)); + for (int i = 0; i < numElements; i++) { + assertThat(block.getLong(i), equalTo(elements[i])); + } + try (LongBlock copy = serializeDeserializeBlock(block)) { + assertThat(copy, instanceOf(LongVectorBlock.class)); + assertThat(block.asVector(), instanceOf(LongArrayVector.class)); + assertThat(copy.getPositionCount(), equalTo(numElements)); + for (int i = 0; i < numElements; i++) { + assertThat(copy.getLong(i), equalTo(elements[i])); + } + } + } + } + assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); + numElements = between(maxPrimitiveElements + 10, maxPrimitiveElements * 2); + try (var builder = blockFactory.newLongBlockBuilder(between(1, maxPrimitiveElements * 2))) { + long[] elements = new long[numElements]; + for (int i = 0; i < numElements; i++) { + elements[i] = randomLong(); + builder.appendLong(elements[i]); + } + try (LongBlock block = builder.build()) { + assertThat(block, instanceOf(LongVectorBlock.class)); + assertThat(block.asVector(), instanceOf(LongBigArrayVector.class)); + assertThat(block.getPositionCount(), equalTo(numElements)); + for (int i = 0; i < numElements; i++) { + assertThat(block.getLong(i), equalTo(elements[i])); + } + try (LongBlock copy = serializeDeserializeBlock(block)) { + assertThat(copy, instanceOf(LongVectorBlock.class)); + assertThat(block.asVector(), instanceOf(LongBigArrayVector.class)); + assertThat(copy.getPositionCount(), equalTo(numElements)); + for (int i = 0; i < numElements; i++) { + assertThat(copy.getLong(i), equalTo(elements[i])); + } + } + } + } + assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); + } + + public void testLongBlock() throws IOException { + int maxPrimitiveElements = randomIntBetween(1000, 5000); + var maxPrimitiveSize = estimateArraySize(Long.BYTES, maxPrimitiveElements); + blockFactory = new BlockFactory(blockFactory.breaker(), blockFactory.bigArrays(), maxPrimitiveSize); + int numElements = between(2, maxPrimitiveElements / 2); + try (var builder = 
blockFactory.newLongBlockBuilder(between(1, maxPrimitiveElements / 2))) { + long[] elements = new long[numElements]; + builder.beginPositionEntry(); + for (int i = 0; i < numElements; i++) { + elements[i] = randomLong(); + builder.appendLong(elements[i]); + } + builder.endPositionEntry(); + try (LongBlock block = builder.build()) { + assertThat(block, instanceOf(LongArrayBlock.class)); + assertNull(block.asVector()); + assertThat(block.getPositionCount(), equalTo(1)); + for (int i = 0; i < numElements; i++) { + assertThat(block.getLong(i), equalTo(elements[i])); + } + try (LongBlock copy = serializeDeserializeBlock(block)) { + assertThat(copy, instanceOf(LongArrayBlock.class)); + assertNull(copy.asVector()); + assertThat(copy.getPositionCount(), equalTo(1)); + for (int i = 0; i < numElements; i++) { + assertThat(copy.getLong(i), equalTo(elements[i])); + } + } + } + } + assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); + numElements = between(maxPrimitiveElements + 10, maxPrimitiveElements * 2); + try (var builder = blockFactory.newLongBlockBuilder(between(1, maxPrimitiveElements * 2))) { + long[] elements = new long[numElements]; + builder.beginPositionEntry(); + for (int i = 0; i < numElements; i++) { + elements[i] = randomLong(); + builder.appendLong(elements[i]); + } + builder.endPositionEntry(); + try (LongBlock block = builder.build()) { + assertThat(block, instanceOf(LongBigArrayBlock.class)); + assertNull(block.asVector()); + assertThat(block.getPositionCount(), equalTo(1)); + assertThat(block.getTotalValueCount(), equalTo(numElements)); + for (int i = 0; i < numElements; i++) { + assertThat(block.getLong(i), equalTo(elements[i])); + } + try (LongBlock copy = serializeDeserializeBlock(block)) { + assertThat(copy, instanceOf(LongBigArrayBlock.class)); + assertNull(block.asVector()); + assertThat(copy.getPositionCount(), equalTo(1)); + assertThat(copy.getTotalValueCount(), equalTo(numElements)); + for (int i = 0; i < numElements; i++) { + assertThat(copy.getLong(i), equalTo(elements[i])); + } + } + } + } + assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); + } + + public void testBooleanVector() throws IOException { + int maxPrimitiveElements = randomIntBetween(100, 1000); + var maxPrimitiveSize = estimateArraySize(Byte.BYTES, maxPrimitiveElements); + blockFactory = new BlockFactory(blockFactory.breaker(), blockFactory.bigArrays(), maxPrimitiveSize); + int numElements = between(2, maxPrimitiveElements / 2); + try (var builder = blockFactory.newBooleanBlockBuilder(between(1, maxPrimitiveElements / 2))) { + boolean[] elements = new boolean[numElements]; + for (int i = 0; i < numElements; i++) { + elements[i] = randomBoolean(); + builder.appendBoolean(elements[i]); + } + try (var block = builder.build()) { + assertThat(block, instanceOf(BooleanVectorBlock.class)); + assertThat(block.asVector(), instanceOf(BooleanArrayVector.class)); + assertThat(block.getPositionCount(), equalTo(numElements)); + for (int i = 0; i < numElements; i++) { + assertThat(block.getBoolean(i), equalTo(elements[i])); + } + try (var copy = serializeDeserializeBlock(block)) { + assertThat(copy, instanceOf(BooleanVectorBlock.class)); + assertThat(block.asVector(), instanceOf(BooleanArrayVector.class)); + assertThat(copy.getPositionCount(), equalTo(numElements)); + for (int i = 0; i < numElements; i++) { + assertThat(copy.getBoolean(i), equalTo(elements[i])); + } + } + } + } + assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); + numElements = between(maxPrimitiveElements + 10, 
maxPrimitiveElements * 2); + try (var builder = blockFactory.newBooleanBlockBuilder(between(1, maxPrimitiveElements * 2))) { + boolean[] elements = new boolean[numElements]; + for (int i = 0; i < numElements; i++) { + elements[i] = randomBoolean(); + builder.appendBoolean(elements[i]); + } + try (var block = builder.build()) { + assertThat(block, instanceOf(BooleanVectorBlock.class)); + assertThat(block.asVector(), instanceOf(BooleanBigArrayVector.class)); + assertThat(block.getPositionCount(), equalTo(numElements)); + for (int i = 0; i < numElements; i++) { + assertThat(block.getBoolean(i), equalTo(elements[i])); + } + try (var copy = serializeDeserializeBlock(block)) { + assertThat(copy, instanceOf(BooleanVectorBlock.class)); + assertThat(block.asVector(), instanceOf(BooleanBigArrayVector.class)); + assertThat(copy.getPositionCount(), equalTo(numElements)); + for (int i = 0; i < numElements; i++) { + assertThat(copy.getBoolean(i), equalTo(elements[i])); + } + } + } + } + assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); + } + + public void testBooleanBlock() throws IOException { + int maxPrimitiveElements = randomIntBetween(1000, 5000); + var maxPrimitiveSize = estimateArraySize(Byte.BYTES, maxPrimitiveElements); + blockFactory = new BlockFactory(blockFactory.breaker(), blockFactory.bigArrays(), maxPrimitiveSize); + int numElements = between(2, maxPrimitiveElements / 2); + try (var builder = blockFactory.newBooleanBlockBuilder(between(1, maxPrimitiveElements / 2))) { + boolean[] elements = new boolean[numElements]; + builder.beginPositionEntry(); + for (int i = 0; i < numElements; i++) { + elements[i] = randomBoolean(); + builder.appendBoolean(elements[i]); + } + builder.endPositionEntry(); + try (var block = builder.build()) { + assertThat(block, instanceOf(BooleanArrayBlock.class)); + assertNull(block.asVector()); + assertThat(block.getPositionCount(), equalTo(1)); + for (int i = 0; i < numElements; i++) { + assertThat(block.getBoolean(i), equalTo(elements[i])); + } + try (var copy = serializeDeserializeBlock(block)) { + assertThat(copy, instanceOf(BooleanArrayBlock.class)); + assertNull(copy.asVector()); + assertThat(copy.getPositionCount(), equalTo(1)); + for (int i = 0; i < numElements; i++) { + assertThat(copy.getBoolean(i), equalTo(elements[i])); + } + } + } + } + assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); + numElements = between(maxPrimitiveElements + 10, maxPrimitiveElements * 2); + try (var builder = blockFactory.newBooleanBlockBuilder(between(1, maxPrimitiveElements * 2))) { + boolean[] elements = new boolean[numElements]; + builder.beginPositionEntry(); + for (int i = 0; i < numElements; i++) { + elements[i] = randomBoolean(); + builder.appendBoolean(elements[i]); + } + builder.endPositionEntry(); + try (var block = builder.build()) { + assertThat(block, instanceOf(BooleanBigArrayBlock.class)); + assertNull(block.asVector()); + assertThat(block.getPositionCount(), equalTo(1)); + assertThat(block.getTotalValueCount(), equalTo(numElements)); + for (int i = 0; i < numElements; i++) { + assertThat(block.getBoolean(i), equalTo(elements[i])); + } + try (var copy = serializeDeserializeBlock(block)) { + assertThat(copy, instanceOf(BooleanBigArrayBlock.class)); + assertNull(block.asVector()); + assertThat(copy.getPositionCount(), equalTo(1)); + assertThat(copy.getTotalValueCount(), equalTo(numElements)); + for (int i = 0; i < numElements; i++) { + assertThat(copy.getBoolean(i), equalTo(elements[i])); + } + } + } + } + 
assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayVectorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayVectorTests.java index 3033f672f897f..74d7e3e142d04 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayVectorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayVectorTests.java @@ -32,14 +32,15 @@ public void testBoolean() throws IOException { Boolean[] values = IntStream.range(0, positionCount).mapToObj(i -> randomBoolean()).toArray(Boolean[]::new); BitArray array = new BitArray(positionCount, bigArrays); IntStream.range(0, positionCount).filter(i -> values[i]).forEach(array::set); - try (var vector = new BooleanBigArrayVector(array, positionCount)) { + try (var vector = new BooleanBigArrayVector(array, positionCount, blockFactory)) { assertThat(vector.elementType(), is(ElementType.BOOLEAN)); assertThat(positionCount, is(vector.getPositionCount())); IntStream.range(0, positionCount).forEach(i -> assertThat(vector.getBoolean(i), is(values[i]))); assertThat(vector.isConstant(), is(false)); - BooleanVector filtered = vector.filter(IntStream.range(0, positionCount).toArray()); - IntStream.range(0, positionCount).forEach(i -> assertThat(filtered.getBoolean(i), is(values[i]))); - assertThat(filtered.isConstant(), is(false)); + try (BooleanVector filtered = vector.filter(IntStream.range(0, positionCount).toArray())) { + IntStream.range(0, positionCount).forEach(i -> assertThat(filtered.getBoolean(i), is(values[i]))); + assertThat(filtered.isConstant(), is(false)); + } BooleanBlock block = vector.asBlock(); assertThat(block, is(notNullValue())); IntStream.range(0, positionCount).forEach(i -> { @@ -47,7 +48,9 @@ public void testBoolean() throws IOException { assertThat(block.isNull(i), is(false)); assertThat(block.getValueCount(i), is(1)); assertThat(block.getFirstValueIndex(i), is(i)); - assertThat(block.filter(i).getBoolean(0), is(values[i])); + try (BooleanBlock filter = block.filter(i)) { + assertThat(filter.getBoolean(0), is(values[i])); + } }); BasicBlockTests.assertSingleValueDenseBlock(vector.asBlock()); assertSerialization(block); @@ -60,14 +63,15 @@ public void testInt() throws IOException { int[] values = IntStream.range(0, positionCount).map(i -> randomInt()).toArray(); IntArray array = bigArrays.newIntArray(positionCount); IntStream.range(0, positionCount).forEach(i -> array.set(i, values[i])); - try (var vector = new IntBigArrayVector(array, positionCount)) { + try (var vector = new IntBigArrayVector(array, positionCount, blockFactory)) { assertThat(vector.elementType(), is(ElementType.INT)); assertThat(positionCount, is(vector.getPositionCount())); IntStream.range(0, positionCount).forEach(i -> assertThat(vector.getInt(i), is(values[i]))); assertThat(vector.isConstant(), is(false)); - IntVector filtered = vector.filter(IntStream.range(0, positionCount).toArray()); - IntStream.range(0, positionCount).forEach(i -> assertThat(filtered.getInt(i), is(values[i]))); - assertThat(filtered.isConstant(), is(false)); + try (IntVector filtered = vector.filter(IntStream.range(0, positionCount).toArray())) { + IntStream.range(0, positionCount).forEach(i -> assertThat(filtered.getInt(i), is(values[i]))); + assertThat(filtered.isConstant(), is(false)); + } IntBlock block = vector.asBlock(); assertThat(block, is(notNullValue())); IntStream.range(0, 
positionCount).forEach(i -> { @@ -75,7 +79,9 @@ public void testInt() throws IOException { assertThat(block.isNull(i), is(false)); assertThat(block.getValueCount(i), is(1)); assertThat(block.getFirstValueIndex(i), is(i)); - assertThat(block.filter(i).getInt(0), is(values[i])); + try (IntBlock filter = block.filter(i)) { + assertThat(filter.getInt(0), is(values[i])); + } }); BasicBlockTests.assertSingleValueDenseBlock(vector.asBlock()); assertSerialization(block); @@ -88,14 +94,15 @@ public void testLong() throws IOException { long[] values = IntStream.range(0, positionCount).mapToLong(i -> randomLong()).toArray(); LongArray array = bigArrays.newLongArray(positionCount); IntStream.range(0, positionCount).forEach(i -> array.set(i, values[i])); - try (var vector = new LongBigArrayVector(array, positionCount)) { + try (var vector = new LongBigArrayVector(array, positionCount, blockFactory)) { assertThat(vector.elementType(), is(ElementType.LONG)); assertThat(positionCount, is(vector.getPositionCount())); IntStream.range(0, positionCount).forEach(i -> assertThat(vector.getLong(i), is(values[i]))); assertThat(vector.isConstant(), is(false)); - LongVector filtered = vector.filter(IntStream.range(0, positionCount).toArray()); - IntStream.range(0, positionCount).forEach(i -> assertThat(filtered.getLong(i), is(values[i]))); - assertThat(filtered.isConstant(), is(false)); + try (LongVector filtered = vector.filter(IntStream.range(0, positionCount).toArray())) { + IntStream.range(0, positionCount).forEach(i -> assertThat(filtered.getLong(i), is(values[i]))); + assertThat(filtered.isConstant(), is(false)); + } LongBlock block = vector.asBlock(); assertThat(block, is(notNullValue())); IntStream.range(0, positionCount).forEach(i -> { @@ -103,7 +110,9 @@ public void testLong() throws IOException { assertThat(block.isNull(i), is(false)); assertThat(block.getValueCount(i), is(1)); assertThat(block.getFirstValueIndex(i), is(i)); - assertThat(block.filter(i).getLong(0), is(values[i])); + try (LongBlock filter = block.filter(i)) { + assertThat(filter.getLong(0), is(values[i])); + } }); BasicBlockTests.assertSingleValueDenseBlock(vector.asBlock()); assertSerialization(block); @@ -116,14 +125,15 @@ public void testDouble() throws IOException { double[] values = IntStream.range(0, positionCount).mapToDouble(i -> randomDouble()).toArray(); DoubleArray array = bigArrays.newDoubleArray(positionCount); IntStream.range(0, positionCount).forEach(i -> array.set(i, values[i])); - try (var vector = new DoubleBigArrayVector(array, positionCount)) { + try (var vector = new DoubleBigArrayVector(array, positionCount, blockFactory)) { assertThat(vector.elementType(), is(ElementType.DOUBLE)); assertThat(positionCount, is(vector.getPositionCount())); IntStream.range(0, positionCount).forEach(i -> assertThat(vector.getDouble(i), is(values[i]))); assertThat(vector.isConstant(), is(false)); - DoubleVector filtered = vector.filter(IntStream.range(0, positionCount).toArray()); - IntStream.range(0, positionCount).forEach(i -> assertThat(filtered.getDouble(i), is(values[i]))); - assertThat(filtered.isConstant(), is(false)); + try (DoubleVector filtered = vector.filter(IntStream.range(0, positionCount).toArray())) { + IntStream.range(0, positionCount).forEach(i -> assertThat(filtered.getDouble(i), is(values[i]))); + assertThat(filtered.isConstant(), is(false)); + } DoubleBlock block = vector.asBlock(); assertThat(block, is(notNullValue())); IntStream.range(0, positionCount).forEach(i -> { @@ -131,7 +141,9 @@ public void testDouble() 
throws IOException { assertThat(block.isNull(i), is(false)); assertThat(block.getValueCount(i), is(1)); assertThat(block.getFirstValueIndex(i), is(i)); - assertThat(block.filter(i).getDouble(0), is(values[i])); + try (DoubleBlock filter = block.filter(i)) { + assertThat(filter.getDouble(0), is(values[i])); + } }); BasicBlockTests.assertSingleValueDenseBlock(vector.asBlock()); assertSerialization(block); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java index d62fd75abbcdd..d34e639c32f0d 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java @@ -13,7 +13,8 @@ import org.elasticsearch.common.util.BigArray; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BytesRefArray; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.compute.operator.ComputeTestCase; +import org.elasticsearch.core.Releasables; import org.hamcrest.Matcher; import java.lang.reflect.Field; @@ -29,7 +30,7 @@ import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; -public class BlockAccountingTests extends ESTestCase { +public class BlockAccountingTests extends ComputeTestCase { static final Accumulator RAM_USAGE_ACCUMULATOR = new TestRamUsageAccumulator(); @@ -38,182 +39,298 @@ public class BlockAccountingTests extends ESTestCase { // Array Vectors public void testBooleanVector() { - Vector empty = new BooleanArrayVector(new boolean[] {}, 0); + BlockFactory blockFactory = blockFactory(); + Vector empty = blockFactory.newBooleanArrayVector(new boolean[] {}, 0); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); - Vector emptyPlusOne = new BooleanArrayVector(new boolean[] { randomBoolean() }, 1); + Vector emptyPlusOne = blockFactory.newBooleanArrayVector(new boolean[] { randomBoolean() }, 1); assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + 1))); boolean[] randomData = new boolean[randomIntBetween(2, 1024)]; - Vector emptyPlusSome = new BooleanArrayVector(randomData, randomData.length); + Vector emptyPlusSome = blockFactory.newBooleanArrayVector(randomData, randomData.length); assertThat(emptyPlusSome.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + randomData.length))); Vector filterVector = emptyPlusSome.filter(1); assertThat(filterVector.ramBytesUsed(), lessThan(emptyPlusSome.ramBytesUsed())); + Releasables.close(empty, emptyPlusOne, emptyPlusSome, filterVector); } public void testIntVector() { - Vector empty = new IntArrayVector(new int[] {}, 0); + BlockFactory blockFactory = blockFactory(); + Vector empty = blockFactory.newIntArrayVector(new int[] {}, 0); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); - Vector emptyPlusOne = new IntArrayVector(new int[] { randomInt() }, 1); + Vector emptyPlusOne = blockFactory.newIntArrayVector(new int[] { randomInt() }, 1); assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + Integer.BYTES))); int[] randomData = new int[randomIntBetween(2, 1024)]; - Vector emptyPlusSome = new IntArrayVector(randomData, randomData.length); + Vector 
emptyPlusSome = blockFactory.newIntArrayVector(randomData, randomData.length); assertThat(emptyPlusSome.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + (long) Integer.BYTES * randomData.length))); Vector filterVector = emptyPlusSome.filter(1); assertThat(filterVector.ramBytesUsed(), lessThan(emptyPlusSome.ramBytesUsed())); + Releasables.close(empty, emptyPlusOne, emptyPlusSome, filterVector); } public void testLongVector() { - Vector empty = new LongArrayVector(new long[] {}, 0); + BlockFactory blockFactory = blockFactory(); + Vector empty = blockFactory.newLongArrayVector(new long[] {}, 0); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); - Vector emptyPlusOne = new LongArrayVector(new long[] { randomLong() }, 1); + Vector emptyPlusOne = blockFactory.newLongArrayVector(new long[] { randomLong() }, 1); assertThat(emptyPlusOne.ramBytesUsed(), is(empty.ramBytesUsed() + Long.BYTES)); long[] randomData = new long[randomIntBetween(2, 1024)]; - Vector emptyPlusSome = new LongArrayVector(randomData, randomData.length); + Vector emptyPlusSome = blockFactory.newLongArrayVector(randomData, randomData.length); assertThat(emptyPlusSome.ramBytesUsed(), is(empty.ramBytesUsed() + (long) Long.BYTES * randomData.length)); Vector filterVector = emptyPlusSome.filter(1); assertThat(filterVector.ramBytesUsed(), lessThan(emptyPlusSome.ramBytesUsed())); + + Releasables.close(empty, emptyPlusOne, emptyPlusSome, filterVector); } public void testDoubleVector() { - Vector empty = new DoubleArrayVector(new double[] {}, 0); + BlockFactory blockFactory = blockFactory(); + Vector empty = blockFactory.newDoubleArrayVector(new double[] {}, 0); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); - Vector emptyPlusOne = new DoubleArrayVector(new double[] { randomDouble() }, 1); + Vector emptyPlusOne = blockFactory.newDoubleArrayVector(new double[] { randomDouble() }, 1); assertThat(emptyPlusOne.ramBytesUsed(), is(empty.ramBytesUsed() + Double.BYTES)); double[] randomData = new double[randomIntBetween(2, 1024)]; - Vector emptyPlusSome = new DoubleArrayVector(randomData, randomData.length); + Vector emptyPlusSome = blockFactory.newDoubleArrayVector(randomData, randomData.length); assertThat(emptyPlusSome.ramBytesUsed(), is(empty.ramBytesUsed() + (long) Double.BYTES * randomData.length)); // a filter becomes responsible for its enclosing data, both in terms of accounting and releasability Vector filterVector = emptyPlusSome.filter(1); assertThat(filterVector.ramBytesUsed(), lessThan(emptyPlusSome.ramBytesUsed())); + + Releasables.close(empty, emptyPlusOne, emptyPlusSome, filterVector); } public void testBytesRefVector() { - try ( - var emptyArray = new BytesRefArray(0, BigArrays.NON_RECYCLING_INSTANCE); - var arrayWithOne = new BytesRefArray(0, BigArrays.NON_RECYCLING_INSTANCE) - ) { - Vector emptyVector = new BytesRefArrayVector(emptyArray, 0); - long expectedEmptyVectorUsed = RamUsageTester.ramUsed(emptyVector, RAM_USAGE_ACCUMULATOR); - assertThat(emptyVector.ramBytesUsed(), is(expectedEmptyVectorUsed)); - - var bytesRef = new BytesRef(randomAlphaOfLengthBetween(1, 16)); - arrayWithOne.append(bytesRef); - Vector emptyPlusOne = new BytesRefArrayVector(arrayWithOne, 1); - assertThat(emptyPlusOne.ramBytesUsed(), between(emptyVector.ramBytesUsed() + bytesRef.length, UPPER_BOUND)); - - Vector filterVector = emptyPlusOne.filter(0); - 
assertThat(filterVector.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); - } + BlockFactory blockFactory = blockFactory(); + var emptyArray = new BytesRefArray(0, blockFactory.bigArrays()); + var arrayWithOne = new BytesRefArray(0, blockFactory.bigArrays()); + Vector emptyVector = blockFactory.newBytesRefArrayVector(emptyArray, 0); + long expectedEmptyVectorUsed = RamUsageTester.ramUsed(emptyVector, RAM_USAGE_ACCUMULATOR); + assertThat(emptyVector.ramBytesUsed(), is(expectedEmptyVectorUsed)); + + var bytesRef = new BytesRef(randomAlphaOfLengthBetween(1, 16)); + arrayWithOne.append(bytesRef); + Vector emptyPlusOne = blockFactory.newBytesRefArrayVector(arrayWithOne, 1); + assertThat(emptyPlusOne.ramBytesUsed(), between(emptyVector.ramBytesUsed() + bytesRef.length, UPPER_BOUND)); + + Vector filterVector = emptyPlusOne.filter(0); + assertThat(filterVector.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); + Releasables.close(emptyVector, emptyPlusOne, filterVector); } // Array Blocks public void testBooleanBlock() { - Block empty = new BooleanArrayBlock(new boolean[] {}, 0, new int[0], null, Block.MvOrdering.UNORDERED); + BlockFactory blockFactory = blockFactory(); + Block empty = new BooleanArrayBlock(new boolean[] {}, 0, new int[] { 0 }, null, Block.MvOrdering.UNORDERED, blockFactory); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); - Block emptyPlusOne = new BooleanArrayBlock(new boolean[] { randomBoolean() }, 1, new int[] { 0 }, null, Block.MvOrdering.UNORDERED); - assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + 1) + alignObjectSize(Integer.BYTES))); + Block emptyPlusOne = new BooleanArrayBlock( + new boolean[] { randomBoolean() }, + 1, + new int[] { 0, 1 }, + null, + Block.MvOrdering.UNORDERED, + blockFactory + ); + assertThat( + emptyPlusOne.ramBytesUsed(), + is(empty.ramBytesUsed() + ramBytesDiffForBooleanArrays(1, 0) + ramBytesDiffForIntArrays(2, 1)) + ); boolean[] randomData = new boolean[randomIntBetween(2, 1024)]; int[] valueIndices = IntStream.range(0, randomData.length + 1).toArray(); - Block emptyPlusSome = new BooleanArrayBlock(randomData, randomData.length, valueIndices, null, Block.MvOrdering.UNORDERED); - long expected = empty.ramBytesUsed() + ramBytesForBooleanArray(randomData) + ramBytesForIntArray(valueIndices); + Block emptyPlusSome = new BooleanArrayBlock( + randomData, + randomData.length, + valueIndices, + null, + Block.MvOrdering.UNORDERED, + blockFactory + ); + long expected = empty.ramBytesUsed() + ramBytesDiffForBooleanArrays(randomData.length, 0) + ramBytesDiffForIntArrays( + valueIndices.length, + 1 + ); assertThat(emptyPlusSome.ramBytesUsed(), is(expected)); Block filterBlock = emptyPlusSome.filter(1); assertThat(filterBlock.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); + Releasables.close(filterBlock); } public void testBooleanBlockWithNullFirstValues() { - Block empty = new BooleanArrayBlock(new boolean[] {}, 0, null, BitSet.valueOf(new byte[] { 1 }), Block.MvOrdering.UNORDERED); + Block empty = new BooleanArrayBlock( + new boolean[] {}, + 0, + null, + BitSet.valueOf(new byte[] { 1 }), + Block.MvOrdering.UNORDERED, + blockFactory() + ); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), lessThanOrEqualTo(expectedEmptyUsed)); } public void testIntBlock() { - Block empty = new IntArrayBlock(new int[] {}, 0, new int[] {}, null, Block.MvOrdering.UNORDERED); + 
BlockFactory blockFactory = blockFactory(); + Block empty = new IntArrayBlock(new int[] {}, 0, new int[] { 0 }, null, Block.MvOrdering.UNORDERED, blockFactory); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); - Block emptyPlusOne = new IntArrayBlock(new int[] { randomInt() }, 1, new int[] { 0 }, null, Block.MvOrdering.UNORDERED); - assertThat(emptyPlusOne.ramBytesUsed(), is(empty.ramBytesUsed() + alignObjectSize(Integer.BYTES) + alignObjectSize(Integer.BYTES))); + Block emptyPlusOne = new IntArrayBlock( + new int[] { randomInt() }, + 1, + new int[] { 0, 1 }, + null, + Block.MvOrdering.UNORDERED, + blockFactory + ); + assertThat(emptyPlusOne.ramBytesUsed(), is(empty.ramBytesUsed() + ramBytesDiffForIntArrays(1, 0) + ramBytesDiffForIntArrays(2, 1))); int[] randomData = new int[randomIntBetween(2, 1024)]; int[] valueIndices = IntStream.range(0, randomData.length + 1).toArray(); - Block emptyPlusSome = new IntArrayBlock(randomData, randomData.length, valueIndices, null, Block.MvOrdering.UNORDERED); - long expected = empty.ramBytesUsed() + ramBytesForIntArray(randomData) + ramBytesForIntArray(valueIndices); + Block emptyPlusSome = new IntArrayBlock( + randomData, + randomData.length, + valueIndices, + null, + Block.MvOrdering.UNORDERED, + blockFactory + ); + long expected = empty.ramBytesUsed() + ramBytesDiffForIntArrays(randomData.length, 0) + ramBytesDiffForIntArrays( + valueIndices.length, + 1 + ); assertThat(emptyPlusSome.ramBytesUsed(), is(expected)); Block filterBlock = emptyPlusSome.filter(1); assertThat(filterBlock.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); + Releasables.close(filterBlock); } public void testIntBlockWithNullFirstValues() { - Block empty = new IntArrayBlock(new int[] {}, 0, null, BitSet.valueOf(new byte[] { 1 }), Block.MvOrdering.UNORDERED); + BlockFactory blockFactory = blockFactory(); + Block empty = new IntArrayBlock(new int[] {}, 0, null, BitSet.valueOf(new byte[] { 1 }), Block.MvOrdering.UNORDERED, blockFactory); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); } public void testLongBlock() { - Block empty = new LongArrayBlock(new long[] {}, 0, new int[0], null, Block.MvOrdering.UNORDERED); + BlockFactory blockFactory = blockFactory(); + Block empty = new LongArrayBlock(new long[] {}, 0, new int[] { 0 }, null, Block.MvOrdering.UNORDERED, blockFactory); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); - Block emptyPlusOne = new LongArrayBlock(new long[] { randomInt() }, 1, new int[] { 0 }, null, Block.MvOrdering.UNORDERED); - assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + Long.BYTES) + alignObjectSize(Integer.BYTES))); + Block emptyPlusOne = new LongArrayBlock( + new long[] { randomInt() }, + 1, + new int[] { 0, 1 }, + null, + Block.MvOrdering.UNORDERED, + blockFactory + ); + assertThat( + emptyPlusOne.ramBytesUsed(), + is(empty.ramBytesUsed() + ramBytesDiffForLongArrays(1, 0) + ramBytesDiffForIntArrays(2, 1)) + ); long[] randomData = new long[randomIntBetween(2, 1024)]; int[] valueIndices = IntStream.range(0, randomData.length + 1).toArray(); - Block emptyPlusSome = new LongArrayBlock(randomData, randomData.length, valueIndices, null, Block.MvOrdering.UNORDERED); - long expected = empty.ramBytesUsed() + ramBytesForLongArray(randomData) + 
ramBytesForIntArray(valueIndices); + Block emptyPlusSome = new LongArrayBlock( + randomData, + randomData.length, + valueIndices, + null, + Block.MvOrdering.UNORDERED, + blockFactory + ); + long expected = empty.ramBytesUsed() + ramBytesDiffForLongArrays(randomData.length, 0) + ramBytesDiffForIntArrays( + valueIndices.length, + 1 + ); assertThat(emptyPlusSome.ramBytesUsed(), is(expected)); Block filterBlock = emptyPlusSome.filter(1); assertThat(filterBlock.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); + Releasables.close(filterBlock); } public void testLongBlockWithNullFirstValues() { - Block empty = new LongArrayBlock(new long[] {}, 0, null, BitSet.valueOf(new byte[] { 1 }), Block.MvOrdering.UNORDERED); + Block empty = new LongArrayBlock( + new long[] {}, + 0, + null, + BitSet.valueOf(new byte[] { 1 }), + Block.MvOrdering.UNORDERED, + blockFactory() + ); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); } public void testDoubleBlock() { - Block empty = new DoubleArrayBlock(new double[] {}, 0, new int[0], null, Block.MvOrdering.UNORDERED); + BlockFactory blockFactory = blockFactory(); + Block empty = new DoubleArrayBlock(new double[] {}, 0, new int[] { 0 }, null, Block.MvOrdering.UNORDERED, blockFactory); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); - Block emptyPlusOne = new DoubleArrayBlock(new double[] { randomInt() }, 1, new int[] { 0 }, null, Block.MvOrdering.UNORDERED); - assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + Double.BYTES) + alignObjectSize(Integer.BYTES))); + Block emptyPlusOne = new DoubleArrayBlock( + new double[] { randomInt() }, + 1, + new int[] { 0, 1 }, + null, + Block.MvOrdering.UNORDERED, + blockFactory + ); + assertThat( + emptyPlusOne.ramBytesUsed(), + is(empty.ramBytesUsed() + ramBytesDiffForDoubleArrays(1, 0) + ramBytesDiffForIntArrays(2, 1)) + ); double[] randomData = new double[randomIntBetween(2, 1024)]; int[] valueIndices = IntStream.range(0, randomData.length + 1).toArray(); - Block emptyPlusSome = new DoubleArrayBlock(randomData, randomData.length, valueIndices, null, Block.MvOrdering.UNORDERED); - long expected = empty.ramBytesUsed() + ramBytesForDoubleArray(randomData) + ramBytesForIntArray(valueIndices); + Block emptyPlusSome = new DoubleArrayBlock( + randomData, + randomData.length, + valueIndices, + null, + Block.MvOrdering.UNORDERED, + blockFactory + ); + long expected = empty.ramBytesUsed() + ramBytesDiffForDoubleArrays(randomData.length, 0) + ramBytesDiffForIntArrays( + valueIndices.length, + 1 + ); assertThat(emptyPlusSome.ramBytesUsed(), is(expected)); Block filterBlock = emptyPlusSome.filter(1); assertThat(filterBlock.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); + Releasables.close(filterBlock); } public void testDoubleBlockWithNullFirstValues() { - Block empty = new DoubleArrayBlock(new double[] {}, 0, null, BitSet.valueOf(new byte[] { 1 }), Block.MvOrdering.UNORDERED); + Block empty = new DoubleArrayBlock( + new double[] {}, + 0, + null, + BitSet.valueOf(new byte[] { 1 }), + Block.MvOrdering.UNORDERED, + blockFactory() + ); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); } @@ -248,19 +365,35 @@ public long accumulateObject(Object o, long shallowSize, Map fiel } } - static long ramBytesForBooleanArray(boolean[] arr) { - return 
alignObjectSize((long) Byte.BYTES * arr.length); + static long ramBytesDiffForBooleanArrays(int length1, int length2) { + return ramBytesForBooleanArray(length1) - ramBytesForBooleanArray(length2); + } + + static long ramBytesDiffForIntArrays(int length1, int length2) { + return ramBytesForIntArray(length1) - ramBytesForIntArray(length2); + } + + static long ramBytesDiffForLongArrays(int length1, int length2) { + return ramBytesForLongArray(length1) - ramBytesForLongArray(length2); + } + + static long ramBytesDiffForDoubleArrays(int length1, int length2) { + return ramBytesForDoubleArray(length1) - ramBytesForDoubleArray(length2); + } + + static long ramBytesForBooleanArray(int length) { + return alignObjectSize((long) Byte.BYTES * length); } - static long ramBytesForIntArray(int[] arr) { - return alignObjectSize((long) Integer.BYTES * arr.length); + static long ramBytesForIntArray(int length) { + return alignObjectSize((long) Integer.BYTES * length); } - static long ramBytesForLongArray(long[] arr) { - return alignObjectSize((long) Long.BYTES * arr.length); + static long ramBytesForLongArray(int length) { + return alignObjectSize((long) Long.BYTES * length); } - static long ramBytesForDoubleArray(double[] arr) { - return alignObjectSize((long) Long.BYTES * arr.length); + static long ramBytesForDoubleArray(int length) { + return alignObjectSize((long) Long.BYTES * length); } }
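The rewritten accounting helpers above take element counts instead of concrete arrays, so the tests can express expected breaker usage as deltas between two lengths. As a worked example of the arithmetic, assuming Lucene's default 8-byte object alignment in RamUsageEstimator:

```java
// ramBytesForIntArray(3) = alignObjectSize(4 * 3) = 16
// ramBytesForIntArray(1) = alignObjectSize(4 * 1) = 8
// ramBytesDiffForIntArrays(3, 1) = 16 - 8 = 8
long diff = ramBytesDiffForIntArrays(3, 1); // 8 when NUM_BYTES_OBJECT_ALIGNMENT == 8
```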
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderAppendBlockTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderAppendBlockTests.java
index a9f08eee02d70..9c1b02aa74107 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderAppendBlockTests.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderAppendBlockTests.java
@@ -7,17 +7,19 @@
 
 package org.elasticsearch.compute.data;
 
-import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.compute.operator.ComputeTestCase;
 
 import java.util.ArrayList;
 import java.util.List;
 
 import static org.hamcrest.Matchers.equalTo;
 
-public class BlockBuilderAppendBlockTests extends ESTestCase {
+public class BlockBuilderAppendBlockTests extends ComputeTestCase {
     public void testBasic() {
-        IntBlock src = new IntBlockBuilder(10, BlockFactory.getNonBreakingInstance()).appendInt(1)
+        BlockFactory blockFactory = blockFactory();
+        IntBlock src = blockFactory.newIntBlockBuilder(10)
+            .appendInt(1)
             .appendNull()
             .beginPositionEntry()
             .appendInt(4)
@@ -32,40 +34,48 @@ public void testBasic() {
             .endPositionEntry()
             .build();
         // copy position by position
-        {
-            IntBlock.Builder dst = IntBlock.newBlockBuilder(randomIntBetween(1, 20));
+        try (IntBlock.Builder dst = blockFactory.newIntBlockBuilder(randomIntBetween(1, 20))) {
             for (int i = 0; i < src.getPositionCount(); i++) {
-                dst.appendAllValuesToCurrentPosition(src.filter(i));
+                try (IntBlock filter = src.filter(i)) {
+                    dst.appendAllValuesToCurrentPosition(filter);
+                }
+            }
+            try (IntBlock block = dst.build()) {
+                assertThat(block, equalTo(src));
             }
-            assertThat(dst.build(), equalTo(src));
         }
         // copy all block
-        {
-            IntBlock.Builder dst = IntBlock.newBlockBuilder(randomIntBetween(1, 20));
-            IntBlock block = dst.appendAllValuesToCurrentPosition(src).build();
-            assertThat(block.getPositionCount(), equalTo(1));
-            assertThat(BlockUtils.toJavaObject(block, 0), equalTo(List.of(1, 4, 6, 10, 20, 30, 1)));
+        try (IntBlock.Builder dst = blockFactory.newIntBlockBuilder(randomIntBetween(1, 20))) {
+            try (IntBlock block = dst.appendAllValuesToCurrentPosition(src).build()) {
+                assertThat(block.getPositionCount(), equalTo(1));
+                assertThat(BlockUtils.toJavaObject(block, 0), equalTo(List.of(1, 4, 6, 10, 20, 30, 1)));
+            }
         }
-        {
-            Block dst = randomlyDivideAndMerge(src);
+        try (Block dst = randomlyDivideAndMerge(src)) {
             assertThat(dst.getPositionCount(), equalTo(1));
             assertThat(BlockUtils.toJavaObject(dst, 0), equalTo(List.of(1, 4, 6, 10, 20, 30, 1)));
         }
     }
 
     public void testRandomNullBlock() {
-        IntBlock.Builder src = IntBlock.newBlockBuilder(10);
-        src.appendAllValuesToCurrentPosition(new ConstantNullBlock(between(1, 100)));
+        BlockFactory blockFactory = blockFactory();
+        IntBlock.Builder src = blockFactory.newIntBlockBuilder(10);
+        try (var nullBlock = blockFactory.newConstantNullBlock(between(1, 100))) {
+            src.appendAllValuesToCurrentPosition(nullBlock);
+        }
         src.appendInt(101);
-        src.appendAllValuesToCurrentPosition(new ConstantNullBlock(between(1, 100)));
+        try (var nullBlock = blockFactory.newConstantNullBlock(between(1, 100))) {
+            src.appendAllValuesToCurrentPosition(nullBlock);
+        }
         IntBlock block = src.build();
         assertThat(block.getPositionCount(), equalTo(3));
         assertTrue(block.isNull(0));
         assertThat(block.getInt(1), equalTo(101));
         assertTrue(block.isNull(2));
-        Block flatten = randomlyDivideAndMerge(block);
-        assertThat(flatten.getPositionCount(), equalTo(1));
-        assertThat(BlockUtils.toJavaObject(flatten, 0), equalTo(101));
+        try (Block flatten = randomlyDivideAndMerge(block)) {
+            assertThat(flatten.getPositionCount(), equalTo(1));
+            assertThat(BlockUtils.toJavaObject(flatten, 0), equalTo(101));
+        }
     }
 
     public void testRandom() {
@@ -79,14 +89,17 @@ public void testRandom() {
             0,
             between(0, 16)
         ).block();
-        randomlyDivideAndMerge(block);
+
+        block = randomlyDivideAndMerge(block);
+        block.close();
     }
 
     private Block randomlyDivideAndMerge(Block block) {
         while (block.getPositionCount() > 1 || randomBoolean()) {
             int positionCount = block.getPositionCount();
             int offset = 0;
-            Block.Builder builder = block.elementType().newBlockBuilder(randomIntBetween(1, 100));
+            Block.Builder builder = block.elementType()
+                .newBlockBuilder(randomIntBetween(1, 100), TestBlockFactory.getNonBreakingInstance());
             List<Object> expected = new ArrayList<>();
             while (offset < positionCount) {
                 int length = randomIntBetween(1, positionCount - offset);
@@ -98,7 +111,9 @@ private Block randomlyDivideAndMerge(Block block) {
                 Block sub = block.filter(positions);
                 expected.add(extractAndFlattenBlockValues(sub));
                 builder.appendAllValuesToCurrentPosition(sub);
+                sub.close();
             }
+            block.close();
             block = builder.build();
             assertThat(block.getPositionCount(), equalTo(expected.size()));
             for (int i = 0; i < block.getPositionCount(); i++) {
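
The thread running through this file's changes: every block, including the output of `filter()` and of a builder, is a breaker-tracked resource that must be closed, and the tests now use the breaker-backed `blockFactory()` from `ComputeTestCase` so a leak fails the test. A condensed sketch of the discipline, under those assumptions (the values and assertions are illustrative):

```java
// Build, filter, and release; a leak at any step shows up as non-zero breaker
// usage when the test framework checks the factory afterwards.
BlockFactory blockFactory = blockFactory();
try (IntBlock src = blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(2).appendInt(3).build()) {
    try (IntBlock oneRow = src.filter(1)) { // the filtered copy is tracked separately
        assertThat(oneRow.getInt(0), equalTo(2));
    }
} // closing both blocks returns their bytes to the circuit breaker
```
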
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderCopyFromTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderCopyFromTests.java
index 529c1afeaaf44..e3a9aba0d1b7f 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderCopyFromTests.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderCopyFromTests.java
@@ -69,22 +69,24 @@ public void testEvensFiltered() {
     }
 
     public void testSmallAllNull() {
-        assertSmall(Block.constantNullBlock(10));
+        assertSmall(TestBlockFactory.getNonBreakingInstance().newConstantNullBlock(10));
     }
 
     public void testEvensAllNull() {
-        assertEvens(Block.constantNullBlock(10));
+        assertEvens(TestBlockFactory.getNonBreakingInstance().newConstantNullBlock(10));
     }
 
     private void assertSmall(Block block) {
         int smallSize = Math.min(block.getPositionCount(), 10);
-        Block.Builder builder = elementType.newBlockBuilder(smallSize);
+        BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance();
+        Block.Builder builder = elementType.newBlockBuilder(smallSize, blockFactory);
         builder.copyFrom(block, 0, smallSize);
         assertBlockValues(builder.build(), BasicBlockTests.valuesAtPositions(block, 0, smallSize));
     }
 
     private void assertEvens(Block block) {
-        Block.Builder builder = elementType.newBlockBuilder(block.getPositionCount() / 2);
+        BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance();
+        Block.Builder builder = elementType.newBlockBuilder(block.getPositionCount() / 2, blockFactory);
         List<List<Object>> expected = new ArrayList<>();
         for (int i = 0; i < block.getPositionCount(); i += 2) {
             builder.copyFrom(block, i, i + 1);
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockFactoryTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockFactoryTests.java
index 96e5de20ba35c..ec9bea2edcb75 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockFactoryTests.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockFactoryTests.java
@@ -87,19 +87,19 @@ public void testPreAdjusters() {
         int positions = randomIntBetween(1, 16384);
         long preAdjustBytes = blockFactory.preAdjustBreakerForBoolean(positions);
         assertThat(preAdjustBytes, is((long) positions));
-        blockFactory.adjustBreaker(-preAdjustBytes, true);
+        blockFactory.adjustBreaker(-preAdjustBytes);
 
         preAdjustBytes = blockFactory.preAdjustBreakerForInt(positions);
         assertThat(preAdjustBytes, is((long) positions * 4));
-        blockFactory.adjustBreaker(-preAdjustBytes, true);
+        blockFactory.adjustBreaker(-preAdjustBytes);
 
         preAdjustBytes = blockFactory.preAdjustBreakerForLong(positions);
         assertThat(preAdjustBytes, is((long) positions * 8));
-        blockFactory.adjustBreaker(-preAdjustBytes, true);
+        blockFactory.adjustBreaker(-preAdjustBytes);
 
         preAdjustBytes = blockFactory.preAdjustBreakerForDouble(positions);
         assertThat(preAdjustBytes, is((long) positions * 8));
-        blockFactory.adjustBreaker(-preAdjustBytes, true);
+        blockFactory.adjustBreaker(-preAdjustBytes);
     }
 }
 
@@ -109,7 +109,7 @@ public void testIntBlockBuilderWithPossiblyLargeEstimateEmpty() {
         var block = builder.build();
         releaseAndAssertBreaker(block);
 
-        block = blockFactory.newIntArrayBlock(new int[] {}, 0, new int[] {}, new BitSet(), randomOrdering());
+        block = blockFactory.newIntArrayBlock(new int[] {}, 0, new int[] { 0 }, new BitSet(), randomOrdering());
         assertThat(breaker.getUsed(), greaterThan(0L));
         releaseAndAssertBreaker(block);
     }
@@ -121,7 +121,7 @@ public void testIntBlockBuilderWithPossiblyLargeEstimateSingle() {
         var block = builder.build();
         releaseAndAssertBreaker(block);
 
-        block = blockFactory.newIntArrayBlock(new int[] { randomInt() }, 1, new int[] {}, new BitSet(), randomOrdering());
+        block = blockFactory.newIntArrayBlock(new int[] { randomInt() }, 1, new int[] { 0, 1 }, new BitSet(), randomOrdering());
         assertThat(breaker.getUsed(), greaterThan(0L));
         releaseAndAssertBreaker(block);
 
@@ -199,7 +199,7 @@ public void testLongBlockBuilderWithPossiblyLargeEstimateEmpty() {
         var block = builder.build();
         releaseAndAssertBreaker(block);
 
-        block = blockFactory.newLongArrayBlock(new long[] {}, 0, new int[] {}, new BitSet(), randomOrdering());
+        block = blockFactory.newLongArrayBlock(new long[] {}, 0, new int[] { 0 }, new BitSet(), randomOrdering());
         assertThat(breaker.getUsed(), greaterThan(0L));
         releaseAndAssertBreaker(block);
     }
@@ -211,7 +211,7 @@ public void testLongBlockBuilderWithPossiblyLargeEstimateSingle() {
         var block = builder.build();
         releaseAndAssertBreaker(block);
 
-        block = blockFactory.newLongArrayBlock(new long[] { randomLong() }, 1, new int[] {}, new BitSet(), randomOrdering());
+        block = blockFactory.newLongArrayBlock(new long[] { randomLong() }, 1, new int[] { 0, 1 }, new BitSet(), randomOrdering());
         assertThat(breaker.getUsed(), greaterThan(0L));
         releaseAndAssertBreaker(block);
 
@@ -289,7 +289,7 @@ public void testDoubleBlockBuilderWithPossiblyLargeEstimateEmpty() {
         var block = builder.build();
         releaseAndAssertBreaker(block);
 
-        block = blockFactory.newDoubleArrayBlock(new double[] {}, 0, new int[] {}, new BitSet(), randomOrdering());
+        block = blockFactory.newDoubleArrayBlock(new double[] {}, 0, new int[] { 0 }, new BitSet(), randomOrdering());
         assertThat(breaker.getUsed(), greaterThan(0L));
         releaseAndAssertBreaker(block);
     }
@@ -301,7 +301,7 @@ public void testDoubleBlockBuilderWithPossiblyLargeEstimateSingle() {
         var block = builder.build();
         releaseAndAssertBreaker(block);
 
-        block = blockFactory.newDoubleArrayBlock(new double[] { randomDouble() }, 1, new int[] {}, new BitSet(), randomOrdering());
+        block = blockFactory.newDoubleArrayBlock(new double[] { randomDouble() }, 1, new int[] { 0, 1 }, new BitSet(), randomOrdering());
         assertThat(breaker.getUsed(), greaterThan(0L));
         releaseAndAssertBreaker(block);
 
@@ -379,7 +379,7 @@ public void testBooleanBlockBuilderWithPossiblyLargeEstimateEmpty() {
         var block = builder.build();
         releaseAndAssertBreaker(block);
 
-        block = blockFactory.newBooleanArrayBlock(new boolean[] {}, 0, new int[] {}, new BitSet(), randomOrdering());
+        block = blockFactory.newBooleanArrayBlock(new boolean[] {}, 0, new int[] { 0 }, new BitSet(), randomOrdering());
         assertThat(breaker.getUsed(), greaterThan(0L));
         releaseAndAssertBreaker(block);
     }
@@ -391,7 +391,7 @@ public void testBooleanBlockBuilderWithPossiblyLargeEstimateSingle() {
         var block = builder.build();
         releaseAndAssertBreaker(block);
 
-        block = blockFactory.newBooleanArrayBlock(new boolean[] { randomBoolean() }, 1, new int[] {}, new BitSet(), randomOrdering());
+        block = blockFactory.newBooleanArrayBlock(new boolean[] { randomBoolean() }, 1, new int[] { 0, 1 }, new BitSet(), randomOrdering());
         assertThat(breaker.getUsed(), greaterThan(0L));
         releaseAndAssertBreaker(block);
 
@@ -470,7 +470,7 @@ public void testBytesRefBlockBuilderWithPossiblyLargeEstimateEmpty() {
         releaseAndAssertBreaker(block);
 
         var emptyArray = new BytesRefArray(0, bigArrays);
-        block = blockFactory.newBytesRefArrayBlock(emptyArray, 0, new int[] {}, new BitSet(), randomOrdering());
+        block = blockFactory.newBytesRefArrayBlock(emptyArray, 0, new int[] { 0 }, new BitSet(), randomOrdering());
         assertThat(breaker.getUsed(), greaterThan(0L));
         releaseAndAssertBreaker(block);
     }
@@ -484,7 +484,7 @@ public void testBytesRefBlockBuilderWithPossiblyLargeEstimateSingle() {
 
         var array = new BytesRefArray(1, bigArrays);
         array.append(randomBytesRef());
-        block = blockFactory.newBytesRefArrayBlock(array, 1, new int[] {}, new BitSet(), randomOrdering());
+        block = blockFactory.newBytesRefArrayBlock(array, 1, new int[] { 0, 1 }, new BitSet(), randomOrdering());
         assertThat(breaker.getUsed(), greaterThan(0L));
         releaseAndAssertBreaker(block);
 
@@ -568,7 +568,6 @@ public void testReleaseVector() {
             vector.close();
         }
         assertTrue(vector.isReleased());
-        assertTrue(vector.asBlock().isReleased());
         assertThat(breaker.getUsed(), equalTo(0L));
     }
 
@@ -651,7 +650,7 @@ public void testAllowPassingBlockToDifferentContext() throws Exception {
     public void testOwningFactoryOfVectorBlock() {
         BlockFactory parentFactory = blockFactory(ByteSizeValue.ofBytes(between(1024, 4096)));
         LocalCircuitBreaker localBreaker = new LocalCircuitBreaker(parentFactory.breaker(), between(0, 1024), between(0, 1024));
-        BlockFactory localFactory = new BlockFactory(localBreaker, bigArrays, parentFactory);
+        BlockFactory localFactory = parentFactory.newChildFactory(localBreaker);
         int numValues = between(2, 10);
         try (var builder = localFactory.newIntVectorBuilder(numValues)) {
             for (int i = 0; i < numValues; i++) {
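
One pattern worth noting in the BlockFactoryTests changes above: every zero-position array block now passes `new int[] { 0 }` and every single-position block `new int[] { 0, 1 }` where `new int[] {}` used to be accepted. That is consistent with `firstValueIndexes` carrying `positionCount + 1` entries so each position's value count is a difference of neighbours. A sketch of the bookkeeping under that assumption (the helper is illustrative, not part of the patch):

```java
// firstValueIndexes[p] is where position p's values start; the trailing sentinel
// entry makes the value count at p a simple delta, even for the last position.
static int valueCountAt(int[] firstValueIndexes, int p) {
    return firstValueIndexes[p + 1] - firstValueIndexes[p];
}

// positionCount == 0  ->  firstValueIndexes = { 0 }     (no positions, one sentinel)
// positionCount == 1  ->  firstValueIndexes = { 0, 1 }  (one single-valued position)
// valueCountAt(new int[] { 0, 1 }, 0) == 1
```
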
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java
index e44697ab8534c..b13aa040f307d 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java
@@ -30,129 +30,179 @@ public class BlockSerializationTests extends SerializationTestCase {
 
     public void testConstantIntBlock() throws IOException {
-        assertConstantBlockImpl(IntBlock.newConstantBlockWith(randomInt(), randomIntBetween(1, 8192)));
+        assertConstantBlockImpl(blockFactory.newConstantIntBlockWith(randomInt(), randomIntBetween(1, 8192)));
     }
 
     public void testConstantLongBlockLong() throws IOException {
-        assertConstantBlockImpl(LongBlock.newConstantBlockWith(randomLong(), randomIntBetween(1, 8192)));
+        assertConstantBlockImpl(blockFactory.newConstantLongBlockWith(randomLong(), randomIntBetween(1, 8192)));
     }
 
     public void testConstantDoubleBlock() throws IOException {
-        assertConstantBlockImpl(DoubleBlock.newConstantBlockWith(randomDouble(), randomIntBetween(1, 8192)));
+        assertConstantBlockImpl(blockFactory.newConstantDoubleBlockWith(randomDouble(), randomIntBetween(1, 8192)));
     }
 
     public void testConstantBytesRefBlock() throws IOException {
-        Block block = BytesRefBlock.newConstantBlockWith(new BytesRef(((Integer) randomInt()).toString()), randomIntBetween(1, 8192));
+        Block block = blockFactory.newConstantBytesRefBlockWith(
+            new BytesRef(((Integer) randomInt()).toString()),
+            randomIntBetween(1, 8192)
+        );
         assertConstantBlockImpl(block);
     }
 
     private void assertConstantBlockImpl(Block origBlock) throws IOException {
         assertThat(origBlock.asVector().isConstant(), is(true));
-        try (Block deserBlock = serializeDeserializeBlock(origBlock)) {
+        try (origBlock; Block deserBlock = serializeDeserializeBlock(origBlock)) {
             EqualsHashCodeTestUtils.checkEqualsAndHashCode(origBlock, unused -> deserBlock);
             assertThat(deserBlock.asVector().isConstant(), is(true));
         }
     }
 
     public void testEmptyIntBlock() throws IOException {
-        assertEmptyBlock(IntBlock.newBlockBuilder(0).build());
-        assertEmptyBlock(IntBlock.newBlockBuilder(0).appendNull().build().filter());
-        assertEmptyBlock(IntVector.newVectorBuilder(0).build().asBlock());
-        assertEmptyBlock(IntVector.newVectorBuilder(0).appendInt(randomInt()).build().filter().asBlock());
+        assertEmptyBlock(blockFactory.newIntBlockBuilder(0).build());
+        try (IntBlock toFilter = blockFactory.newIntBlockBuilder(0).appendNull().build()) {
+            assertEmptyBlock(toFilter.filter());
+        }
+        assertEmptyBlock(blockFactory.newIntVectorBuilder(0).build().asBlock());
+        try (IntVector toFilter = blockFactory.newIntVectorBuilder(0).appendInt(randomInt()).build()) {
+            assertEmptyBlock(toFilter.filter().asBlock());
+        }
     }
 
     public void testEmptyLongBlock() throws IOException {
-        assertEmptyBlock(LongBlock.newBlockBuilder(0).build());
-        assertEmptyBlock(LongBlock.newBlockBuilder(0).appendNull().build().filter());
-        assertEmptyBlock(LongVector.newVectorBuilder(0).build().asBlock());
-        assertEmptyBlock(LongVector.newVectorBuilder(0).appendLong(randomLong()).build().filter().asBlock());
+        assertEmptyBlock(blockFactory.newLongBlockBuilder(0).build());
+        try (LongBlock toFilter = blockFactory.newLongBlockBuilder(0).appendNull().build()) {
+            assertEmptyBlock(toFilter.filter());
+        }
+        assertEmptyBlock(blockFactory.newLongVectorBuilder(0).build().asBlock());
+        try (LongVector toFilter = blockFactory.newLongVectorBuilder(0).appendLong(randomLong()).build()) {
+            assertEmptyBlock(toFilter.filter().asBlock());
+        }
     }
 
     public void testEmptyDoubleBlock() throws IOException {
-        assertEmptyBlock(DoubleBlock.newBlockBuilder(0).build());
-        assertEmptyBlock(DoubleBlock.newBlockBuilder(0).appendNull().build().filter());
-        assertEmptyBlock(DoubleVector.newVectorBuilder(0).build().asBlock());
-        assertEmptyBlock(DoubleVector.newVectorBuilder(0).appendDouble(randomDouble()).build().filter().asBlock());
+        assertEmptyBlock(blockFactory.newDoubleBlockBuilder(0).build());
+        try (DoubleBlock toFilter = blockFactory.newDoubleBlockBuilder(0).appendNull().build()) {
+            assertEmptyBlock(toFilter.filter());
+        }
+        assertEmptyBlock(blockFactory.newDoubleVectorBuilder(0).build().asBlock());
+        try (DoubleVector toFilter = blockFactory.newDoubleVectorBuilder(0).appendDouble(randomDouble()).build()) {
+            assertEmptyBlock(toFilter.filter().asBlock());
+        }
     }
 
     public void testEmptyBytesRefBlock() throws IOException {
-        assertEmptyBlock(BytesRefBlock.newBlockBuilder(0).build());
-        assertEmptyBlock(BytesRefBlock.newBlockBuilder(0).appendNull().build().filter());
-        assertEmptyBlock(BytesRefVector.newVectorBuilder(0).build().asBlock());
-        assertEmptyBlock(BytesRefVector.newVectorBuilder(0).appendBytesRef(randomBytesRef()).build().filter().asBlock());
+        assertEmptyBlock(blockFactory.newBytesRefBlockBuilder(0).build());
+        try (BytesRefBlock toFilter = blockFactory.newBytesRefBlockBuilder(0).appendNull().build()) {
+            assertEmptyBlock(toFilter.filter());
+        }
+        assertEmptyBlock(blockFactory.newBytesRefVectorBuilder(0).build().asBlock());
+        try (BytesRefVector toFilter = blockFactory.newBytesRefVectorBuilder(0).appendBytesRef(randomBytesRef()).build()) {
+            assertEmptyBlock(toFilter.filter().asBlock());
+        }
     }
 
     private void assertEmptyBlock(Block origBlock) throws IOException {
         assertThat(origBlock.getPositionCount(), is(0));
-        try (Block deserBlock = serializeDeserializeBlock(origBlock)) {
+        try (origBlock; Block deserBlock = serializeDeserializeBlock(origBlock)) {
             EqualsHashCodeTestUtils.checkEqualsAndHashCode(origBlock, unused -> deserBlock);
         }
     }
 
     public void testFilterIntBlock() throws IOException {
-        assertFilterBlock(IntBlock.newBlockBuilder(0).appendInt(1).appendInt(2).build().filter(1));
-        assertFilterBlock(IntBlock.newBlockBuilder(1).appendInt(randomInt()).appendNull().build().filter(0));
-        assertFilterBlock(IntVector.newVectorBuilder(1).appendInt(randomInt()).build().filter(0).asBlock());
-        assertFilterBlock(IntVector.newVectorBuilder(1).appendInt(randomInt()).appendInt(randomInt()).build().filter(0).asBlock());
+        try (IntBlock toFilter = blockFactory.newIntBlockBuilder(0).appendInt(1).appendInt(2).build()) {
+            assertFilterBlock(toFilter.filter(1));
+        }
+        try (IntBlock toFilter = blockFactory.newIntBlockBuilder(1).appendInt(randomInt()).appendNull().build()) {
+            assertFilterBlock(toFilter.filter(0));
+        }
+        try (IntVector toFilter = blockFactory.newIntVectorBuilder(1).appendInt(randomInt()).build()) {
+            assertFilterBlock(toFilter.filter(0).asBlock());
+        }
+        try (IntVector toFilter = blockFactory.newIntVectorBuilder(1).appendInt(randomInt()).appendInt(randomInt()).build()) {
+            assertFilterBlock(toFilter.filter(0).asBlock());
+        }
     }
 
     public void testFilterLongBlock() throws IOException {
-        assertFilterBlock(LongBlock.newBlockBuilder(0).appendLong(1).appendLong(2).build().filter(1));
-        assertFilterBlock(LongBlock.newBlockBuilder(1).appendLong(randomLong()).appendNull().build().filter(0));
-        assertFilterBlock(LongVector.newVectorBuilder(1).appendLong(randomLong()).build().filter(0).asBlock());
-        assertFilterBlock(LongVector.newVectorBuilder(1).appendLong(randomLong()).appendLong(randomLong()).build().filter(0).asBlock());
+        try (LongBlock toFilter = blockFactory.newLongBlockBuilder(0).appendLong(1).appendLong(2).build()) {
+            assertFilterBlock(toFilter.filter(1));
+        }
+        try (LongBlock toFilter = blockFactory.newLongBlockBuilder(1).appendLong(randomLong()).appendNull().build()) {
+            assertFilterBlock(toFilter.filter(0));
+        }
+        try (LongVector toFilter = blockFactory.newLongVectorBuilder(1).appendLong(randomLong()).build()) {
+            assertFilterBlock(toFilter.filter(0).asBlock());
+        }
+        try (LongVector toFilter = blockFactory.newLongVectorBuilder(1).appendLong(randomLong()).appendLong(randomLong()).build()) {
+            assertFilterBlock(toFilter.filter(0).asBlock());
+        }
     }
 
     public void testFilterDoubleBlock() throws IOException {
-        assertFilterBlock(DoubleBlock.newBlockBuilder(0).appendDouble(1).appendDouble(2).build().filter(1));
-        assertFilterBlock(DoubleBlock.newBlockBuilder(1).appendDouble(randomDouble()).appendNull().build().filter(0));
-        assertFilterBlock(DoubleVector.newVectorBuilder(1).appendDouble(randomDouble()).build().filter(0).asBlock());
-        assertFilterBlock(
-            DoubleVector.newVectorBuilder(1).appendDouble(randomDouble()).appendDouble(randomDouble()).build().filter(0).asBlock()
-        );
+        try (DoubleBlock toFilter = blockFactory.newDoubleBlockBuilder(0).appendDouble(1).appendDouble(2).build()) {
+            assertFilterBlock(toFilter.filter(1));
+        }
+        try (DoubleBlock toFilter = blockFactory.newDoubleBlockBuilder(1).appendDouble(randomDouble()).appendNull().build()) {
+            assertFilterBlock(toFilter.filter(0));
+        }
+        try (DoubleVector toFilter = blockFactory.newDoubleVectorBuilder(1).appendDouble(randomDouble()).build()) {
+            assertFilterBlock(toFilter.filter(0).asBlock());
+
+        }
+        try (
+            DoubleVector toFilter = blockFactory.newDoubleVectorBuilder(1).appendDouble(randomDouble()).appendDouble(randomDouble()).build()
+        ) {
+            assertFilterBlock(toFilter.filter(0).asBlock());
+        }
     }
 
     public void testFilterBytesRefBlock() throws IOException {
-        assertFilterBlock(
-            BytesRefBlock.newBlockBuilder(0)
+        try (
+            BytesRefBlock toFilter = blockFactory.newBytesRefBlockBuilder(0)
                 .appendBytesRef(randomBytesRef())
                 .appendBytesRef(randomBytesRef())
                 .build()
-                .filter(randomIntBetween(0, 1))
-        );
-        assertFilterBlock(
-            BytesRefBlock.newBlockBuilder(0).appendBytesRef(randomBytesRef()).appendNull().build().filter(randomIntBetween(0, 1))
-        );
-        assertFilterBlock(BytesRefVector.newVectorBuilder(0).appendBytesRef(randomBytesRef()).build().asBlock().filter(0));
-        assertFilterBlock(
-            BytesRefVector.newVectorBuilder(0)
+        ) {
+            assertFilterBlock(toFilter.filter(randomIntBetween(0, 1)));
+        }
+
+        try (BytesRefBlock toFilter = blockFactory.newBytesRefBlockBuilder(0).appendBytesRef(randomBytesRef()).appendNull().build()) {
+            assertFilterBlock(toFilter.filter(randomIntBetween(0, 1)));
+        }
+
+        try (BytesRefVector toFilter = blockFactory.newBytesRefVectorBuilder(0).appendBytesRef(randomBytesRef()).build()) {
+            assertFilterBlock(toFilter.asBlock().filter(0));
+        }
+        try (
+            BytesRefVector toFilter = blockFactory.newBytesRefVectorBuilder(0)
                 .appendBytesRef(randomBytesRef())
                 .appendBytesRef(randomBytesRef())
                 .build()
-                .asBlock()
-                .filter(randomIntBetween(0, 1))
-        );
+        ) {
+            assertFilterBlock(toFilter.asBlock().filter(randomIntBetween(0, 1)));
+        }
     }
 
     private void assertFilterBlock(Block origBlock) throws IOException {
         assertThat(origBlock.getPositionCount(), is(1));
-        try (Block deserBlock = serializeDeserializeBlock(origBlock)) {
+        try (origBlock; Block deserBlock = serializeDeserializeBlock(origBlock)) {
             EqualsHashCodeTestUtils.checkEqualsAndHashCode(origBlock, unused -> deserBlock);
             assertThat(deserBlock.getPositionCount(), is(1));
         }
     }
 
     public void testConstantNullBlock() throws IOException {
-        Block origBlock = new ConstantNullBlock(randomIntBetween(1, 8192));
-        try (Block deserBlock = serializeDeserializeBlock(origBlock)) {
-            EqualsHashCodeTestUtils.checkEqualsAndHashCode(origBlock, unused -> deserBlock);
+        try (Block origBlock = blockFactory.newConstantNullBlock(randomIntBetween(1, 8192))) {
+            try (Block deserBlock = serializeDeserializeBlock(origBlock)) {
+                EqualsHashCodeTestUtils.checkEqualsAndHashCode(origBlock, unused -> deserBlock);
+            }
         }
     }
 
     // TODO: more types, grouping, etc...
     public void testSimulateAggs() {
         DriverContext driverCtx = driverContext();
-        Page page = new Page(new LongArrayVector(new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }, 10).asBlock());
+        Page page = new Page(blockFactory.newLongArrayVector(new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }, 10).asBlock());
         var bigArrays = BigArrays.NON_RECYCLING_INSTANCE;
         var params = new Object[] {};
         var function = SumLongAggregatorFunction.create(driverCtx, List.of(0));
@@ -167,18 +217,20 @@ public void testSimulateAggs() {
                 .forEach(i -> EqualsHashCodeTestUtils.checkEqualsAndHashCode(blocks[i], unused -> deserBlocks[i]));
 
             var inputChannels = IntStream.range(0, SumLongAggregatorFunction.intermediateStateDesc().size()).boxed().toList();
-            var finalAggregator = SumLongAggregatorFunction.create(driverCtx, inputChannels);
-            finalAggregator.addIntermediateInput(new Page(deserBlocks));
-            Block[] finalBlocks = new Block[1];
-            finalAggregator.evaluateFinal(finalBlocks, 0, driverCtx);
-            try (var finalBlock = (LongBlock) finalBlocks[0]) {
-                assertThat(finalBlock.getLong(0), is(55L));
+            try (var finalAggregator = SumLongAggregatorFunction.create(driverCtx, inputChannels)) {
+                finalAggregator.addIntermediateInput(new Page(deserBlocks));
+                Block[] finalBlocks = new Block[1];
+                finalAggregator.evaluateFinal(finalBlocks, 0, driverCtx);
+                try (var finalBlock = (LongBlock) finalBlocks[0]) {
+                    assertThat(finalBlock.getLong(0), is(55L));
+                }
             }
         } finally {
             Releasables.close(deserBlocks);
         }
     } finally {
         Releasables.close(blocks);
+        page.releaseBlocks();
     }
 }
 
@@ -201,6 +253,6 @@ protected final BigArrays nonBreakingBigArrays() {
     /**
      * A {@link DriverContext} with a nonBreakingBigArrays.
     */
     protected DriverContext driverContext() { // TODO make this final and return a breaking block factory
-        return new DriverContext(nonBreakingBigArrays(), BlockFactory.getNonBreakingInstance());
+        return new DriverContext(nonBreakingBigArrays(), TestBlockFactory.getNonBreakingInstance());
     }
 }
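
The `try (origBlock; Block deserBlock = ...)` shape used throughout BlockSerializationTests above is the Java 9+ form of try-with-resources that accepts an existing effectively final variable, letting a helper take ownership of the block it was handed. A minimal sketch of the idiom (method and helper names mirror the test code for illustration):

```java
static void assertRoundTrip(Block origBlock) throws IOException {
    // origBlock is closed on exit along with deserBlock, even if an assertion
    // throws, so callers can pass a freshly built block without tracking it.
    try (origBlock; Block deserBlock = serializeDeserializeBlock(origBlock)) {
        assertThat(deserBlock.getPositionCount(), is(origBlock.getPositionCount()));
    }
}
```
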
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockTestUtils.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockTestUtils.java
index 82fb784846c72..c1c2c8845a962 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockTestUtils.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockTestUtils.java
@@ -32,7 +32,7 @@ public static Object randomValue(ElementType e) {
         case INT -> randomInt();
         case LONG -> randomLong();
         case DOUBLE -> randomDouble();
-        case BYTES_REF -> new BytesRef(randomRealisticUnicodeOfCodepointLengthBetween(0, 5));
+        case BYTES_REF -> new BytesRef(randomRealisticUnicodeOfCodepointLengthBetween(0, 5)); // TODO: also test spatial WKB
         case BOOLEAN -> randomBoolean();
         case DOC -> new BlockUtils.Doc(randomInt(), randomInt(), between(0, Integer.MAX_VALUE));
         case NULL -> null;
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BooleanBlockEqualityTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BooleanBlockEqualityTests.java
index ae0d56d8612ce..3e2322200dcf0 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BooleanBlockEqualityTests.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BooleanBlockEqualityTests.java
@@ -14,15 +14,17 @@
 
 public class BooleanBlockEqualityTests extends ESTestCase {
 
+    static final BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance();
+
     public void testEmptyVector() {
         // all these "empty" vectors should be equivalent
         List<BooleanVector> vectors = List.of(
-            new BooleanArrayVector(new boolean[] {}, 0),
-            new BooleanArrayVector(new boolean[] { randomBoolean() }, 0),
-            BooleanBlock.newConstantBlockWith(randomBoolean(), 0).asVector(),
-            BooleanBlock.newConstantBlockWith(randomBoolean(), 0).filter().asVector(),
-            BooleanBlock.newBlockBuilder(0).build().asVector(),
-            BooleanBlock.newBlockBuilder(0).appendBoolean(randomBoolean()).build().asVector().filter()
+            blockFactory.newBooleanArrayVector(new boolean[] {}, 0),
+            blockFactory.newBooleanArrayVector(new boolean[] { randomBoolean() }, 0),
+            blockFactory.newConstantBooleanBlockWith(randomBoolean(), 0).asVector(),
+            blockFactory.newConstantBooleanBlockWith(randomBoolean(), 0).filter().asVector(),
+            blockFactory.newBooleanBlockBuilder(0).build().asVector(),
+            blockFactory.newBooleanBlockBuilder(0).appendBoolean(randomBoolean()).build().asVector().filter()
         );
         assertAllEquals(vectors);
     }
@@ -33,21 +35,23 @@ public void testEmptyBlock() {
             new BooleanArrayBlock(
                 new boolean[] {},
                 0,
-                new int[] {},
+                new int[] { 0 },
                 BitSet.valueOf(new byte[] { 0b00 }),
-                randomFrom(Block.MvOrdering.values())
+                randomFrom(Block.MvOrdering.values()),
+                blockFactory
             ),
             new BooleanArrayBlock(
                 new boolean[] { randomBoolean() },
                 0,
-                new int[] {},
+                new int[] { 0 },
                 BitSet.valueOf(new byte[] { 0b00 }),
-                randomFrom(Block.MvOrdering.values())
+                randomFrom(Block.MvOrdering.values()),
+                blockFactory
            ),
-            BooleanBlock.newConstantBlockWith(randomBoolean(), 0),
-            BooleanBlock.newBlockBuilder(0).build(),
-            BooleanBlock.newBlockBuilder(0).appendBoolean(randomBoolean()).build().filter(),
-            BooleanBlock.newBlockBuilder(0).appendNull().build().filter()
+            blockFactory.newConstantBooleanBlockWith(randomBoolean(), 0),
+            blockFactory.newBooleanBlockBuilder(0).build(),
+            blockFactory.newBooleanBlockBuilder(0).appendBoolean(randomBoolean()).build().filter(),
+            blockFactory.newBooleanBlockBuilder(0).appendNull().build().filter()
         );
         assertAllEquals(blocks);
     }
@@ -55,16 +59,16 @@ public void testEmptyBlock() {
     public void testVectorEquality() {
         // all these vectors should be equivalent
         List<BooleanVector> vectors = List.of(
-            new BooleanArrayVector(new boolean[] { true, false, true }, 3),
-            new BooleanArrayVector(new boolean[] { true, false, true }, 3).asBlock().asVector(),
-            new BooleanArrayVector(new boolean[] { true, false, true, false }, 3),
-            new BooleanArrayVector(new boolean[] { true, false, true }, 3).filter(0, 1, 2),
-            new BooleanArrayVector(new boolean[] { true, false, true, false }, 4).filter(0, 1, 2),
-            new BooleanArrayVector(new boolean[] { false, true, false, true }, 4).filter(1, 2, 3),
-            new BooleanArrayVector(new boolean[] { true, true, false, true }, 4).filter(0, 2, 3),
-            BooleanBlock.newBlockBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(true).build().asVector(),
-            BooleanBlock.newBlockBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(true).build().asVector().filter(0, 1, 2),
-            BooleanBlock.newBlockBuilder(3)
+            blockFactory.newBooleanArrayVector(new boolean[] { true, false, true }, 3),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, false, true }, 3).asBlock().asVector(),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, false, true, false }, 3),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, false, true }, 3).filter(0, 1, 2),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, false, true, false }, 4).filter(0, 1, 2),
+            blockFactory.newBooleanArrayVector(new boolean[] { false, true, false, true }, 4).filter(1, 2, 3),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, true, false, true }, 4).filter(0, 2, 3),
+            blockFactory.newBooleanVectorBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(true).build(),
+            blockFactory.newBooleanVectorBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(true).build().filter(0, 1, 2),
+            blockFactory.newBooleanBlockBuilder(3)
                 .appendBoolean(true)
                 .appendBoolean(true)
                 .appendBoolean(false)
@@ -72,7 +76,7 @@ public void testVectorEquality() {
                 .build()
                 .filter(0, 2, 3)
                 .asVector(),
-            BooleanBlock.newBlockBuilder(3)
+            blockFactory.newBooleanBlockBuilder(3)
                 .appendBoolean(true)
                 .appendBoolean(true)
                 .appendBoolean(false)
@@ -85,17 +89,23 @@ public void testVectorEquality() {
 
         // all these constant-like vectors should be equivalent
         List<BooleanVector> moreVectors = List.of(
-            new BooleanArrayVector(new boolean[] { true, true, true }, 3),
-            new BooleanArrayVector(new boolean[] { true, true, true }, 3).asBlock().asVector(),
-            new BooleanArrayVector(new boolean[] { true, true, true, true }, 3),
-            new BooleanArrayVector(new boolean[] { true, true, true }, 3).filter(0, 1, 2),
-            new BooleanArrayVector(new boolean[] { true, true, true, false }, 4).filter(0, 1, 2),
-            new BooleanArrayVector(new boolean[] { false, true, true, true }, 4).filter(1, 2, 3),
-            new BooleanArrayVector(new boolean[] { true, false, true, true }, 4).filter(0, 2, 3),
-            BooleanBlock.newConstantBlockWith(true, 3).asVector(),
-            BooleanBlock.newBlockBuilder(3).appendBoolean(true).appendBoolean(true).appendBoolean(true).build().asVector(),
-            BooleanBlock.newBlockBuilder(3).appendBoolean(true).appendBoolean(true).appendBoolean(true).build().asVector().filter(0, 1, 2),
-            BooleanBlock.newBlockBuilder(3)
+            blockFactory.newBooleanArrayVector(new boolean[] { true, true, true }, 3),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, true, true }, 3).asBlock().asVector(),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, true, true, true }, 3),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, true, true }, 3).filter(0, 1, 2),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, true, true, false }, 4).filter(0, 1, 2),
+            blockFactory.newBooleanArrayVector(new boolean[] { false, true, true, true }, 4).filter(1, 2, 3),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, false, true, true }, 4).filter(0, 2, 3),
+            blockFactory.newConstantBooleanBlockWith(true, 3).asVector(),
+            blockFactory.newBooleanBlockBuilder(3).appendBoolean(true).appendBoolean(true).appendBoolean(true).build().asVector(),
+            blockFactory.newBooleanBlockBuilder(3)
+                .appendBoolean(true)
+                .appendBoolean(true)
+                .appendBoolean(true)
+                .build()
+                .asVector()
+                .filter(0, 1, 2),
+            blockFactory.newBooleanBlockBuilder(3)
                 .appendBoolean(true)
                 .appendBoolean(false)
                 .appendBoolean(true)
@@ -103,7 +113,7 @@ public void testVectorEquality() {
                 .build()
                 .filter(0, 2, 3)
                 .asVector(),
-            BooleanBlock.newBlockBuilder(3)
+            blockFactory.newBooleanBlockBuilder(3)
                 .appendBoolean(true)
                 .appendBoolean(false)
                 .appendBoolean(true)
@@ -118,35 +128,37 @@ public void testVectorEquality() {
     public void testBlockEquality() {
         // all these blocks should be equivalent
         List<BooleanBlock> blocks = List.of(
-            new BooleanArrayVector(new boolean[] { true, false, true }, 3).asBlock(),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, false, true }, 3).asBlock(),
             new BooleanArrayBlock(
                 new boolean[] { true, false, true },
                 3,
                 new int[] { 0, 1, 2, 3 },
                 BitSet.valueOf(new byte[] { 0b000 }),
-                randomFrom(Block.MvOrdering.values())
+                randomFrom(Block.MvOrdering.values()),
+                blockFactory
            ),
             new BooleanArrayBlock(
                 new boolean[] { true, false, true, false },
                 3,
                 new int[] { 0, 1, 2, 3 },
                 BitSet.valueOf(new byte[] { 0b1000 }),
-                randomFrom(Block.MvOrdering.values())
+                randomFrom(Block.MvOrdering.values()),
+                blockFactory
            ),
-            new BooleanArrayVector(new boolean[] { true, false, true }, 3).filter(0, 1, 2).asBlock(),
-            new BooleanArrayVector(new boolean[] { true, false, true, false }, 3).filter(0, 1, 2).asBlock(),
-            new BooleanArrayVector(new boolean[] { true, false, true, false }, 4).filter(0, 1, 2).asBlock(),
-            new BooleanArrayVector(new boolean[] { true, false, false, true }, 4).filter(0, 1, 3).asBlock(),
-            BooleanBlock.newBlockBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(true).build(),
-            BooleanBlock.newBlockBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(true).build().filter(0, 1, 2),
-            BooleanBlock.newBlockBuilder(3)
+            blockFactory.newBooleanArrayVector(new boolean[] { true, false, true }, 3).filter(0, 1, 2).asBlock(),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, false, true, false }, 3).filter(0, 1, 2).asBlock(),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, false, true, false }, 4).filter(0, 1, 2).asBlock(),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, false, false, true }, 4).filter(0, 1, 3).asBlock(),
+            blockFactory.newBooleanBlockBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(true).build(),
+            blockFactory.newBooleanBlockBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(true).build().filter(0, 1, 2),
+            blockFactory.newBooleanBlockBuilder(3)
                 .appendBoolean(true)
                 .appendBoolean(true)
                 .appendBoolean(false)
                 .appendBoolean(true)
                 .build()
                 .filter(0, 2, 3),
-            BooleanBlock.newBlockBuilder(3)
+            blockFactory.newBooleanBlockBuilder(3)
                 .appendBoolean(true)
                 .appendNull()
                 .appendBoolean(false)
@@ -158,30 +170,32 @@ public void testBlockEquality() {
 
         // all these constant-like blocks should be equivalent
         List<BooleanBlock> moreBlocks = List.of(
-            new BooleanArrayVector(new boolean[] { true, true }, 2).asBlock(),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, true }, 2).asBlock(),
             new BooleanArrayBlock(
                 new boolean[] { true, true },
                 2,
                 new int[] { 0, 1, 2 },
                 BitSet.valueOf(new byte[] { 0b000 }),
-                randomFrom(Block.MvOrdering.values())
+                randomFrom(Block.MvOrdering.values()),
+                blockFactory
            ),
             new BooleanArrayBlock(
                 new boolean[] { true, true, false },
                 2,
                 new int[] { 0, 1, 2 },
                 BitSet.valueOf(new byte[] { 0b100 }),
-                randomFrom(Block.MvOrdering.values())
+                randomFrom(Block.MvOrdering.values()),
+                blockFactory
            ),
-            new BooleanArrayVector(new boolean[] { true, true }, 2).filter(0, 1).asBlock(),
-            new BooleanArrayVector(new boolean[] { true, true, false }, 2).filter(0, 1).asBlock(),
-            new BooleanArrayVector(new boolean[] { true, true, false }, 3).filter(0, 1).asBlock(),
-            new BooleanArrayVector(new boolean[] { true, false, true }, 3).filter(0, 2).asBlock(),
-            BooleanBlock.newConstantBlockWith(true, 2),
-            BooleanBlock.newBlockBuilder(2).appendBoolean(true).appendBoolean(true).build(),
-            BooleanBlock.newBlockBuilder(2).appendBoolean(true).appendBoolean(true).build().filter(0, 1),
-            BooleanBlock.newBlockBuilder(2).appendBoolean(true).appendBoolean(true).appendBoolean(true).build().filter(0, 2),
-            BooleanBlock.newBlockBuilder(2).appendBoolean(true).appendNull().appendBoolean(true).build().filter(0, 2)
+            blockFactory.newBooleanArrayVector(new boolean[] { true, true }, 2).filter(0, 1).asBlock(),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, true, false }, 2).filter(0, 1).asBlock(),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, true, false }, 3).filter(0, 1).asBlock(),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, false, true }, 3).filter(0, 2).asBlock(),
+            blockFactory.newConstantBooleanBlockWith(true, 2),
+            blockFactory.newBooleanBlockBuilder(2).appendBoolean(true).appendBoolean(true).build(),
+            blockFactory.newBooleanBlockBuilder(2).appendBoolean(true).appendBoolean(true).build().filter(0, 1),
+            blockFactory.newBooleanBlockBuilder(2).appendBoolean(true).appendBoolean(true).appendBoolean(true).build().filter(0, 2),
+            blockFactory.newBooleanBlockBuilder(2).appendBoolean(true).appendNull().appendBoolean(true).build().filter(0, 2)
         );
         assertAllEquals(moreBlocks);
     }
@@ -189,15 +203,15 @@ public void testBlockEquality() {
     public void testVectorInequality() {
         // all these vectors should NOT be equivalent
         List<BooleanVector> notEqualVectors = List.of(
-            new BooleanArrayVector(new boolean[] { true }, 1),
-            new BooleanArrayVector(new boolean[] { false }, 1),
-            new BooleanArrayVector(new boolean[] { true, false }, 2),
-            new BooleanArrayVector(new boolean[] { true, false, true }, 3),
-            new BooleanArrayVector(new boolean[] { false, true, false }, 3),
-            BooleanBlock.newConstantBlockWith(true, 2).asVector(),
-            BooleanBlock.newBlockBuilder(2).appendBoolean(false).appendBoolean(true).build().asVector(),
-            BooleanBlock.newBlockBuilder(3).appendBoolean(false).appendBoolean(false).appendBoolean(true).build().asVector(),
-            BooleanBlock.newBlockBuilder(1)
+            blockFactory.newBooleanArrayVector(new boolean[] { true }, 1),
+            blockFactory.newBooleanArrayVector(new boolean[] { false }, 1),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, false }, 2),
+            blockFactory.newBooleanArrayVector(new boolean[] { true, false, true }, 3),
+            blockFactory.newBooleanArrayVector(new boolean[] { false, true, false }, 3),
+            blockFactory.newConstantBooleanBlockWith(true, 2).asVector(),
+            blockFactory.newBooleanBlockBuilder(2).appendBoolean(false).appendBoolean(true).build().asVector(),
+            blockFactory.newBooleanBlockBuilder(3).appendBoolean(false).appendBoolean(false).appendBoolean(true).build().asVector(),
+            blockFactory.newBooleanBlockBuilder(1)
                 .appendBoolean(false)
                 .appendBoolean(false)
                 .appendBoolean(false)
@@ -211,18 +225,28 @@ public void testVectorInequality() {
     public void testBlockInequality() {
         // all these blocks should NOT be equivalent
         List<BooleanBlock> notEqualBlocks = List.of(
-            new BooleanArrayVector(new boolean[] { false }, 1).asBlock(),
-            new BooleanArrayVector(new boolean[] { true }, 1).asBlock(),
-            new BooleanArrayVector(new boolean[] { false, true }, 2).asBlock(),
-            new BooleanArrayVector(new boolean[] { false, true, false }, 3).asBlock(),
-            new BooleanArrayVector(new boolean[] { false, false, true }, 3).asBlock(),
-            BooleanBlock.newConstantBlockWith(true, 2),
-            BooleanBlock.newBlockBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(false).build(),
-            BooleanBlock.newBlockBuilder(1).appendBoolean(true).appendBoolean(false).appendBoolean(true).appendBoolean(false).build(),
-            BooleanBlock.newBlockBuilder(1).appendBoolean(true).appendNull().build(),
-            BooleanBlock.newBlockBuilder(1).appendBoolean(true).appendNull().appendBoolean(false).build(),
-            BooleanBlock.newBlockBuilder(1).appendBoolean(true).appendBoolean(false).build(),
-            BooleanBlock.newBlockBuilder(3).appendBoolean(true).beginPositionEntry().appendBoolean(false).appendBoolean(false).build()
+            blockFactory.newBooleanArrayVector(new boolean[] { false }, 1).asBlock(),
+            blockFactory.newBooleanArrayVector(new boolean[] { true }, 1).asBlock(),
+            blockFactory.newBooleanArrayVector(new boolean[] { false, true }, 2).asBlock(),
+            blockFactory.newBooleanArrayVector(new boolean[] { false, true, false }, 3).asBlock(),
+            blockFactory.newBooleanArrayVector(new boolean[] { false, false, true }, 3).asBlock(),
+            blockFactory.newConstantBooleanBlockWith(true, 2),
+            blockFactory.newBooleanBlockBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(false).build(),
+            blockFactory.newBooleanBlockBuilder(1)
+                .appendBoolean(true)
+                .appendBoolean(false)
+                .appendBoolean(true)
+                .appendBoolean(false)
+                .build(),
+            blockFactory.newBooleanBlockBuilder(1).appendBoolean(true).appendNull().build(),
+            blockFactory.newBooleanBlockBuilder(1).appendBoolean(true).appendNull().appendBoolean(false).build(),
+            blockFactory.newBooleanBlockBuilder(1).appendBoolean(true).appendBoolean(false).build(),
+            blockFactory.newBooleanBlockBuilder(3)
+                .appendBoolean(true)
+                .beginPositionEntry()
+                .appendBoolean(false)
+                .appendBoolean(false)
+                .build()
        );
         assertAllNotEquals(notEqualBlocks);
     }
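
Both equality suites funnel structurally different but logically identical blocks through `assertAllEquals`/`assertAllNotEquals`, whose bodies are outside this diff. A sketch of the pairwise contract they presumably enforce (an assumption, since the helpers are not shown here):

```java
static void assertAllEquals(List<?> items) {
    for (Object a : items) {
        for (Object b : items) {
            assertEquals(a, b);
            // equal objects must also agree on hashCode to behave in hashed collections
            assertEquals(a.hashCode(), b.hashCode());
        }
    }
}
```
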
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BytesRefBlockEqualityTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BytesRefBlockEqualityTests.java
index ee654497c1ec3..6b3fa5df9b9ff 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BytesRefBlockEqualityTests.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BytesRefBlockEqualityTests.java
@@ -12,27 +12,28 @@
 import org.elasticsearch.common.util.BytesRefArray;
 import org.elasticsearch.common.util.MockBigArrays;
 import org.elasticsearch.common.util.PageCacheRecycler;
+import org.elasticsearch.compute.operator.ComputeTestCase;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
-import org.elasticsearch.test.ESTestCase;
 
 import java.util.Arrays;
 import java.util.BitSet;
 import java.util.List;
 
-public class BytesRefBlockEqualityTests extends ESTestCase {
+public class BytesRefBlockEqualityTests extends ComputeTestCase {
 
     final BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService());
+    final BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance();
 
     public void testEmptyVector() {
         // all these "empty" vectors should be equivalent
         try (var bytesRefArray1 = new BytesRefArray(0, bigArrays); var bytesRefArray2 = new BytesRefArray(1, bigArrays)) {
             List<BytesRefVector> vectors = List.of(
-                new BytesRefArrayVector(bytesRefArray1, 0),
-                new BytesRefArrayVector(bytesRefArray2, 0),
-                BytesRefBlock.newConstantBlockWith(new BytesRef(), 0).asVector(),
-                BytesRefBlock.newConstantBlockWith(new BytesRef(), 0).filter().asVector(),
-                BytesRefBlock.newBlockBuilder(0).build().asVector(),
-                BytesRefBlock.newBlockBuilder(0).appendBytesRef(new BytesRef()).build().asVector().filter()
+                new BytesRefArrayVector(bytesRefArray1, 0, blockFactory),
+                new BytesRefArrayVector(bytesRefArray2, 0, blockFactory),
+                blockFactory.newConstantBytesRefBlockWith(new BytesRef(), 0).asVector(),
+                blockFactory.newConstantBytesRefBlockWith(new BytesRef(), 0).filter().asVector(),
+                blockFactory.newBytesRefBlockBuilder(0).build().asVector(),
+                blockFactory.newBytesRefBlockBuilder(0).appendBytesRef(new BytesRef()).build().asVector().filter()
             );
             assertAllEquals(vectors);
         }
@@ -45,21 +46,23 @@ public void testEmptyBlock() {
             new BytesRefArrayBlock(
                 bytesRefArray1,
                 0,
-                new int[] {},
+                new int[] { 0 },
                 BitSet.valueOf(new byte[] { 0b00 }),
-                randomFrom(Block.MvOrdering.values())
+                randomFrom(Block.MvOrdering.values()),
+                blockFactory
            ),
             new BytesRefArrayBlock(
                 bytesRefArray2,
                 0,
-                new int[] {},
+                new int[] { 0 },
                 BitSet.valueOf(new byte[] { 0b00 }),
-                randomFrom(Block.MvOrdering.values())
+                randomFrom(Block.MvOrdering.values()),
+                blockFactory
            ),
-            BytesRefBlock.newConstantBlockWith(new BytesRef(), 0),
-            BytesRefBlock.newBlockBuilder(0).build(),
-            BytesRefBlock.newBlockBuilder(0).appendBytesRef(new BytesRef()).build().filter(),
-            BytesRefBlock.newBlockBuilder(0).appendNull().build().filter()
+            blockFactory.newConstantBytesRefBlockWith(new BytesRef(), 0),
+            blockFactory.newBytesRefBlockBuilder(0).build(),
+            blockFactory.newBytesRefBlockBuilder(0).appendBytesRef(new BytesRef()).build().filter(),
+            blockFactory.newBytesRefBlockBuilder(0).appendNull().build().filter()
         );
         assertAllEquals(blocks);
     }
@@ -69,25 +72,25 @@ public void testVectorEquality() {
         // all these vectors should be equivalent
         try (var bytesRefArray1 = arrayOf("1", "2", "3"); var bytesRefArray2 = arrayOf("1", "2", "3", "4")) {
             List<BytesRefVector> vectors = List.of(
-                new BytesRefArrayVector(bytesRefArray1, 3),
-                new BytesRefArrayVector(bytesRefArray1, 3).asBlock().asVector(),
-                new BytesRefArrayVector(bytesRefArray2, 3),
-                new BytesRefArrayVector(bytesRefArray1, 3).filter(0, 1, 2),
-                new BytesRefArrayVector(bytesRefArray2, 4).filter(0, 1, 2),
-                BytesRefBlock.newBlockBuilder(3)
+                new BytesRefArrayVector(bytesRefArray1, 3, blockFactory),
+                new BytesRefArrayVector(bytesRefArray1, 3, blockFactory).asBlock().asVector(),
+                new BytesRefArrayVector(bytesRefArray2, 3, blockFactory),
+                new BytesRefArrayVector(bytesRefArray1, 3, blockFactory).filter(0, 1, 2),
+                new BytesRefArrayVector(bytesRefArray2, 4, blockFactory).filter(0, 1, 2),
+                blockFactory.newBytesRefBlockBuilder(3)
                     .appendBytesRef(new BytesRef("1"))
                     .appendBytesRef(new BytesRef("2"))
                     .appendBytesRef(new BytesRef("3"))
                     .build()
                     .asVector(),
-                BytesRefBlock.newBlockBuilder(3)
+                blockFactory.newBytesRefBlockBuilder(3)
                     .appendBytesRef(new BytesRef("1"))
                     .appendBytesRef(new BytesRef("2"))
                     .appendBytesRef(new BytesRef("3"))
                     .build()
                     .asVector()
                     .filter(0, 1, 2),
-                BytesRefBlock.newBlockBuilder(3)
+                blockFactory.newBytesRefBlockBuilder(3)
                     .appendBytesRef(new BytesRef("1"))
                     .appendBytesRef(new BytesRef("4"))
                     .appendBytesRef(new BytesRef("2"))
@@ -95,7 +98,7 @@ public void testVectorEquality() {
                     .build()
                     .filter(0, 2, 3)
                     .asVector(),
-                BytesRefBlock.newBlockBuilder(3)
+                blockFactory.newBytesRefBlockBuilder(3)
                     .appendBytesRef(new BytesRef("1"))
                     .appendBytesRef(new BytesRef("4"))
                     .appendBytesRef(new BytesRef("2"))
@@ -110,26 +113,26 @@ public void testVectorEquality() {
         // all these constant-like vectors should be equivalent
         try (var bytesRefArray1 = arrayOf("1", "1", "1"); var bytesRefArray2 = arrayOf("1", "1", "1", "4")) {
             List<BytesRefVector> moreVectors = List.of(
-                new BytesRefArrayVector(bytesRefArray1, 3),
-                new BytesRefArrayVector(bytesRefArray1, 3).asBlock().asVector(),
-                new BytesRefArrayVector(bytesRefArray2, 3),
-                new BytesRefArrayVector(bytesRefArray1, 3).filter(0, 1, 2),
-                new BytesRefArrayVector(bytesRefArray2, 4).filter(0, 1, 2),
-                BytesRefBlock.newConstantBlockWith(new BytesRef("1"), 3).asVector(),
-                BytesRefBlock.newBlockBuilder(3)
+                new BytesRefArrayVector(bytesRefArray1, 3, blockFactory),
+                new BytesRefArrayVector(bytesRefArray1, 3, blockFactory).asBlock().asVector(),
+                new BytesRefArrayVector(bytesRefArray2, 3, blockFactory),
+                new BytesRefArrayVector(bytesRefArray1, 3, blockFactory).filter(0, 1, 2),
+                new BytesRefArrayVector(bytesRefArray2, 4, blockFactory).filter(0, 1, 2),
+                blockFactory.newConstantBytesRefBlockWith(new BytesRef("1"), 3).asVector(),
+                blockFactory.newBytesRefBlockBuilder(3)
                     .appendBytesRef(new BytesRef("1"))
                     .appendBytesRef(new BytesRef("1"))
                     .appendBytesRef(new BytesRef("1"))
                     .build()
                     .asVector(),
-                BytesRefBlock.newBlockBuilder(3)
+                blockFactory.newBytesRefBlockBuilder(3)
                     .appendBytesRef(new BytesRef("1"))
                     .appendBytesRef(new BytesRef("1"))
                     .appendBytesRef(new BytesRef("1"))
                     .build()
                     .asVector()
                     .filter(0, 1, 2),
-                BytesRefBlock.newBlockBuilder(3)
+                blockFactory.newBytesRefBlockBuilder(3)
                     .appendBytesRef(new BytesRef("1"))
                     .appendBytesRef(new BytesRef("4"))
                     .appendBytesRef(new BytesRef("1"))
@@ -137,7 +140,7 @@ public void testVectorEquality() {
                     .build()
                     .filter(0, 2, 3)
                     .asVector(),
-                BytesRefBlock.newBlockBuilder(3)
+                blockFactory.newBytesRefBlockBuilder(3)
                     .appendBytesRef(new BytesRef("1"))
                     .appendBytesRef(new BytesRef("4"))
                     .appendBytesRef(new BytesRef("1"))
@@ -154,43 +157,45 @@ public void testBlockEquality() {
         // all these blocks should be equivalent
         try (var bytesRefArray1 = arrayOf("1", "2", "3"); var bytesRefArray2 = arrayOf("1", "2", "3", "4")) {
             List<BytesRefBlock> blocks = List.of(
-                new BytesRefArrayVector(bytesRefArray1, 3).asBlock(),
+                new BytesRefArrayVector(bytesRefArray1, 3, blockFactory).asBlock(),
                 new BytesRefArrayBlock(
                     bytesRefArray1,
                     3,
                     new int[] { 0, 1, 2, 3 },
                     BitSet.valueOf(new byte[] { 0b000 }),
-                    randomFrom(Block.MvOrdering.values())
+                    randomFrom(Block.MvOrdering.values()),
+                    blockFactory
                ),
                 new BytesRefArrayBlock(
                     bytesRefArray2,
                     3,
                     new int[] { 0, 1, 2, 3 },
                     BitSet.valueOf(new byte[] { 0b1000 }),
-                    randomFrom(Block.MvOrdering.values())
+                    randomFrom(Block.MvOrdering.values()),
+                    blockFactory
                ),
-                new BytesRefArrayVector(bytesRefArray1, 3).filter(0, 1, 2).asBlock(),
-                new BytesRefArrayVector(bytesRefArray2, 3).filter(0, 1, 2).asBlock(),
-                new BytesRefArrayVector(bytesRefArray2, 4).filter(0, 1, 2).asBlock(),
-                BytesRefBlock.newBlockBuilder(3)
+                new BytesRefArrayVector(bytesRefArray1, 3, blockFactory).filter(0, 1, 2).asBlock(),
+                new BytesRefArrayVector(bytesRefArray2, 3, blockFactory).filter(0, 1, 2).asBlock(),
+                new BytesRefArrayVector(bytesRefArray2, 4, blockFactory).filter(0, 1, 2).asBlock(),
+                blockFactory.newBytesRefBlockBuilder(3)
                     .appendBytesRef(new BytesRef("1"))
                     .appendBytesRef(new BytesRef("2"))
                     .appendBytesRef(new BytesRef("3"))
                     .build(),
-                BytesRefBlock.newBlockBuilder(3)
+                blockFactory.newBytesRefBlockBuilder(3)
                     .appendBytesRef(new BytesRef("1"))
                     .appendBytesRef(new BytesRef("2"))
                     .appendBytesRef(new BytesRef("3"))
                     .build()
                     .filter(0, 1, 2),
-                BytesRefBlock.newBlockBuilder(3)
+                blockFactory.newBytesRefBlockBuilder(3)
                     .appendBytesRef(new BytesRef("1"))
                     .appendBytesRef(new BytesRef("4"))
                     .appendBytesRef(new BytesRef("2"))
                     .appendBytesRef(new BytesRef("3"))
                     .build()
                     .filter(0, 2, 3),
-                BytesRefBlock.newBlockBuilder(3)
+                blockFactory.newBytesRefBlockBuilder(3)
                     .appendBytesRef(new BytesRef("1"))
                     .appendNull()
                     .appendBytesRef(new BytesRef("2"))
@@ -204,34 +209,40 @@ public void testBlockEquality() {
         // all these constant-like blocks should be equivalent
         try (var bytesRefArray1 = arrayOf("9", "9"); var bytesRefArray2 = arrayOf("9", "9", "4")) {
             List<BytesRefBlock> moreBlocks = List.of(
-                new BytesRefArrayVector(bytesRefArray1, 2).asBlock(),
+                new BytesRefArrayVector(bytesRefArray1, 2, blockFactory).asBlock(),
                 new BytesRefArrayBlock(
                     bytesRefArray1,
                     2,
                     new int[] { 0, 1, 2 },
                     BitSet.valueOf(new byte[] { 0b000 }),
-                    randomFrom(Block.MvOrdering.values())
+                    randomFrom(Block.MvOrdering.values()),
+                    blockFactory
                ),
                 new BytesRefArrayBlock(
                     bytesRefArray2,
                     2,
                     new int[] { 0, 1, 2 },
                     BitSet.valueOf(new byte[] { 0b100 }),
-                    randomFrom(Block.MvOrdering.values())
+                    randomFrom(Block.MvOrdering.values()),
+                    blockFactory
                ),
-                new BytesRefArrayVector(bytesRefArray1, 2).filter(0, 1).asBlock(),
-                new BytesRefArrayVector(bytesRefArray2, 2).filter(0, 1).asBlock(),
-                new BytesRefArrayVector(bytesRefArray2, 3).filter(0, 1).asBlock(),
-                BytesRefBlock.newConstantBlockWith(new BytesRef("9"), 2),
-                BytesRefBlock.newBlockBuilder(2).appendBytesRef(new BytesRef("9")).appendBytesRef(new BytesRef("9")).build(),
-                BytesRefBlock.newBlockBuilder(2).appendBytesRef(new BytesRef("9")).appendBytesRef(new BytesRef("9")).build().filter(0, 1),
-                BytesRefBlock.newBlockBuilder(2)
+                new BytesRefArrayVector(bytesRefArray1, 2, blockFactory).filter(0, 1).asBlock(),
+                new BytesRefArrayVector(bytesRefArray2, 2, blockFactory).filter(0, 1).asBlock(),
+                new BytesRefArrayVector(bytesRefArray2, 3, blockFactory).filter(0, 1).asBlock(),
+                blockFactory.newConstantBytesRefBlockWith(new BytesRef("9"), 2),
+                blockFactory.newBytesRefBlockBuilder(2).appendBytesRef(new BytesRef("9")).appendBytesRef(new BytesRef("9")).build(),
+                blockFactory.newBytesRefBlockBuilder(2)
+                    .appendBytesRef(new BytesRef("9"))
+                    .appendBytesRef(new BytesRef("9"))
+                    .build()
+                    .filter(0, 1),
+                blockFactory.newBytesRefBlockBuilder(2)
                     .appendBytesRef(new BytesRef("9"))
                     .appendBytesRef(new BytesRef("4"))
                     .appendBytesRef(new BytesRef("9"))
                     .build()
                     .filter(0, 2),
-                BytesRefBlock.newBlockBuilder(2)
+                blockFactory.newBytesRefBlockBuilder(2)
                     .appendBytesRef(new BytesRef("9"))
                     .appendNull()
                     .appendBytesRef(new BytesRef("9"))
@@ -252,25 +263,25 @@ public void testVectorInequality() {
            var bytesRefArray5 = arrayOf("1", "2", "4")
         ) {
             List<BytesRefVector> notEqualVectors = List.of(
-                new BytesRefArrayVector(bytesRefArray1, 1),
-                new BytesRefArrayVector(bytesRefArray2, 1),
-                new BytesRefArrayVector(bytesRefArray3, 2),
-                new BytesRefArrayVector(bytesRefArray4, 3),
-                new BytesRefArrayVector(bytesRefArray5, 3),
-                BytesRefBlock.newConstantBlockWith(new BytesRef("9"), 2).asVector(),
-                BytesRefBlock.newBlockBuilder(2)
+                new BytesRefArrayVector(bytesRefArray1, 1, blockFactory),
+                new BytesRefArrayVector(bytesRefArray2, 1, blockFactory),
+                new BytesRefArrayVector(bytesRefArray3, 2, blockFactory),
+                new BytesRefArrayVector(bytesRefArray4, 3, blockFactory),
+                new BytesRefArrayVector(bytesRefArray5, 3, blockFactory),
+                blockFactory.newConstantBytesRefBlockWith(new BytesRef("9"), 2).asVector(),
+                blockFactory.newBytesRefBlockBuilder(2)
                     .appendBytesRef(new BytesRef("1"))
                     .appendBytesRef(new BytesRef("2"))
                     .build()
                     .asVector()
                     .filter(1),
-                BytesRefBlock.newBlockBuilder(3)
+                blockFactory.newBytesRefBlockBuilder(3)
                     .appendBytesRef(new BytesRef("1"))
                     .appendBytesRef(new BytesRef("2"))
                     .appendBytesRef(new BytesRef("5"))
                     .build()
                     .asVector(),
-                BytesRefBlock.newBlockBuilder(1)
+                blockFactory.newBytesRefBlockBuilder(1)
                     .appendBytesRef(new BytesRef("1"))
                     .appendBytesRef(new BytesRef("2"))
                     .appendBytesRef(new BytesRef("3"))
@@ -292,27 +303,35 @@ public void testBlockInequality() {
            var bytesRefArray5 = arrayOf("1", "2", "4")
         ) {
             List<BytesRefBlock> notEqualBlocks = List.of(
-                new BytesRefArrayVector(bytesRefArray1, 1).asBlock(),
-                new BytesRefArrayVector(bytesRefArray2, 1).asBlock(),
-                new BytesRefArrayVector(bytesRefArray3, 2).asBlock(),
-                new BytesRefArrayVector(bytesRefArray4, 3).asBlock(),
-                new BytesRefArrayVector(bytesRefArray5, 3).asBlock(),
-                BytesRefBlock.newConstantBlockWith(new BytesRef("9"), 2),
-                BytesRefBlock.newBlockBuilder(2).appendBytesRef(new BytesRef("1")).appendBytesRef(new BytesRef("2")).build().filter(1),
-                BytesRefBlock.newBlockBuilder(3)
+                new BytesRefArrayVector(bytesRefArray1, 1, blockFactory).asBlock(),
+                new BytesRefArrayVector(bytesRefArray2, 1, blockFactory).asBlock(),
+                new BytesRefArrayVector(bytesRefArray3, 2, blockFactory).asBlock(),
+                new BytesRefArrayVector(bytesRefArray4, 3, blockFactory).asBlock(),
+                new BytesRefArrayVector(bytesRefArray5, 3, blockFactory).asBlock(),
+                blockFactory.newConstantBytesRefBlockWith(new BytesRef("9"), 2),
+                blockFactory.newBytesRefBlockBuilder(2)
+                    .appendBytesRef(new BytesRef("1"))
+                    .appendBytesRef(new BytesRef("2"))
+                    .build()
+                    .filter(1),
+                blockFactory.newBytesRefBlockBuilder(3)
                     .appendBytesRef(new BytesRef("1"))
                     .appendBytesRef(new BytesRef("2"))
                     .appendBytesRef(new BytesRef("5"))
                     .build(),
-                BytesRefBlock.newBlockBuilder(1)
+                blockFactory.newBytesRefBlockBuilder(1)
                     .appendBytesRef(new BytesRef("1"))
                     .appendBytesRef(new BytesRef("2"))
                     .appendBytesRef(new BytesRef("3"))
                     .appendBytesRef(new BytesRef("4"))
                     .build(),
-                BytesRefBlock.newBlockBuilder(1).appendBytesRef(new BytesRef("1")).appendNull().build(),
-                BytesRefBlock.newBlockBuilder(1).appendBytesRef(new BytesRef("1")).appendNull().appendBytesRef(new BytesRef("3")).build(),
-                BytesRefBlock.newBlockBuilder(1).appendBytesRef(new BytesRef("1")).appendBytesRef(new BytesRef("3")).build()
+                blockFactory.newBytesRefBlockBuilder(1).appendBytesRef(new BytesRef("1")).appendNull().build(),
+                blockFactory.newBytesRefBlockBuilder(1)
+                    .appendBytesRef(new BytesRef("1"))
+                    .appendNull()
+                    .appendBytesRef(new BytesRef("3"))
+                    .build(),
+                blockFactory.newBytesRefBlockBuilder(1).appendBytesRef(new BytesRef("1")).appendBytesRef(new BytesRef("3")).build()
            );
             assertAllNotEquals(notEqualBlocks);
         }
@@ -320,8 +339,12 @@ public void testBlockInequality() {
 
     public void testSimpleBlockWithSingleNull() {
         List<BytesRefBlock> blocks = List.of(
-            BytesRefBlock.newBlockBuilder(3).appendBytesRef(new BytesRef("1")).appendNull().appendBytesRef(new BytesRef("3")).build(),
-            BytesRefBlock.newBlockBuilder(3).appendBytesRef(new BytesRef("1")).appendNull().appendBytesRef(new BytesRef("3")).build()
+            blockFactory.newBytesRefBlockBuilder(3)
+                .appendBytesRef(new BytesRef("1"))
+                .appendNull()
+                .appendBytesRef(new BytesRef("3"))
+                .build(),
+            blockFactory.newBytesRefBlockBuilder(3).appendBytesRef(new BytesRef("1")).appendNull().appendBytesRef(new BytesRef("3")).build()
         );
         assertEquals(3, blocks.get(0).getPositionCount());
         assertTrue(blocks.get(0).isNull(1));
@@ -331,8 +354,8 @@ public void testSimpleBlockWithSingleNull() {
     public void testSimpleBlockWithManyNulls() {
         int positions = randomIntBetween(1, 256);
         boolean grow = randomBoolean();
-        BytesRefBlock.Builder builder1 = BytesRefBlock.newBlockBuilder(grow ? 0 : positions);
-        BytesRefBlock.Builder builder2 = BytesRefBlock.newBlockBuilder(grow ? 0 : positions);
+        BytesRefBlock.Builder builder1 = blockFactory.newBytesRefBlockBuilder(grow ? 0 : positions);
+        BytesRefBlock.Builder builder2 = blockFactory.newBytesRefBlockBuilder(grow ? 0 : positions);
         for (int p = 0; p < positions; p++) {
             builder1.appendNull();
             builder2.appendNull();
@@ -349,12 +372,12 @@ public void testSimpleBlockWithManyNulls() {
 
     public void testSimpleBlockWithSingleMultiValue() {
         List<BytesRefBlock> blocks = List.of(
-            BytesRefBlock.newBlockBuilder(1)
+            blockFactory.newBytesRefBlockBuilder(1)
                 .beginPositionEntry()
                 .appendBytesRef(new BytesRef("1a"))
                 .appendBytesRef(new BytesRef("2b"))
                 .build(),
-            BytesRefBlock.newBlockBuilder(1)
+            blockFactory.newBytesRefBlockBuilder(1)
                 .beginPositionEntry()
                 .appendBytesRef(new BytesRef("1a"))
                 .appendBytesRef(new BytesRef("2b"))
@@ -368,9 +391,9 @@ public void testSimpleBlockWithSingleMultiValue() {
     public void testSimpleBlockWithManyMultiValues() {
         int positions = randomIntBetween(1, 256);
         boolean grow = randomBoolean();
-        BytesRefBlock.Builder builder1 = BytesRefBlock.newBlockBuilder(grow ? 0 : positions);
-        BytesRefBlock.Builder builder2 = BytesRefBlock.newBlockBuilder(grow ? 0 : positions);
-        BytesRefBlock.Builder builder3 = BytesRefBlock.newBlockBuilder(grow ? 0 : positions);
+        BytesRefBlock.Builder builder1 = blockFactory.newBytesRefBlockBuilder(grow ? 0 : positions);
+        BytesRefBlock.Builder builder2 = blockFactory.newBytesRefBlockBuilder(grow ? 0 : positions);
0 : positions); for (int pos = 0; pos < positions; pos++) { builder1.beginPositionEntry(); builder2.beginPositionEntry(); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DocVectorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DocVectorTests.java index e2eff15fcb769..7adf975c2b6d7 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DocVectorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DocVectorTests.java @@ -8,9 +8,10 @@ package org.elasticsearch.compute.data; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.operator.ComputeTestCase; import org.elasticsearch.core.Releasables; -import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; import java.util.Collections; @@ -21,7 +22,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -public class DocVectorTests extends ESTestCase { +public class DocVectorTests extends ComputeTestCase { public void testNonDecreasingSetTrue() { int length = between(1, 100); DocVector docs = new DocVector(intRange(0, length), intRange(0, length), intRange(0, length), true); @@ -29,28 +30,64 @@ public void testNonDecreasingSetTrue() { } public void testNonDecreasingSetFalse() { - DocVector docs = new DocVector(intRange(0, 2), intRange(0, 2), new IntArrayVector(new int[] { 1, 0 }, 2), false); + BlockFactory blockFactory = blockFactory(); + DocVector docs = new DocVector(intRange(0, 2), intRange(0, 2), blockFactory.newIntArrayVector(new int[] { 1, 0 }, 2), false); assertFalse(docs.singleSegmentNonDecreasing()); + docs.close(); } public void testNonDecreasingNonConstantShard() { - DocVector docs = new DocVector(intRange(0, 2), IntBlock.newConstantBlockWith(0, 2).asVector(), intRange(0, 2), null); + BlockFactory blockFactory = blockFactory(); + DocVector docs = new DocVector(intRange(0, 2), blockFactory.newConstantIntVector(0, 2), intRange(0, 2), null); assertFalse(docs.singleSegmentNonDecreasing()); + docs.close(); } public void testNonDecreasingNonConstantSegment() { - DocVector docs = new DocVector(IntBlock.newConstantBlockWith(0, 2).asVector(), intRange(0, 2), intRange(0, 2), null); + BlockFactory blockFactory = blockFactory(); + DocVector docs = new DocVector(blockFactory.newConstantIntVector(0, 2), intRange(0, 2), intRange(0, 2), null); assertFalse(docs.singleSegmentNonDecreasing()); + docs.close(); } public void testNonDecreasingDescendingDocs() { + BlockFactory blockFactory = blockFactory(); DocVector docs = new DocVector( - IntBlock.newConstantBlockWith(0, 2).asVector(), - IntBlock.newConstantBlockWith(0, 2).asVector(), - new IntArrayVector(new int[] { 1, 0 }, 2), + blockFactory.newConstantIntVector(0, 2), + blockFactory.newConstantIntVector(0, 2), + blockFactory.newIntArrayVector(new int[] { 1, 0 }, 2), null ); assertFalse(docs.singleSegmentNonDecreasing()); + docs.close(); + } + + private static int MAX_BUILD_BREAKS_LIMIT = 1391; + + public void testBuildBreaks() { + testBuildBreaks(ByteSizeValue.ofBytes(between(0, MAX_BUILD_BREAKS_LIMIT))); + } + + public void testBuildBreaksMax() { + testBuildBreaks(ByteSizeValue.ofBytes(MAX_BUILD_BREAKS_LIMIT)); + } + + private void testBuildBreaks(ByteSizeValue limit) { + int size = 100; + BlockFactory blockFactory = blockFactory(limit); + Exception e = 
expectThrows(CircuitBreakingException.class, () -> { + try (DocBlock.Builder builder = DocBlock.newBlockBuilder(blockFactory, size)) { + for (int r = 0; r < size; r++) { + builder.appendShard(3 - size % 4); + builder.appendSegment(size % 10); + builder.appendDoc(size); + } + builder.build().close(); + } + }); + assertThat(e.getMessage(), equalTo("over test limit")); + logger.info("break position", e); + assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); } public void testShardSegmentDocMap() { @@ -99,15 +136,17 @@ public void testRandomShardSegmentDocMap() { } private void assertShardSegmentDocMap(int[][] data, int[][] expected) { - BlockFactory blockFactory = BlockFactoryTests.blockFactory(ByteSizeValue.ofGb(1)); - try (DocBlock.Builder builder = DocBlock.newBlockBuilder(data.length, blockFactory)) { + BlockFactory blockFactory = blockFactory(); + try (DocBlock.Builder builder = DocBlock.newBlockBuilder(blockFactory, data.length)) { for (int r = 0; r < data.length; r++) { builder.appendShard(data[r][0]); builder.appendSegment(data[r][1]); builder.appendDoc(data[r][2]); } try (DocVector docVector = builder.build().asVector()) { + assertThat(blockFactory.breaker().getUsed(), equalTo(docVector.ramBytesUsed())); int[] forwards = docVector.shardSegmentDocMapForwards(); + assertThat(blockFactory.breaker().getUsed(), equalTo(docVector.ramBytesUsed())); int[][] result = new int[docVector.getPositionCount()][]; for (int p = 0; p < result.length; p++) { @@ -132,8 +171,38 @@ private void assertShardSegmentDocMap(int[][] data, int[][] expected) { assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); } + // TODO these are really difficult to maintain. can we figure these out on the fly? + private static final int MAX_SHARD_SEGMENT_DOC_MAP_BREAKS = 2220; + + public void testShardSegmentDocMapBreaks() { + testShardSegmentDocMapBreaks(ByteSizeValue.ofBytes(between(MAX_BUILD_BREAKS_LIMIT + 1, MAX_SHARD_SEGMENT_DOC_MAP_BREAKS))); + } + + public void testShardSegmentDocMapBreaksMax() { + testShardSegmentDocMapBreaks(ByteSizeValue.ofBytes(MAX_SHARD_SEGMENT_DOC_MAP_BREAKS)); + } + + private void testShardSegmentDocMapBreaks(ByteSizeValue limit) { + int size = 100; + BlockFactory blockFactory = blockFactory(limit); + try (DocBlock.Builder builder = DocBlock.newBlockBuilder(blockFactory, size)) { + for (int r = 0; r < size; r++) { + builder.appendShard(3 - size % 4); + builder.appendSegment(size % 10); + builder.appendDoc(size); + } + try (DocBlock docBlock = builder.build()) { + Exception e = expectThrows(CircuitBreakingException.class, docBlock.asVector()::shardSegmentDocMapForwards); + assertThat(e.getMessage(), equalTo("over test limit")); + logger.info("broke at", e); + } + } + assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); + } + public void testCannotDoubleRelease() { - var block = new DocVector(intRange(0, 2), IntBlock.newConstantBlockWith(0, 2).asVector(), intRange(0, 2), null).asBlock(); + BlockFactory blockFactory = blockFactory(); + var block = new DocVector(intRange(0, 2), blockFactory.newConstantIntBlockWith(0, 2).asVector(), intRange(0, 2), null).asBlock(); assertThat(block.isReleased(), is(false)); Page page = new Page(block); @@ -141,7 +210,7 @@ public void testCannotDoubleRelease() { assertThat(block.isReleased(), is(true)); Exception e = expectThrows(IllegalStateException.class, () -> block.close()); - assertThat(e.getMessage(), containsString("can't release already released block")); + assertThat(e.getMessage(), containsString("can't release already released 
object")); e = expectThrows(IllegalStateException.class, () -> page.getBlock(0)); assertThat(e.getMessage(), containsString("can't read released block")); @@ -151,17 +220,55 @@ public void testCannotDoubleRelease() { } public void testRamBytesUsedWithout() { + BlockFactory blockFactory = blockFactory(); DocVector docs = new DocVector( - IntBlock.newConstantBlockWith(0, 1).asVector(), - IntBlock.newConstantBlockWith(0, 1).asVector(), - IntBlock.newConstantBlockWith(0, 1).asVector(), + blockFactory.newConstantIntBlockWith(0, 1).asVector(), + blockFactory.newConstantIntBlockWith(0, 1).asVector(), + blockFactory.newConstantIntBlockWith(0, 1).asVector(), false ); assertThat(docs.singleSegmentNonDecreasing(), is(false)); docs.ramBytesUsed(); // ensure non-singleSegmentNonDecreasing handles nulls in ramByteUsed + docs.close(); + } + + public void testFilter() { + BlockFactory factory = blockFactory(); + try ( + DocVector docs = new DocVector( + factory.newConstantIntVector(0, 10), + factory.newConstantIntVector(0, 10), + factory.newIntArrayVector(new int[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, 10), + false + ); + DocVector filtered = docs.filter(1, 2, 3); + DocVector expected = new DocVector( + factory.newConstantIntVector(0, 3), + factory.newConstantIntVector(0, 3), + factory.newIntArrayVector(new int[] { 1, 2, 3 }, 3), + false + ); + ) { + assertThat(filtered, equalTo(expected)); + } + } + + public void testFilterBreaks() { + BlockFactory factory = blockFactory(ByteSizeValue.ofBytes(between(250, 370))); + try ( + DocVector docs = new DocVector( + factory.newConstantIntVector(0, 10), + factory.newConstantIntVector(0, 10), + factory.newIntArrayVector(new int[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, 10), + false + ) + ) { + Exception e = expectThrows(CircuitBreakingException.class, () -> docs.filter(1, 2, 3)); + assertThat(e.getMessage(), equalTo("over test limit")); + } } IntVector intRange(int startInclusive, int endExclusive) { - return IntVector.range(startInclusive, endExclusive, BlockFactory.getNonBreakingInstance()); + return IntVector.range(startInclusive, endExclusive, TestBlockFactory.getNonBreakingInstance()); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DoubleBlockEqualityTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DoubleBlockEqualityTests.java index 7dda97f52834e..c0a3b3b8ac751 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DoubleBlockEqualityTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DoubleBlockEqualityTests.java @@ -7,22 +7,25 @@ package org.elasticsearch.compute.data; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.compute.operator.ComputeTestCase; +import org.elasticsearch.core.Releasables; import java.util.BitSet; import java.util.List; -public class DoubleBlockEqualityTests extends ESTestCase { +public class DoubleBlockEqualityTests extends ComputeTestCase { + + static final BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); public void testEmptyVector() { // all these "empty" vectors should be equivalent List vectors = List.of( - new DoubleArrayVector(new double[] {}, 0), - new DoubleArrayVector(new double[] { 0 }, 0), - DoubleBlock.newConstantBlockWith(0, 0).asVector(), - DoubleBlock.newConstantBlockWith(0, 0).filter().asVector(), - DoubleBlock.newBlockBuilder(0).build().asVector(), - DoubleBlock.newBlockBuilder(0).appendDouble(1).build().asVector().filter() + 
blockFactory.newDoubleArrayVector(new double[] {}, 0), + blockFactory.newDoubleArrayVector(new double[] { 0 }, 0), + blockFactory.newConstantDoubleVector(0, 0), + blockFactory.newConstantDoubleBlockWith(0, 0).filter().asVector(), + blockFactory.newDoubleBlockBuilder(0).build().asVector(), + blockFactory.newDoubleBlockBuilder(0).appendDouble(1).build().asVector().filter() ); assertAllEquals(vectors); } @@ -30,41 +33,42 @@ public void testEmptyVector() { public void testEmptyBlock() { // all these "empty" vectors should be equivalent List blocks = List.of( - new DoubleArrayBlock( + blockFactory.newDoubleArrayBlock( new double[] {}, 0, - new int[] {}, + new int[] { 0 }, BitSet.valueOf(new byte[] { 0b00 }), randomFrom(Block.MvOrdering.values()) ), - new DoubleArrayBlock( + blockFactory.newDoubleArrayBlock( new double[] { 0 }, 0, - new int[] {}, + new int[] { 0 }, BitSet.valueOf(new byte[] { 0b00 }), randomFrom(Block.MvOrdering.values()) ), - DoubleBlock.newConstantBlockWith(0, 0), - DoubleBlock.newBlockBuilder(0).build(), - DoubleBlock.newBlockBuilder(0).appendDouble(1).build().filter(), - DoubleBlock.newBlockBuilder(0).appendNull().build().filter() + blockFactory.newConstantDoubleBlockWith(0, 0), + blockFactory.newDoubleBlockBuilder(0).build(), + blockFactory.newDoubleBlockBuilder(0).appendDouble(1).build().filter(), + blockFactory.newDoubleBlockBuilder(0).appendNull().build().filter() ); assertAllEquals(blocks); + Releasables.close(blocks); } public void testVectorEquality() { // all these vectors should be equivalent List vectors = List.of( - new DoubleArrayVector(new double[] { 1, 2, 3 }, 3), - new DoubleArrayVector(new double[] { 1, 2, 3 }, 3).asBlock().asVector(), - new DoubleArrayVector(new double[] { 1, 2, 3, 4 }, 3), - new DoubleArrayVector(new double[] { 1, 2, 3 }, 3).filter(0, 1, 2), - new DoubleArrayVector(new double[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2), - new DoubleArrayVector(new double[] { 0, 1, 2, 3 }, 4).filter(1, 2, 3), - new DoubleArrayVector(new double[] { 1, 4, 2, 3 }, 4).filter(0, 2, 3), - DoubleBlock.newBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(3).build().asVector(), - DoubleBlock.newBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(3).build().asVector().filter(0, 1, 2), - DoubleBlock.newBlockBuilder(3) + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3 }, 3), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3 }, 3).asBlock().asVector(), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3, 4 }, 3), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3 }, 3).filter(0, 1, 2), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2), + blockFactory.newDoubleArrayVector(new double[] { 0, 1, 2, 3 }, 4).filter(1, 2, 3), + blockFactory.newDoubleArrayVector(new double[] { 1, 4, 2, 3 }, 4).filter(0, 2, 3), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(3).build().asVector(), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(3).build().asVector().filter(0, 1, 2), + blockFactory.newDoubleBlockBuilder(3) .appendDouble(1) .appendDouble(4) .appendDouble(2) @@ -72,7 +76,7 @@ public void testVectorEquality() { .build() .filter(0, 2, 3) .asVector(), - DoubleBlock.newBlockBuilder(3) + blockFactory.newDoubleBlockBuilder(3) .appendDouble(1) .appendDouble(4) .appendDouble(2) @@ -85,17 +89,17 @@ public void testVectorEquality() { // all these constant-like vectors should be equivalent List moreVectors = List.of( - new 
DoubleArrayVector(new double[] { 1, 1, 1 }, 3), - new DoubleArrayVector(new double[] { 1, 1, 1 }, 3).asBlock().asVector(), - new DoubleArrayVector(new double[] { 1, 1, 1, 1 }, 3), - new DoubleArrayVector(new double[] { 1, 1, 1 }, 3).filter(0, 1, 2), - new DoubleArrayVector(new double[] { 1, 1, 1, 4 }, 4).filter(0, 1, 2), - new DoubleArrayVector(new double[] { 3, 1, 1, 1 }, 4).filter(1, 2, 3), - new DoubleArrayVector(new double[] { 1, 4, 1, 1 }, 4).filter(0, 2, 3), - DoubleBlock.newConstantBlockWith(1, 3).asVector(), - DoubleBlock.newBlockBuilder(3).appendDouble(1).appendDouble(1).appendDouble(1).build().asVector(), - DoubleBlock.newBlockBuilder(3).appendDouble(1).appendDouble(1).appendDouble(1).build().asVector().filter(0, 1, 2), - DoubleBlock.newBlockBuilder(3) + blockFactory.newDoubleArrayVector(new double[] { 1, 1, 1 }, 3), + blockFactory.newDoubleArrayVector(new double[] { 1, 1, 1 }, 3).asBlock().asVector(), + blockFactory.newDoubleArrayVector(new double[] { 1, 1, 1, 1 }, 3), + blockFactory.newDoubleArrayVector(new double[] { 1, 1, 1 }, 3).filter(0, 1, 2), + blockFactory.newDoubleArrayVector(new double[] { 1, 1, 1, 4 }, 4).filter(0, 1, 2), + blockFactory.newDoubleArrayVector(new double[] { 3, 1, 1, 1 }, 4).filter(1, 2, 3), + blockFactory.newDoubleArrayVector(new double[] { 1, 4, 1, 1 }, 4).filter(0, 2, 3), + blockFactory.newConstantDoubleBlockWith(1, 3).asVector(), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendDouble(1).appendDouble(1).build().asVector(), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendDouble(1).appendDouble(1).build().asVector().filter(0, 1, 2), + blockFactory.newDoubleBlockBuilder(3) .appendDouble(1) .appendDouble(4) .appendDouble(1) @@ -103,7 +107,7 @@ public void testVectorEquality() { .build() .filter(0, 2, 3) .asVector(), - DoubleBlock.newBlockBuilder(3) + blockFactory.newDoubleBlockBuilder(3) .appendDouble(1) .appendDouble(4) .appendDouble(1) @@ -118,58 +122,62 @@ public void testVectorEquality() { public void testBlockEquality() { // all these blocks should be equivalent List blocks = List.of( - new DoubleArrayVector(new double[] { 1, 2, 3 }, 3).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3 }, 3).asBlock(), new DoubleArrayBlock( new double[] { 1, 2, 3 }, 3, new int[] { 0, 1, 2, 3 }, BitSet.valueOf(new byte[] { 0b000 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), new DoubleArrayBlock( new double[] { 1, 2, 3, 4 }, 3, new int[] { 0, 1, 2, 3 }, BitSet.valueOf(new byte[] { 0b1000 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), - new DoubleArrayVector(new double[] { 1, 2, 3 }, 3).filter(0, 1, 2).asBlock(), - new DoubleArrayVector(new double[] { 1, 2, 3, 4 }, 3).filter(0, 1, 2).asBlock(), - new DoubleArrayVector(new double[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2).asBlock(), - new DoubleArrayVector(new double[] { 1, 2, 4, 3 }, 4).filter(0, 1, 3).asBlock(), - DoubleBlock.newBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(3).build(), - DoubleBlock.newBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(3).build().filter(0, 1, 2), - DoubleBlock.newBlockBuilder(3).appendDouble(1).appendDouble(4).appendDouble(2).appendDouble(3).build().filter(0, 2, 3), - DoubleBlock.newBlockBuilder(3).appendDouble(1).appendNull().appendDouble(2).appendDouble(3).build().filter(0, 2, 3) + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3 }, 3).filter(0, 1, 2).asBlock(), + 
blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3, 4 }, 3).filter(0, 1, 2).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 4, 3 }, 4).filter(0, 1, 3).asBlock(), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(3).build(), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(3).build().filter(0, 1, 2), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendDouble(4).appendDouble(2).appendDouble(3).build().filter(0, 2, 3), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendNull().appendDouble(2).appendDouble(3).build().filter(0, 2, 3) ); assertAllEquals(blocks); // all these constant-like blocks should be equivalent List moreBlocks = List.of( - new DoubleArrayVector(new double[] { 9, 9 }, 2).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 9, 9 }, 2).asBlock(), new DoubleArrayBlock( new double[] { 9, 9 }, 2, new int[] { 0, 1, 2 }, BitSet.valueOf(new byte[] { 0b000 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), new DoubleArrayBlock( new double[] { 9, 9, 4 }, 2, new int[] { 0, 1, 2 }, BitSet.valueOf(new byte[] { 0b100 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), - new DoubleArrayVector(new double[] { 9, 9 }, 2).filter(0, 1).asBlock(), - new DoubleArrayVector(new double[] { 9, 9, 4 }, 2).filter(0, 1).asBlock(), - new DoubleArrayVector(new double[] { 9, 9, 4 }, 3).filter(0, 1).asBlock(), - new DoubleArrayVector(new double[] { 9, 4, 9 }, 3).filter(0, 2).asBlock(), - DoubleBlock.newConstantBlockWith(9, 2), - DoubleBlock.newBlockBuilder(2).appendDouble(9).appendDouble(9).build(), - DoubleBlock.newBlockBuilder(2).appendDouble(9).appendDouble(9).build().filter(0, 1), - DoubleBlock.newBlockBuilder(2).appendDouble(9).appendDouble(4).appendDouble(9).build().filter(0, 2), - DoubleBlock.newBlockBuilder(2).appendDouble(9).appendNull().appendDouble(9).build().filter(0, 2) + blockFactory.newDoubleArrayVector(new double[] { 9, 9 }, 2).filter(0, 1).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 9, 9, 4 }, 2).filter(0, 1).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 9, 9, 4 }, 3).filter(0, 1).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 9, 4, 9 }, 3).filter(0, 2).asBlock(), + blockFactory.newConstantDoubleBlockWith(9, 2), + blockFactory.newDoubleBlockBuilder(2).appendDouble(9).appendDouble(9).build(), + blockFactory.newDoubleBlockBuilder(2).appendDouble(9).appendDouble(9).build().filter(0, 1), + blockFactory.newDoubleBlockBuilder(2).appendDouble(9).appendDouble(4).appendDouble(9).build().filter(0, 2), + blockFactory.newDoubleBlockBuilder(2).appendDouble(9).appendNull().appendDouble(9).build().filter(0, 2) ); assertAllEquals(moreBlocks); } @@ -177,15 +185,15 @@ public void testBlockEquality() { public void testVectorInequality() { // all these vectors should NOT be equivalent List notEqualVectors = List.of( - new DoubleArrayVector(new double[] { 1 }, 1), - new DoubleArrayVector(new double[] { 9 }, 1), - new DoubleArrayVector(new double[] { 1, 2 }, 2), - new DoubleArrayVector(new double[] { 1, 2, 3 }, 3), - new DoubleArrayVector(new double[] { 1, 2, 4 }, 3), - DoubleBlock.newConstantBlockWith(9, 2).asVector(), - DoubleBlock.newBlockBuilder(2).appendDouble(1).appendDouble(2).build().asVector().filter(1), - 
DoubleBlock.newBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(5).build().asVector(), - DoubleBlock.newBlockBuilder(1).appendDouble(1).appendDouble(2).appendDouble(3).appendDouble(4).build().asVector() + blockFactory.newDoubleArrayVector(new double[] { 1 }, 1), + blockFactory.newDoubleArrayVector(new double[] { 9 }, 1), + blockFactory.newDoubleArrayVector(new double[] { 1, 2 }, 2), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3 }, 3), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 4 }, 3), + blockFactory.newConstantDoubleBlockWith(9, 2).asVector(), + blockFactory.newDoubleBlockBuilder(2).appendDouble(1).appendDouble(2).build().asVector().filter(1), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(5).build().asVector(), + blockFactory.newDoubleBlockBuilder(1).appendDouble(1).appendDouble(2).appendDouble(3).appendDouble(4).build().asVector() ); assertAllNotEquals(notEqualVectors); } @@ -193,27 +201,27 @@ public void testVectorInequality() { public void testBlockInequality() { // all these blocks should NOT be equivalent List notEqualBlocks = List.of( - new DoubleArrayVector(new double[] { 1 }, 1).asBlock(), - new DoubleArrayVector(new double[] { 9 }, 1).asBlock(), - new DoubleArrayVector(new double[] { 1, 2 }, 2).asBlock(), - new DoubleArrayVector(new double[] { 1, 2, 3 }, 3).asBlock(), - new DoubleArrayVector(new double[] { 1, 2, 4 }, 3).asBlock(), - DoubleBlock.newConstantBlockWith(9, 2), - DoubleBlock.newBlockBuilder(2).appendDouble(1).appendDouble(2).build().filter(1), - DoubleBlock.newBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(5).build(), - DoubleBlock.newBlockBuilder(1).appendDouble(1).appendDouble(2).appendDouble(3).appendDouble(4).build(), - DoubleBlock.newBlockBuilder(1).appendDouble(1).appendNull().build(), - DoubleBlock.newBlockBuilder(1).appendDouble(1).appendNull().appendDouble(3).build(), - DoubleBlock.newBlockBuilder(1).appendDouble(1).appendDouble(3).build(), - DoubleBlock.newBlockBuilder(3).appendDouble(1).beginPositionEntry().appendDouble(2).appendDouble(3).build() + blockFactory.newDoubleArrayVector(new double[] { 1 }, 1).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 9 }, 1).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 1, 2 }, 2).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3 }, 3).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 4 }, 3).asBlock(), + blockFactory.newConstantDoubleBlockWith(9, 2), + blockFactory.newDoubleBlockBuilder(2).appendDouble(1).appendDouble(2).build().filter(1), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(5).build(), + blockFactory.newDoubleBlockBuilder(1).appendDouble(1).appendDouble(2).appendDouble(3).appendDouble(4).build(), + blockFactory.newDoubleBlockBuilder(1).appendDouble(1).appendNull().build(), + blockFactory.newDoubleBlockBuilder(1).appendDouble(1).appendNull().appendDouble(3).build(), + blockFactory.newDoubleBlockBuilder(1).appendDouble(1).appendDouble(3).build(), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).beginPositionEntry().appendDouble(2).appendDouble(3).build() ); assertAllNotEquals(notEqualBlocks); } public void testSimpleBlockWithSingleNull() { List blocks = List.of( - DoubleBlock.newBlockBuilder(3).appendDouble(1.1).appendNull().appendDouble(3.1).build(), - DoubleBlock.newBlockBuilder(3).appendDouble(1.1).appendNull().appendDouble(3.1).build() + 
blockFactory.newDoubleBlockBuilder(3).appendDouble(1.1).appendNull().appendDouble(3.1).build(), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1.1).appendNull().appendDouble(3.1).build() ); assertEquals(3, blocks.get(0).getPositionCount()); assertTrue(blocks.get(0).isNull(1)); @@ -223,8 +231,8 @@ public void testSimpleBlockWithSingleNull() { public void testSimpleBlockWithManyNulls() { int positions = randomIntBetween(1, 256); boolean grow = randomBoolean(); - DoubleBlock.Builder builder1 = DoubleBlock.newBlockBuilder(grow ? 0 : positions); - DoubleBlock.Builder builder2 = DoubleBlock.newBlockBuilder(grow ? 0 : positions); + DoubleBlock.Builder builder1 = blockFactory.newDoubleBlockBuilder(grow ? 0 : positions); + DoubleBlock.Builder builder2 = blockFactory.newDoubleBlockBuilder(grow ? 0 : positions); for (int p = 0; p < positions; p++) { builder1.appendNull(); builder2.appendNull(); @@ -241,8 +249,8 @@ public void testSimpleBlockWithManyNulls() { public void testSimpleBlockWithSingleMultiValue() { List blocks = List.of( - DoubleBlock.newBlockBuilder(1).beginPositionEntry().appendDouble(1.1).appendDouble(2.2).build(), - DoubleBlock.newBlockBuilder(1).beginPositionEntry().appendDouble(1.1).appendDouble(2.2).build() + blockFactory.newDoubleBlockBuilder(1).beginPositionEntry().appendDouble(1.1).appendDouble(2.2).build(), + blockFactory.newDoubleBlockBuilder(1).beginPositionEntry().appendDouble(1.1).appendDouble(2.2).build() ); assert blocks.get(0).getPositionCount() == 1 && blocks.get(0).getValueCount(0) == 2; assertAllEquals(blocks); @@ -251,9 +259,9 @@ public void testSimpleBlockWithSingleMultiValue() { public void testSimpleBlockWithManyMultiValues() { int positions = randomIntBetween(1, 256); boolean grow = randomBoolean(); - DoubleBlock.Builder builder1 = DoubleBlock.newBlockBuilder(grow ? 0 : positions); - DoubleBlock.Builder builder2 = DoubleBlock.newBlockBuilder(grow ? 0 : positions); - DoubleBlock.Builder builder3 = DoubleBlock.newBlockBuilder(grow ? 0 : positions); + DoubleBlock.Builder builder1 = blockFactory.newDoubleBlockBuilder(grow ? 0 : positions); + DoubleBlock.Builder builder2 = blockFactory.newDoubleBlockBuilder(grow ? 0 : positions); + DoubleBlock.Builder builder3 = blockFactory.newDoubleBlockBuilder(grow ? 
0 : positions); for (int pos = 0; pos < positions; pos++) { builder1.beginPositionEntry(); builder2.beginPositionEntry(); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/FilteredBlockTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/FilteredBlockTests.java index f43159b7ce9bd..dc78b3715d12a 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/FilteredBlockTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/FilteredBlockTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.util.BytesRefArray; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.test.ESTestCase; @@ -196,57 +197,85 @@ public void testFilterOnNoNullsBlock() { public void testFilterToStringSimple() { BitSet nulls = BitSet.valueOf(new byte[] { 0x08 }); // any non-empty bitset, that does not affect the filter, should suffice - var boolVector = new BooleanArrayVector(new boolean[] { true, false, false, true }, 4); - var boolBlock = new BooleanArrayBlock( + var boolVector = blockFactory.newBooleanArrayVector(new boolean[] { true, false, false, true }, 4); + var boolBlock = blockFactory.newBooleanArrayBlock( new boolean[] { true, false, false, true }, 4, null, nulls, randomFrom(Block.MvOrdering.values()) ); - for (Object obj : List.of(boolVector.filter(0, 2), boolVector.asBlock().filter(0, 2), boolBlock.filter(0, 2))) { + for (Releasable obj : List.of(boolVector.filter(0, 2), boolVector.asBlock().filter(0, 2), boolBlock.filter(0, 2))) { String s = obj.toString(); assertThat(s, containsString("[true, false]")); assertThat(s, containsString("positions=2")); + Releasables.close(obj); } + Releasables.close(boolVector, boolBlock); - var intVector = new IntArrayVector(new int[] { 10, 20, 30, 40 }, 4); - var intBlock = new IntArrayBlock(new int[] { 10, 20, 30, 40 }, 4, null, nulls, randomFrom(Block.MvOrdering.values())); - for (Object obj : List.of(intVector.filter(0, 2), intVector.asBlock().filter(0, 2), intBlock.filter(0, 2))) { + var intVector = blockFactory.newIntArrayVector(new int[] { 10, 20, 30, 40 }, 4); + var intBlock = blockFactory.newIntArrayBlock(new int[] { 10, 20, 30, 40 }, 4, null, nulls, randomFrom(Block.MvOrdering.values())); + for (Releasable obj : List.of(intVector.filter(0, 2), intVector.asBlock().filter(0, 2), intBlock.filter(0, 2))) { String s = obj.toString(); assertThat(s, containsString("[10, 30]")); assertThat(s, containsString("positions=2")); + Releasables.close(obj); } + Releasables.close(intVector, intBlock); - var longVector = new LongArrayVector(new long[] { 100L, 200L, 300L, 400L }, 4); - var longBlock = new LongArrayBlock(new long[] { 100L, 200L, 300L, 400L }, 4, null, nulls, randomFrom(Block.MvOrdering.values())); - for (Object obj : List.of(longVector.filter(0, 2), longVector.asBlock().filter(0, 2), longBlock.filter(0, 2))) { + var longVector = blockFactory.newLongArrayVector(new long[] { 100L, 200L, 300L, 400L }, 4); + var longBlock = blockFactory.newLongArrayBlock( + new long[] { 100L, 200L, 300L, 400L }, + 4, + null, + nulls, + randomFrom(Block.MvOrdering.values()) + ); + for (Releasable obj : List.of(longVector.filter(0, 2), longVector.asBlock().filter(0, 2), longBlock.filter(0, 2))) { String s = obj.toString(); assertThat(s, 
containsString("[100, 300]")); assertThat(s, containsString("positions=2")); + Releasables.close(obj); } - var doubleVector = new DoubleArrayVector(new double[] { 1.1, 2.2, 3.3, 4.4 }, 4); - var doubleBlock = new DoubleArrayBlock(new double[] { 1.1, 2.2, 3.3, 4.4 }, 4, null, nulls, randomFrom(Block.MvOrdering.values())); - for (Object obj : List.of(doubleVector.filter(0, 2), doubleVector.asBlock().filter(0, 2), doubleBlock.filter(0, 2))) { + Releasables.close(longVector, longBlock); + + var doubleVector = blockFactory.newDoubleArrayVector(new double[] { 1.1, 2.2, 3.3, 4.4 }, 4); + var doubleBlock = blockFactory.newDoubleArrayBlock( + new double[] { 1.1, 2.2, 3.3, 4.4 }, + 4, + null, + nulls, + randomFrom(Block.MvOrdering.values()) + ); + for (Releasable obj : List.of(doubleVector.filter(0, 2), doubleVector.asBlock().filter(0, 2), doubleBlock.filter(0, 2))) { String s = obj.toString(); assertThat(s, containsString("[1.1, 3.3]")); assertThat(s, containsString("positions=2")); + Releasables.close(obj); } + Releasables.close(doubleVector, doubleBlock); + assert new BytesRef("1a").toString().equals("[31 61]") && new BytesRef("3c").toString().equals("[33 63]"); - try (var bytesRefArray = arrayOf("1a", "2b", "3c", "4d")) { - var bytesRefVector = new BytesRefArrayVector(bytesRefArray, 4); - var bytesRefBlock = new BytesRefArrayBlock(bytesRefArray, 4, null, nulls, randomFrom(Block.MvOrdering.values())); - for (Object obj : List.of(bytesRefVector.filter(0, 2), bytesRefVector.asBlock().filter(0, 2), bytesRefBlock.filter(0, 2))) { - assertThat( - obj.toString(), - either(equalTo("BytesRefArrayVector[positions=2]")).or( - equalTo("BytesRefVectorBlock[vector=BytesRefArrayVector[positions=2]]") - ) - ); - } + var bytesRefVector = blockFactory.newBytesRefArrayVector(arrayOf("1a", "2b", "3c", "4d"), 4); + var bytesRefBlock = blockFactory.newBytesRefArrayBlock( + arrayOf("1a", "2b", "3c", "4d"), + 4, + null, + nulls, + randomFrom(Block.MvOrdering.values()) + ); + for (Releasable obj : List.of(bytesRefVector.filter(0, 2), bytesRefVector.asBlock().filter(0, 2), bytesRefBlock.filter(0, 2))) { + assertThat( + obj.toString(), + either(equalTo("BytesRefArrayVector[positions=2]")).or( + equalTo("BytesRefVectorBlock[vector=BytesRefArrayVector[positions=2]]") + ) + ); + Releasables.close(obj); } + Releasables.close(bytesRefVector, bytesRefBlock); } public void testFilterToStringMultiValue() { @@ -259,7 +288,10 @@ public void testFilterToStringMultiValue() { var filter = block.filter(0, 1); assertThat( filter.toString(), - containsString("BooleanArrayBlock[positions=2, mvOrdering=UNORDERED, values=[true, true, false, false]]") + containsString( + "BooleanArrayBlock[positions=2, mvOrdering=UNORDERED, " + + "vector=BooleanArrayVector[positions=4, values=[true, true, false, false]]]" + ) ); Releasables.close(builder, block); releaseAndAssertBreaker(filter); @@ -271,7 +303,12 @@ public void testFilterToStringMultiValue() { builder.beginPositionEntry().appendInt(90).appendInt(1000).endPositionEntry(); var block = builder.build(); var filter = block.filter(0, 1); - assertThat(filter.toString(), containsString("IntArrayBlock[positions=2, mvOrdering=UNORDERED, values=[0, 10, 20, 50]]")); + assertThat( + filter.toString(), + containsString( + "IntArrayBlock[positions=2, mvOrdering=UNORDERED, vector=IntArrayVector[positions=4, values=[0, 10, 20, 50]]]" + ) + ); Releasables.close(builder, block); releaseAndAssertBreaker(filter); } @@ -282,7 +319,12 @@ public void testFilterToStringMultiValue() { 
builder.beginPositionEntry().appendLong(90).appendLong(1000).endPositionEntry(); var block = builder.build(); var filter = block.filter(0, 1); - assertThat(filter.toString(), containsString("LongArrayBlock[positions=2, mvOrdering=UNORDERED, values=[0, 10, 20, 50]]")); + assertThat( + filter.toString(), + containsString( + "LongArrayBlock[positions=2, mvOrdering=UNORDERED, vector=LongArrayVector[positions=4, values=[0, 10, 20, 50]]]" + ) + ); Releasables.close(builder, block); releaseAndAssertBreaker(filter); } @@ -295,7 +337,10 @@ public void testFilterToStringMultiValue() { var filter = block.filter(0, 1); assertThat( filter.toString(), - containsString("DoubleArrayBlock[positions=2, mvOrdering=UNORDERED, values=[0.0, 10.0, 0.002, 1.0E9]]") + containsString( + "DoubleArrayBlock[positions=2, mvOrdering=UNORDERED, " + + "vector=DoubleArrayVector[positions=4, values=[0.0, 10.0, 0.002, 1.0E9]]]" + ) ); Releasables.close(builder, block); releaseAndAssertBreaker(filter); @@ -309,7 +354,10 @@ public void testFilterToStringMultiValue() { builder.beginPositionEntry().appendBytesRef(new BytesRef("pig")).appendBytesRef(new BytesRef("chicken")).endPositionEntry(); var block = builder.build(); var filter = block.filter(0, 1); - assertThat(filter.toString(), containsString("BytesRefArrayBlock[positions=2, mvOrdering=UNORDERED, values=4]")); + assertThat( + filter.toString(), + containsString("BytesRefArrayBlock[positions=2, mvOrdering=UNORDERED, vector=BytesRefArrayVector[positions=4]]") + ); assertThat(filter.getPositionCount(), equalTo(2)); Releasables.close(builder, block); releaseAndAssertBreaker(filter); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/IntBlockEqualityTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/IntBlockEqualityTests.java index 40c84324f13d2..5beb091cbfaca 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/IntBlockEqualityTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/IntBlockEqualityTests.java @@ -7,22 +7,24 @@ package org.elasticsearch.compute.data; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.compute.operator.ComputeTestCase; import java.util.BitSet; import java.util.List; -public class IntBlockEqualityTests extends ESTestCase { +public class IntBlockEqualityTests extends ComputeTestCase { + + static final BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); public void testEmptyVector() { // all these "empty" vectors should be equivalent List vectors = List.of( - new IntArrayVector(new int[] {}, 0), - new IntArrayVector(new int[] { 0 }, 0), - IntBlock.newConstantBlockWith(0, 0).asVector(), - IntBlock.newConstantBlockWith(0, 0).filter().asVector(), - IntBlock.newBlockBuilder(0).build().asVector(), - IntBlock.newBlockBuilder(0).appendInt(1).build().asVector().filter() + blockFactory.newIntArrayVector(new int[] {}, 0), + blockFactory.newIntArrayVector(new int[] { 0 }, 0), + blockFactory.newConstantIntVector(0, 0), + blockFactory.newConstantIntVector(0, 0).filter(), + blockFactory.newIntBlockBuilder(0).build().asVector(), + blockFactory.newIntBlockBuilder(0).appendInt(1).build().asVector().filter() ); assertAllEquals(vectors); } @@ -30,12 +32,24 @@ public void testEmptyVector() { public void testEmptyBlock() { // all these "empty" vectors should be equivalent List blocks = List.of( - new IntArrayBlock(new int[] {}, 0, new int[] {}, BitSet.valueOf(new byte[] { 0b00 }), 
randomFrom(Block.MvOrdering.values())), - new IntArrayBlock(new int[] { 0 }, 0, new int[] {}, BitSet.valueOf(new byte[] { 0b00 }), randomFrom(Block.MvOrdering.values())), - IntBlock.newConstantBlockWith(0, 0), - IntBlock.newBlockBuilder(0).build(), - IntBlock.newBlockBuilder(0).appendInt(1).build().filter(), - IntBlock.newBlockBuilder(0).appendNull().build().filter() + blockFactory.newIntArrayBlock( + new int[] {}, + 0, + new int[] { 0 }, + BitSet.valueOf(new byte[] { 0b00 }), + randomFrom(Block.MvOrdering.values()) + ), + blockFactory.newIntArrayBlock( + new int[] { 0 }, + 0, + new int[] { 0 }, + BitSet.valueOf(new byte[] { 0b00 }), + randomFrom(Block.MvOrdering.values()) + ), + blockFactory.newConstantIntBlockWith(0, 0), + blockFactory.newIntBlockBuilder(0).build(), + blockFactory.newIntBlockBuilder(0).appendInt(1).build().filter(), + blockFactory.newIntBlockBuilder(0).appendNull().build().filter() ); assertAllEquals(blocks); } @@ -43,34 +57,34 @@ public void testEmptyBlock() { public void testVectorEquality() { // all these vectors should be equivalent List vectors = List.of( - new IntArrayVector(new int[] { 1, 2, 3 }, 3), - new IntArrayVector(new int[] { 1, 2, 3 }, 3).asBlock().asVector(), - new IntArrayVector(new int[] { 1, 2, 3, 4 }, 3), - new IntArrayVector(new int[] { 1, 2, 3 }, 3).filter(0, 1, 2), - new IntArrayVector(new int[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2), - new IntArrayVector(new int[] { 0, 1, 2, 3 }, 4).filter(1, 2, 3), - new IntArrayVector(new int[] { 1, 4, 2, 3 }, 4).filter(0, 2, 3), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(2).appendInt(3).build().asVector(), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(2).appendInt(3).build().asVector().filter(0, 1, 2), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(4).appendInt(2).appendInt(3).build().filter(0, 2, 3).asVector(), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(4).appendInt(2).appendInt(3).build().asVector().filter(0, 2, 3) + blockFactory.newIntArrayVector(new int[] { 1, 2, 3 }, 3), + blockFactory.newIntArrayVector(new int[] { 1, 2, 3 }, 3).asBlock().asVector(), + blockFactory.newIntArrayVector(new int[] { 1, 2, 3, 4 }, 3), + blockFactory.newIntArrayVector(new int[] { 1, 2, 3 }, 3).filter(0, 1, 2), + blockFactory.newIntArrayVector(new int[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2), + blockFactory.newIntArrayVector(new int[] { 0, 1, 2, 3 }, 4).filter(1, 2, 3), + blockFactory.newIntArrayVector(new int[] { 1, 4, 2, 3 }, 4).filter(0, 2, 3), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(2).appendInt(3).build().asVector(), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(2).appendInt(3).build().asVector().filter(0, 1, 2), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(4).appendInt(2).appendInt(3).build().filter(0, 2, 3).asVector(), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(4).appendInt(2).appendInt(3).build().asVector().filter(0, 2, 3) ); assertAllEquals(vectors); // all these constant-like vectors should be equivalent List moreVectors = List.of( - new IntArrayVector(new int[] { 1, 1, 1 }, 3), - new IntArrayVector(new int[] { 1, 1, 1 }, 3).asBlock().asVector(), - new IntArrayVector(new int[] { 1, 1, 1, 1 }, 3), - new IntArrayVector(new int[] { 1, 1, 1 }, 3).filter(0, 1, 2), - new IntArrayVector(new int[] { 1, 1, 1, 4 }, 4).filter(0, 1, 2), - new IntArrayVector(new int[] { 3, 1, 1, 1 }, 4).filter(1, 2, 3), - new IntArrayVector(new int[] { 1, 4, 1, 1 }, 4).filter(0, 2, 3), - IntBlock.newConstantBlockWith(1, 3).asVector(), - 
IntBlock.newBlockBuilder(3).appendInt(1).appendInt(1).appendInt(1).build().asVector(), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(1).appendInt(1).build().asVector().filter(0, 1, 2), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(4).appendInt(1).appendInt(1).build().filter(0, 2, 3).asVector(), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(4).appendInt(1).appendInt(1).build().asVector().filter(0, 2, 3) + blockFactory.newIntArrayVector(new int[] { 1, 1, 1 }, 3), + blockFactory.newIntArrayVector(new int[] { 1, 1, 1 }, 3).asBlock().asVector(), + blockFactory.newIntArrayVector(new int[] { 1, 1, 1, 1 }, 3), + blockFactory.newIntArrayVector(new int[] { 1, 1, 1 }, 3).filter(0, 1, 2), + blockFactory.newIntArrayVector(new int[] { 1, 1, 1, 4 }, 4).filter(0, 1, 2), + blockFactory.newIntArrayVector(new int[] { 3, 1, 1, 1 }, 4).filter(1, 2, 3), + blockFactory.newIntArrayVector(new int[] { 1, 4, 1, 1 }, 4).filter(0, 2, 3), + blockFactory.newConstantIntBlockWith(1, 3).asVector(), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(1).appendInt(1).build().asVector(), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(1).appendInt(1).build().asVector().filter(0, 1, 2), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(4).appendInt(1).appendInt(1).build().filter(0, 2, 3).asVector(), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(4).appendInt(1).appendInt(1).build().asVector().filter(0, 2, 3) ); assertAllEquals(moreVectors); } @@ -78,58 +92,60 @@ public void testVectorEquality() { public void testBlockEquality() { // all these blocks should be equivalent List blocks = List.of( - new IntArrayVector(new int[] { 1, 2, 3 }, 3).asBlock(), + new IntArrayVector(new int[] { 1, 2, 3 }, 3, blockFactory).asBlock(), new IntArrayBlock( new int[] { 1, 2, 3 }, 3, new int[] { 0, 1, 2, 3 }, BitSet.valueOf(new byte[] { 0b000 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), new IntArrayBlock( new int[] { 1, 2, 3, 4 }, 3, new int[] { 0, 1, 2, 3 }, BitSet.valueOf(new byte[] { 0b1000 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), - new IntArrayVector(new int[] { 1, 2, 3 }, 3).filter(0, 1, 2).asBlock(), - new IntArrayVector(new int[] { 1, 2, 3, 4 }, 3).filter(0, 1, 2).asBlock(), - new IntArrayVector(new int[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2).asBlock(), - new IntArrayVector(new int[] { 1, 2, 4, 3 }, 4).filter(0, 1, 3).asBlock(), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(2).appendInt(3).build(), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(2).appendInt(3).build().filter(0, 1, 2), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(4).appendInt(2).appendInt(3).build().filter(0, 2, 3), - IntBlock.newBlockBuilder(3).appendInt(1).appendNull().appendInt(2).appendInt(3).build().filter(0, 2, 3) + new IntArrayVector(new int[] { 1, 2, 3 }, 3, blockFactory).filter(0, 1, 2).asBlock(), + new IntArrayVector(new int[] { 1, 2, 3, 4 }, 3, blockFactory).filter(0, 1, 2).asBlock(), + new IntArrayVector(new int[] { 1, 2, 3, 4 }, 4, blockFactory).filter(0, 1, 2).asBlock(), + new IntArrayVector(new int[] { 1, 2, 4, 3 }, 4, blockFactory).filter(0, 1, 3).asBlock(), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(2).appendInt(3).build(), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(2).appendInt(3).build().filter(0, 1, 2), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(4).appendInt(2).appendInt(3).build().filter(0, 2, 3), + 
blockFactory.newIntBlockBuilder(3).appendInt(1).appendNull().appendInt(2).appendInt(3).build().filter(0, 2, 3) ); assertAllEquals(blocks); // all these constant-like blocks should be equivalent List moreBlocks = List.of( - new IntArrayVector(new int[] { 9, 9 }, 2).asBlock(), - new IntArrayBlock( + blockFactory.newIntArrayVector(new int[] { 9, 9 }, 2).asBlock(), + blockFactory.newIntArrayBlock( new int[] { 9, 9 }, 2, new int[] { 0, 1, 2 }, BitSet.valueOf(new byte[] { 0b000 }), randomFrom(Block.MvOrdering.values()) ), - new IntArrayBlock( + blockFactory.newIntArrayBlock( new int[] { 9, 9, 4 }, 2, new int[] { 0, 1, 2 }, BitSet.valueOf(new byte[] { 0b100 }), randomFrom(Block.MvOrdering.values()) ), - new IntArrayVector(new int[] { 9, 9 }, 2).filter(0, 1).asBlock(), - new IntArrayVector(new int[] { 9, 9, 4 }, 2).filter(0, 1).asBlock(), - new IntArrayVector(new int[] { 9, 9, 4 }, 3).filter(0, 1).asBlock(), - new IntArrayVector(new int[] { 9, 4, 9 }, 3).filter(0, 2).asBlock(), - IntBlock.newConstantBlockWith(9, 2), - IntBlock.newBlockBuilder(2).appendInt(9).appendInt(9).build(), - IntBlock.newBlockBuilder(2).appendInt(9).appendInt(9).build().filter(0, 1), - IntBlock.newBlockBuilder(2).appendInt(9).appendInt(4).appendInt(9).build().filter(0, 2), - IntBlock.newBlockBuilder(2).appendInt(9).appendNull().appendInt(9).build().filter(0, 2) + blockFactory.newIntArrayVector(new int[] { 9, 9 }, 2).filter(0, 1).asBlock(), + blockFactory.newIntArrayVector(new int[] { 9, 9, 4 }, 2).filter(0, 1).asBlock(), + blockFactory.newIntArrayVector(new int[] { 9, 9, 4 }, 3).filter(0, 1).asBlock(), + blockFactory.newIntArrayVector(new int[] { 9, 4, 9 }, 3).filter(0, 2).asBlock(), + blockFactory.newConstantIntBlockWith(9, 2), + blockFactory.newIntBlockBuilder(2).appendInt(9).appendInt(9).build(), + blockFactory.newIntBlockBuilder(2).appendInt(9).appendInt(9).build().filter(0, 1), + blockFactory.newIntBlockBuilder(2).appendInt(9).appendInt(4).appendInt(9).build().filter(0, 2), + blockFactory.newIntBlockBuilder(2).appendInt(9).appendNull().appendInt(9).build().filter(0, 2) ); assertAllEquals(moreBlocks); } @@ -137,15 +153,15 @@ public void testBlockEquality() { public void testVectorInequality() { // all these vectors should NOT be equivalent List notEqualVectors = List.of( - new IntArrayVector(new int[] { 1 }, 1), - new IntArrayVector(new int[] { 9 }, 1), - new IntArrayVector(new int[] { 1, 2 }, 2), - new IntArrayVector(new int[] { 1, 2, 3 }, 3), - new IntArrayVector(new int[] { 1, 2, 4 }, 3), - IntBlock.newConstantBlockWith(9, 2).asVector(), - IntBlock.newBlockBuilder(2).appendInt(1).appendInt(2).build().asVector().filter(1), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(2).appendInt(5).build().asVector(), - IntBlock.newBlockBuilder(1).appendInt(1).appendInt(2).appendInt(3).appendInt(4).build().asVector() + blockFactory.newIntArrayVector(new int[] { 1 }, 1), + blockFactory.newIntArrayVector(new int[] { 9 }, 1), + blockFactory.newIntArrayVector(new int[] { 1, 2 }, 2), + blockFactory.newIntArrayVector(new int[] { 1, 2, 3 }, 3), + blockFactory.newIntArrayVector(new int[] { 1, 2, 4 }, 3), + blockFactory.newConstantIntBlockWith(9, 2).asVector(), + blockFactory.newIntBlockBuilder(2).appendInt(1).appendInt(2).build().asVector().filter(1), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(2).appendInt(5).build().asVector(), + blockFactory.newIntBlockBuilder(1).appendInt(1).appendInt(2).appendInt(3).appendInt(4).build().asVector() ); assertAllNotEquals(notEqualVectors); } @@ -153,27 +169,27 @@ public void 
testVectorInequality() { public void testBlockInequality() { // all these blocks should NOT be equivalent List notEqualBlocks = List.of( - new IntArrayVector(new int[] { 1 }, 1).asBlock(), - new IntArrayVector(new int[] { 9 }, 1).asBlock(), - new IntArrayVector(new int[] { 1, 2 }, 2).asBlock(), - new IntArrayVector(new int[] { 1, 2, 3 }, 3).asBlock(), - new IntArrayVector(new int[] { 1, 2, 4 }, 3).asBlock(), - IntBlock.newConstantBlockWith(9, 2), - IntBlock.newBlockBuilder(2).appendInt(1).appendInt(2).build().filter(1), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(2).appendInt(5).build(), - IntBlock.newBlockBuilder(1).appendInt(1).appendInt(2).appendInt(3).appendInt(4).build(), - IntBlock.newBlockBuilder(1).appendInt(1).appendNull().build(), - IntBlock.newBlockBuilder(1).appendInt(1).appendNull().appendInt(3).build(), - IntBlock.newBlockBuilder(1).appendInt(1).appendInt(3).build(), - IntBlock.newBlockBuilder(3).appendInt(1).beginPositionEntry().appendInt(2).appendInt(3).build() + blockFactory.newIntArrayVector(new int[] { 1 }, 1).asBlock(), + blockFactory.newIntArrayVector(new int[] { 9 }, 1).asBlock(), + blockFactory.newIntArrayVector(new int[] { 1, 2 }, 2).asBlock(), + blockFactory.newIntArrayVector(new int[] { 1, 2, 3 }, 3).asBlock(), + blockFactory.newIntArrayVector(new int[] { 1, 2, 4 }, 3).asBlock(), + blockFactory.newConstantIntBlockWith(9, 2), + blockFactory.newIntBlockBuilder(2).appendInt(1).appendInt(2).build().filter(1), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(2).appendInt(5).build(), + blockFactory.newIntBlockBuilder(1).appendInt(1).appendInt(2).appendInt(3).appendInt(4).build(), + blockFactory.newIntBlockBuilder(1).appendInt(1).appendNull().build(), + blockFactory.newIntBlockBuilder(1).appendInt(1).appendNull().appendInt(3).build(), + blockFactory.newIntBlockBuilder(1).appendInt(1).appendInt(3).build(), + blockFactory.newIntBlockBuilder(3).appendInt(1).beginPositionEntry().appendInt(2).appendInt(3).build() ); assertAllNotEquals(notEqualBlocks); } public void testSimpleBlockWithSingleNull() { List blocks = List.of( - IntBlock.newBlockBuilder(1).appendInt(1).appendNull().appendInt(3).build(), - IntBlock.newBlockBuilder(1).appendInt(1).appendNull().appendInt(3).build() + blockFactory.newIntBlockBuilder(1).appendInt(1).appendNull().appendInt(3).build(), + blockFactory.newIntBlockBuilder(1).appendInt(1).appendNull().appendInt(3).build() ); assertEquals(3, blocks.get(0).getPositionCount()); assertTrue(blocks.get(0).isNull(1)); @@ -184,8 +200,8 @@ public void testSimpleBlockWithSingleNull() { public void testSimpleBlockWithManyNulls() { int positions = randomIntBetween(1, 256); boolean grow = randomBoolean(); - IntBlock.Builder builder1 = IntBlock.newBlockBuilder(grow ? 0 : positions); - IntBlock.Builder builder2 = IntBlock.newBlockBuilder(grow ? 0 : positions); + IntBlock.Builder builder1 = blockFactory.newIntBlockBuilder(grow ? 0 : positions); + IntBlock.Builder builder2 = blockFactory.newIntBlockBuilder(grow ? 
0 : positions); for (int p = 0; p < positions; p++) { builder1.appendNull(); builder2.appendNull(); @@ -202,8 +218,8 @@ public void testSimpleBlockWithManyNulls() { public void testSimpleBlockWithSingleMultiValue() { List blocks = List.of( - IntBlock.newBlockBuilder(1).beginPositionEntry().appendInt(1).appendInt(2).build(), - IntBlock.newBlockBuilder(1).beginPositionEntry().appendInt(1).appendInt(2).build() + blockFactory.newIntBlockBuilder(1).beginPositionEntry().appendInt(1).appendInt(2).build(), + blockFactory.newIntBlockBuilder(1).beginPositionEntry().appendInt(1).appendInt(2).build() ); assertEquals(1, blocks.get(0).getPositionCount()); assertEquals(2, blocks.get(0).getValueCount(0)); @@ -213,9 +229,9 @@ public void testSimpleBlockWithSingleMultiValue() { public void testSimpleBlockWithManyMultiValues() { int positions = randomIntBetween(1, 256); boolean grow = randomBoolean(); - IntBlock.Builder builder1 = IntBlock.newBlockBuilder(grow ? 0 : positions); - IntBlock.Builder builder2 = IntBlock.newBlockBuilder(grow ? 0 : positions); - IntBlock.Builder builder3 = IntBlock.newBlockBuilder(grow ? 0 : positions); + IntBlock.Builder builder1 = blockFactory.newIntBlockBuilder(grow ? 0 : positions); + IntBlock.Builder builder2 = blockFactory.newIntBlockBuilder(grow ? 0 : positions); + IntBlock.Builder builder3 = blockFactory.newIntBlockBuilder(grow ? 0 : positions); for (int pos = 0; pos < positions; pos++) { builder1.beginPositionEntry(); builder2.beginPositionEntry(); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/LongBlockEqualityTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/LongBlockEqualityTests.java index a24b4a4dd6fa6..3e425439bb800 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/LongBlockEqualityTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/LongBlockEqualityTests.java @@ -7,22 +7,24 @@ package org.elasticsearch.compute.data; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.compute.operator.ComputeTestCase; import java.util.BitSet; import java.util.List; -public class LongBlockEqualityTests extends ESTestCase { +public class LongBlockEqualityTests extends ComputeTestCase { + + static final BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); public void testEmptyVector() { // all these "empty" vectors should be equivalent List vectors = List.of( - new LongArrayVector(new long[] {}, 0), - new LongArrayVector(new long[] { 0 }, 0), - LongBlock.newConstantBlockWith(0, 0).asVector(), - LongBlock.newConstantBlockWith(0, 0).filter().asVector(), - LongBlock.newBlockBuilder(0).build().asVector(), - LongBlock.newBlockBuilder(0).appendLong(1).build().asVector().filter() + blockFactory.newLongArrayVector(new long[] {}, 0), + blockFactory.newLongArrayVector(new long[] { 0 }, 0), + blockFactory.newConstantLongBlockWith(0, 0).asVector(), + blockFactory.newConstantLongBlockWith(0, 0).filter().asVector(), + blockFactory.newLongBlockBuilder(0).build().asVector(), + blockFactory.newLongBlockBuilder(0).appendLong(1).build().asVector().filter() ); assertAllEquals(vectors); } @@ -30,18 +32,24 @@ public void testEmptyVector() { public void testEmptyBlock() { // all these "empty" vectors should be equivalent List blocks = List.of( - new LongArrayBlock(new long[] {}, 0, new int[] {}, BitSet.valueOf(new byte[] { 0b00 }), randomFrom(Block.MvOrdering.values())), - new LongArrayBlock( + blockFactory.newLongArrayBlock( + new 
long[] {}, + 0, + new int[] { 0 }, + BitSet.valueOf(new byte[] { 0b00 }), + randomFrom(Block.MvOrdering.values()) + ), + blockFactory.newLongArrayBlock( new long[] { 0 }, 0, - new int[] {}, + new int[] { 0 }, BitSet.valueOf(new byte[] { 0b00 }), randomFrom(Block.MvOrdering.values()) ), - LongBlock.newConstantBlockWith(0, 0), - LongBlock.newBlockBuilder(0).build(), - LongBlock.newBlockBuilder(0).appendLong(1).build().filter(), - LongBlock.newBlockBuilder(0).appendNull().build().filter() + blockFactory.newConstantLongBlockWith(0, 0), + blockFactory.newLongBlockBuilder(0).build(), + blockFactory.newLongBlockBuilder(0).appendLong(1).build().filter(), + blockFactory.newLongBlockBuilder(0).appendNull().build().filter() ); assertAllEquals(blocks); } @@ -49,34 +57,34 @@ public void testEmptyBlock() { public void testVectorEquality() { // all these vectors should be equivalent List vectors = List.of( - new LongArrayVector(new long[] { 1, 2, 3 }, 3), - new LongArrayVector(new long[] { 1, 2, 3 }, 3).asBlock().asVector(), - new LongArrayVector(new long[] { 1, 2, 3, 4 }, 3), - new LongArrayVector(new long[] { 1, 2, 3 }, 3).filter(0, 1, 2), - new LongArrayVector(new long[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2), - new LongArrayVector(new long[] { 0, 1, 2, 3 }, 4).filter(1, 2, 3), - new LongArrayVector(new long[] { 1, 4, 2, 3 }, 4).filter(0, 2, 3), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(2).appendLong(3).build().asVector(), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(2).appendLong(3).build().asVector().filter(0, 1, 2), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(4).appendLong(2).appendLong(3).build().filter(0, 2, 3).asVector(), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(4).appendLong(2).appendLong(3).build().asVector().filter(0, 2, 3) + blockFactory.newLongArrayVector(new long[] { 1, 2, 3 }, 3), + blockFactory.newLongArrayVector(new long[] { 1, 2, 3 }, 3).asBlock().asVector(), + blockFactory.newLongArrayVector(new long[] { 1, 2, 3, 4 }, 3), + blockFactory.newLongArrayVector(new long[] { 1, 2, 3 }, 3).filter(0, 1, 2), + blockFactory.newLongArrayVector(new long[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2), + blockFactory.newLongArrayVector(new long[] { 0, 1, 2, 3 }, 4).filter(1, 2, 3), + blockFactory.newLongArrayVector(new long[] { 1, 4, 2, 3 }, 4).filter(0, 2, 3), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(2).appendLong(3).build().asVector(), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(2).appendLong(3).build().asVector().filter(0, 1, 2), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(4).appendLong(2).appendLong(3).build().filter(0, 2, 3).asVector(), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(4).appendLong(2).appendLong(3).build().asVector().filter(0, 2, 3) ); assertAllEquals(vectors); // all these constant-like vectors should be equivalent List moreVectors = List.of( - new LongArrayVector(new long[] { 1, 1, 1 }, 3), - new LongArrayVector(new long[] { 1, 1, 1 }, 3).asBlock().asVector(), - new LongArrayVector(new long[] { 1, 1, 1, 1 }, 3), - new LongArrayVector(new long[] { 1, 1, 1 }, 3).filter(0, 1, 2), - new LongArrayVector(new long[] { 1, 1, 1, 4 }, 4).filter(0, 1, 2), - new LongArrayVector(new long[] { 3, 1, 1, 1 }, 4).filter(1, 2, 3), - new LongArrayVector(new long[] { 1, 4, 1, 1 }, 4).filter(0, 2, 3), - LongBlock.newConstantBlockWith(1, 3).asVector(), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(1).appendLong(1).build().asVector(), - 
LongBlock.newBlockBuilder(3).appendLong(1).appendLong(1).appendLong(1).build().asVector().filter(0, 1, 2), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(4).appendLong(1).appendLong(1).build().filter(0, 2, 3).asVector(), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(4).appendLong(1).appendLong(1).build().asVector().filter(0, 2, 3) + blockFactory.newLongArrayVector(new long[] { 1, 1, 1 }, 3), + blockFactory.newLongArrayVector(new long[] { 1, 1, 1 }, 3).asBlock().asVector(), + blockFactory.newLongArrayVector(new long[] { 1, 1, 1, 1 }, 3), + blockFactory.newLongArrayVector(new long[] { 1, 1, 1 }, 3).filter(0, 1, 2), + blockFactory.newLongArrayVector(new long[] { 1, 1, 1, 4 }, 4).filter(0, 1, 2), + blockFactory.newLongArrayVector(new long[] { 3, 1, 1, 1 }, 4).filter(1, 2, 3), + blockFactory.newLongArrayVector(new long[] { 1, 4, 1, 1 }, 4).filter(0, 2, 3), + blockFactory.newConstantLongBlockWith(1, 3).asVector(), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(1).appendLong(1).build().asVector(), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(1).appendLong(1).build().asVector().filter(0, 1, 2), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(4).appendLong(1).appendLong(1).build().filter(0, 2, 3).asVector(), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(4).appendLong(1).appendLong(1).build().asVector().filter(0, 2, 3) ); assertAllEquals(moreVectors); } @@ -84,58 +92,58 @@ public void testVectorEquality() { public void testBlockEquality() { // all these blocks should be equivalent List blocks = List.of( - new LongArrayVector(new long[] { 1, 2, 3 }, 3).asBlock(), - new LongArrayBlock( + blockFactory.newLongArrayVector(new long[] { 1, 2, 3 }, 3).asBlock(), + blockFactory.newLongArrayBlock( new long[] { 1, 2, 3 }, 3, new int[] { 0, 1, 2, 3 }, BitSet.valueOf(new byte[] { 0b000 }), randomFrom(Block.MvOrdering.values()) ), - new LongArrayBlock( + blockFactory.newLongArrayBlock( new long[] { 1, 2, 3, 4 }, 3, new int[] { 0, 1, 2, 3 }, BitSet.valueOf(new byte[] { 0b1000 }), randomFrom(Block.MvOrdering.values()) ), - new LongArrayVector(new long[] { 1, 2, 3 }, 3).filter(0, 1, 2).asBlock(), - new LongArrayVector(new long[] { 1, 2, 3, 4 }, 3).filter(0, 1, 2).asBlock(), - new LongArrayVector(new long[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2).asBlock(), - new LongArrayVector(new long[] { 1, 2, 4, 3 }, 4).filter(0, 1, 3).asBlock(), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(2).appendLong(3).build(), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(2).appendLong(3).build().filter(0, 1, 2), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(4).appendLong(2).appendLong(3).build().filter(0, 2, 3), - LongBlock.newBlockBuilder(3).appendLong(1).appendNull().appendLong(2).appendLong(3).build().filter(0, 2, 3) + blockFactory.newLongArrayVector(new long[] { 1, 2, 3 }, 3).filter(0, 1, 2).asBlock(), + blockFactory.newLongArrayVector(new long[] { 1, 2, 3, 4 }, 3).filter(0, 1, 2).asBlock(), + blockFactory.newLongArrayVector(new long[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2).asBlock(), + blockFactory.newLongArrayVector(new long[] { 1, 2, 4, 3 }, 4).filter(0, 1, 3).asBlock(), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(2).appendLong(3).build(), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(2).appendLong(3).build().filter(0, 1, 2), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(4).appendLong(2).appendLong(3).build().filter(0, 2, 3), + 
blockFactory.newLongBlockBuilder(3).appendLong(1).appendNull().appendLong(2).appendLong(3).build().filter(0, 2, 3) ); assertAllEquals(blocks); // all these constant-like blocks should be equivalent List moreBlocks = List.of( - new LongArrayVector(new long[] { 9, 9 }, 2).asBlock(), - new LongArrayBlock( + blockFactory.newLongArrayVector(new long[] { 9, 9 }, 2).asBlock(), + blockFactory.newLongArrayBlock( new long[] { 9, 9 }, 2, new int[] { 0, 1, 2 }, BitSet.valueOf(new byte[] { 0b000 }), randomFrom(Block.MvOrdering.values()) ), - new LongArrayBlock( + blockFactory.newLongArrayBlock( new long[] { 9, 9, 4 }, 2, new int[] { 0, 1, 2 }, BitSet.valueOf(new byte[] { 0b100 }), randomFrom(Block.MvOrdering.values()) ), - new LongArrayVector(new long[] { 9, 9 }, 2).filter(0, 1).asBlock(), - new LongArrayVector(new long[] { 9, 9, 4 }, 2).filter(0, 1).asBlock(), - new LongArrayVector(new long[] { 9, 9, 4 }, 3).filter(0, 1).asBlock(), - new LongArrayVector(new long[] { 9, 4, 9 }, 3).filter(0, 2).asBlock(), - LongBlock.newConstantBlockWith(9, 2), - LongBlock.newBlockBuilder(2).appendLong(9).appendLong(9).build(), - LongBlock.newBlockBuilder(2).appendLong(9).appendLong(9).build().filter(0, 1), - LongBlock.newBlockBuilder(2).appendLong(9).appendLong(4).appendLong(9).build().filter(0, 2), - LongBlock.newBlockBuilder(2).appendLong(9).appendNull().appendLong(9).build().filter(0, 2) + blockFactory.newLongArrayVector(new long[] { 9, 9 }, 2).filter(0, 1).asBlock(), + blockFactory.newLongArrayVector(new long[] { 9, 9, 4 }, 2).filter(0, 1).asBlock(), + blockFactory.newLongArrayVector(new long[] { 9, 9, 4 }, 3).filter(0, 1).asBlock(), + blockFactory.newLongArrayVector(new long[] { 9, 4, 9 }, 3).filter(0, 2).asBlock(), + blockFactory.newConstantLongBlockWith(9, 2), + blockFactory.newLongBlockBuilder(2).appendLong(9).appendLong(9).build(), + blockFactory.newLongBlockBuilder(2).appendLong(9).appendLong(9).build().filter(0, 1), + blockFactory.newLongBlockBuilder(2).appendLong(9).appendLong(4).appendLong(9).build().filter(0, 2), + blockFactory.newLongBlockBuilder(2).appendLong(9).appendNull().appendLong(9).build().filter(0, 2) ); assertAllEquals(moreBlocks); } @@ -143,15 +151,15 @@ public void testBlockEquality() { public void testVectorInequality() { // all these vectors should NOT be equivalent List notEqualVectors = List.of( - new LongArrayVector(new long[] { 1 }, 1), - new LongArrayVector(new long[] { 9 }, 1), - new LongArrayVector(new long[] { 1, 2 }, 2), - new LongArrayVector(new long[] { 1, 2, 3 }, 3), - new LongArrayVector(new long[] { 1, 2, 4 }, 3), - LongBlock.newConstantBlockWith(9, 2).asVector(), - LongBlock.newBlockBuilder(2).appendLong(1).appendLong(2).build().asVector().filter(1), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(2).appendLong(5).build().asVector(), - LongBlock.newBlockBuilder(1).appendLong(1).appendLong(2).appendLong(3).appendLong(4).build().asVector() + blockFactory.newLongArrayVector(new long[] { 1 }, 1), + blockFactory.newLongArrayVector(new long[] { 9 }, 1), + blockFactory.newLongArrayVector(new long[] { 1, 2 }, 2), + blockFactory.newLongArrayVector(new long[] { 1, 2, 3 }, 3), + blockFactory.newLongArrayVector(new long[] { 1, 2, 4 }, 3), + blockFactory.newConstantLongBlockWith(9, 2).asVector(), + blockFactory.newLongBlockBuilder(2).appendLong(1).appendLong(2).build().asVector().filter(1), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(2).appendLong(5).build().asVector(), + 
blockFactory.newLongBlockBuilder(1).appendLong(1).appendLong(2).appendLong(3).appendLong(4).build().asVector() ); assertAllNotEquals(notEqualVectors); } @@ -159,27 +167,27 @@ public void testVectorInequality() { public void testBlockInequality() { // all these blocks should NOT be equivalent List notEqualBlocks = List.of( - new LongArrayVector(new long[] { 1 }, 1).asBlock(), - new LongArrayVector(new long[] { 9 }, 1).asBlock(), - new LongArrayVector(new long[] { 1, 2 }, 2).asBlock(), - new LongArrayVector(new long[] { 1, 2, 3 }, 3).asBlock(), - new LongArrayVector(new long[] { 1, 2, 4 }, 3).asBlock(), - LongBlock.newConstantBlockWith(9, 2), - LongBlock.newBlockBuilder(2).appendLong(1).appendLong(2).build().filter(1), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(2).appendLong(5).build(), - LongBlock.newBlockBuilder(1).appendLong(1).appendLong(2).appendLong(3).appendLong(4).build(), - LongBlock.newBlockBuilder(1).appendLong(1).appendNull().build(), - LongBlock.newBlockBuilder(1).appendLong(1).appendNull().appendLong(3).build(), - LongBlock.newBlockBuilder(1).appendLong(1).appendLong(3).build(), - LongBlock.newBlockBuilder(3).appendLong(1).beginPositionEntry().appendLong(2).appendLong(3).build() + blockFactory.newLongArrayVector(new long[] { 1 }, 1).asBlock(), + blockFactory.newLongArrayVector(new long[] { 9 }, 1).asBlock(), + blockFactory.newLongArrayVector(new long[] { 1, 2 }, 2).asBlock(), + blockFactory.newLongArrayVector(new long[] { 1, 2, 3 }, 3).asBlock(), + blockFactory.newLongArrayVector(new long[] { 1, 2, 4 }, 3).asBlock(), + blockFactory.newConstantLongBlockWith(9, 2), + blockFactory.newLongBlockBuilder(2).appendLong(1).appendLong(2).build().filter(1), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(2).appendLong(5).build(), + blockFactory.newLongBlockBuilder(1).appendLong(1).appendLong(2).appendLong(3).appendLong(4).build(), + blockFactory.newLongBlockBuilder(1).appendLong(1).appendNull().build(), + blockFactory.newLongBlockBuilder(1).appendLong(1).appendNull().appendLong(3).build(), + blockFactory.newLongBlockBuilder(1).appendLong(1).appendLong(3).build(), + blockFactory.newLongBlockBuilder(3).appendLong(1).beginPositionEntry().appendLong(2).appendLong(3).build() ); assertAllNotEquals(notEqualBlocks); } public void testSimpleBlockWithSingleNull() { List blocks = List.of( - LongBlock.newBlockBuilder(1).appendLong(1).appendNull().appendLong(3).build(), - LongBlock.newBlockBuilder(1).appendLong(1).appendNull().appendLong(3).build() + blockFactory.newLongBlockBuilder(1).appendLong(1).appendNull().appendLong(3).build(), + blockFactory.newLongBlockBuilder(1).appendLong(1).appendNull().appendLong(3).build() ); assertEquals(3, blocks.get(0).getPositionCount()); assertTrue(blocks.get(0).isNull(1)); @@ -190,8 +198,8 @@ public void testSimpleBlockWithSingleNull() { public void testSimpleBlockWithManyNulls() { int positions = randomIntBetween(1, 256); boolean grow = randomBoolean(); - LongBlock.Builder builder1 = LongBlock.newBlockBuilder(grow ? 0 : positions); - LongBlock.Builder builder2 = LongBlock.newBlockBuilder(grow ? 0 : positions); + LongBlock.Builder builder1 = blockFactory.newLongBlockBuilder(grow ? 0 : positions); + LongBlock.Builder builder2 = blockFactory.newLongBlockBuilder(grow ? 
0 : positions); for (int p = 0; p < positions; p++) { builder1.appendNull(); builder2.appendNull(); @@ -208,8 +216,8 @@ public void testSimpleBlockWithManyNulls() { public void testSimpleBlockWithSingleMultiValue() { List blocks = List.of( - LongBlock.newBlockBuilder(1).beginPositionEntry().appendLong(1).appendLong(2).build(), - LongBlock.newBlockBuilder(1).beginPositionEntry().appendLong(1).appendLong(2).build() + blockFactory.newLongBlockBuilder(1).beginPositionEntry().appendLong(1).appendLong(2).build(), + blockFactory.newLongBlockBuilder(1).beginPositionEntry().appendLong(1).appendLong(2).build() ); assertEquals(1, blocks.get(0).getPositionCount()); assertEquals(2, blocks.get(0).getValueCount(0)); @@ -219,9 +227,9 @@ public void testSimpleBlockWithSingleMultiValue() { public void testSimpleBlockWithManyMultiValues() { int positions = randomIntBetween(1, 256); boolean grow = randomBoolean(); - LongBlock.Builder builder1 = LongBlock.newBlockBuilder(grow ? 0 : positions); - LongBlock.Builder builder2 = LongBlock.newBlockBuilder(grow ? 0 : positions); - LongBlock.Builder builder3 = LongBlock.newBlockBuilder(grow ? 0 : positions); + LongBlock.Builder builder1 = blockFactory.newLongBlockBuilder(grow ? 0 : positions); + LongBlock.Builder builder2 = blockFactory.newLongBlockBuilder(grow ? 0 : positions); + LongBlock.Builder builder3 = blockFactory.newLongBlockBuilder(grow ? 0 : positions); for (int pos = 0; pos < positions; pos++) { builder1.beginPositionEntry(); builder2.beginPositionEntry(); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MockBlockFactory.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MockBlockFactory.java index 35623b93357df..dc2aef4bca0eb 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MockBlockFactory.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MockBlockFactory.java @@ -10,6 +10,7 @@ import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BytesRefArray; import org.elasticsearch.compute.data.Block.MvOrdering; @@ -31,7 +32,9 @@ public class MockBlockFactory extends BlockFactory { static final boolean TRACK_ALLOCATIONS = true; static Object trackDetail() { - return TRACK_ALLOCATIONS ? new RuntimeException("Block allocated from test: " + LuceneTestCase.getTestClass().getName()) : true; + return TRACK_ALLOCATIONS + ? new RuntimeException("Releasable allocated from test: " + LuceneTestCase.getTestClass().getName()) + : true; } final ConcurrentMap TRACKED_BLOCKS = new ConcurrentHashMap<>(); @@ -49,7 +52,7 @@ public void ensureAllBlocksAreReleased() { Iterator causes = copy.values().iterator(); Object firstCause = causes.next(); RuntimeException exception = new RuntimeException( - copy.size() + " blocks have not been released", + copy.size() + " releasables have not been released", firstCause instanceof Throwable ? 
(Throwable) firstCause : null ); while (causes.hasNext()) { @@ -63,11 +66,15 @@ public void ensureAllBlocksAreReleased() { } public MockBlockFactory(CircuitBreaker breaker, BigArrays bigArrays) { - this(breaker, bigArrays, null); + this(breaker, bigArrays, BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE); } - protected MockBlockFactory(CircuitBreaker breaker, BigArrays bigArrays, BlockFactory parent) { - super(breaker, bigArrays, parent); + public MockBlockFactory(CircuitBreaker breaker, BigArrays bigArrays, ByteSizeValue maxPrimitiveArraySize) { + this(breaker, bigArrays, maxPrimitiveArraySize, null); + } + + private MockBlockFactory(CircuitBreaker breaker, BigArrays bigArrays, ByteSizeValue maxPrimitiveArraySize, BlockFactory parent) { + super(breaker, bigArrays, maxPrimitiveArraySize, parent); } @Override @@ -75,13 +82,13 @@ public BlockFactory newChildFactory(LocalCircuitBreaker childBreaker) { if (childBreaker.parentBreaker() != breaker()) { throw new IllegalStateException("Different parent breaker"); } - return new MockBlockFactory(childBreaker, bigArrays(), this); + return new MockBlockFactory(childBreaker, bigArrays(), ByteSizeValue.ofBytes(maxPrimitiveArrayBytes()), this); } @Override - void adjustBreaker(final long delta, final boolean isDataAlreadyCreated) { + void adjustBreaker(final long delta) { purgeTrackBlocks(); - super.adjustBreaker(delta, isDataAlreadyCreated); + super.adjustBreaker(delta); } void purgeTrackBlocks() { @@ -116,7 +123,7 @@ void purgeTrackBlocks() { TRACKED_BLOCKS.remove(vecBuilder); } } else if (b instanceof Vector vector) { - if (vector.asBlock().isReleased()) { + if (vector.isReleased()) { TRACKED_BLOCKS.remove(vector); } } else { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MultiValueBlockTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MultiValueBlockTests.java index f067999a04ff1..d3572377912ac 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MultiValueBlockTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MultiValueBlockTests.java @@ -23,7 +23,7 @@ public class MultiValueBlockTests extends SerializationTestCase { public void testIntBlockTrivial1() { - var blockBuilder = IntBlock.newBlockBuilder(4); + var blockBuilder = blockFactory.newIntBlockBuilder(4); blockBuilder.appendInt(10); blockBuilder.beginPositionEntry(); blockBuilder.appendInt(21); @@ -54,10 +54,11 @@ public void testIntBlockTrivial1() { // cannot get a Vector view assertNull(block.asVector()); EqualsHashCodeTestUtils.checkEqualsAndHashCode(block, this::serializeDeserializeBlock, null, Releasable::close); + block.close(); } public void testIntBlockTrivial() { - var blockBuilder = IntBlock.newBlockBuilder(10); + var blockBuilder = blockFactory.newIntBlockBuilder(10); blockBuilder.appendInt(1); blockBuilder.beginPositionEntry(); blockBuilder.appendInt(21); @@ -79,57 +80,66 @@ public void testIntBlockTrivial() { assertThat(block.getInt(block.getFirstValueIndex(0)), is(1)); assertNull(block.asVector()); EqualsHashCodeTestUtils.checkEqualsAndHashCode(block, this::serializeDeserializeBlock, null, Releasable::close); + block.close(); } public void testEmpty() { for (int initialSize : new int[] { 0, 10, 100, randomInt(512) }) { - IntBlock intBlock = IntBlock.newBlockBuilder(initialSize).build(); + IntBlock intBlock = blockFactory.newIntBlockBuilder(initialSize).build(); assertThat(intBlock.getPositionCount(), is(0)); 
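An aside on the `MockBlockFactory` changes above: the factory keeps every live allocation in a `ConcurrentMap` keyed by the block (or builder, or vector), with the `RuntimeException` captured by `trackDetail()` at allocation time as the value, so `ensureAllBlocksAreReleased` can point at the test that leaked. A generic, self-contained sketch of that idiom — names here are hypothetical, not the Elasticsearch API:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical names; only the idiom matches MockBlockFactory.
final class LeakTracker {
    private final Map<Object, Throwable> live = new ConcurrentHashMap<>();

    void onAllocate(Object resource) {
        // Creating an exception here captures the allocating stack trace,
        // so a later leak report can say where the resource came from.
        live.put(resource, new RuntimeException("Releasable allocated from test"));
    }

    void onRelease(Object resource) {
        live.remove(resource);
    }

    void assertAllReleased() {
        if (live.isEmpty() == false) {
            RuntimeException failure = new RuntimeException(live.size() + " releasables have not been released");
            live.values().forEach(failure::addSuppressed);
            throw failure;
        }
    }
}
```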
assertThat(intBlock.asVector(), is(notNullValue())); EqualsHashCodeTestUtils.checkEqualsAndHashCode(intBlock, this::serializeDeserializeBlock, null, Releasable::close); + intBlock.close(); - LongBlock longBlock = LongBlock.newBlockBuilder(initialSize).build(); + LongBlock longBlock = blockFactory.newLongBlockBuilder(initialSize).build(); assertThat(longBlock.getPositionCount(), is(0)); assertThat(longBlock.asVector(), is(notNullValue())); EqualsHashCodeTestUtils.checkEqualsAndHashCode(longBlock, this::serializeDeserializeBlock, null, Releasable::close); + longBlock.close(); - DoubleBlock doubleBlock = DoubleBlock.newBlockBuilder(initialSize).build(); + DoubleBlock doubleBlock = blockFactory.newDoubleBlockBuilder(initialSize).build(); assertThat(doubleBlock.getPositionCount(), is(0)); assertThat(doubleBlock.asVector(), is(notNullValue())); EqualsHashCodeTestUtils.checkEqualsAndHashCode(doubleBlock, this::serializeDeserializeBlock, null, Releasable::close); + doubleBlock.close(); - BytesRefBlock bytesRefBlock = BytesRefBlock.newBlockBuilder(initialSize).build(); + BytesRefBlock bytesRefBlock = blockFactory.newBytesRefBlockBuilder(initialSize).build(); assertThat(bytesRefBlock.getPositionCount(), is(0)); assertThat(bytesRefBlock.asVector(), is(notNullValue())); EqualsHashCodeTestUtils.checkEqualsAndHashCode(bytesRefBlock, this::serializeDeserializeBlock, null, Releasable::close); + bytesRefBlock.close(); } } public void testNullOnly() throws IOException { for (int initialSize : new int[] { 0, 10, 100, randomInt(512) }) { - IntBlock intBlock = IntBlock.newBlockBuilder(initialSize).appendNull().build(); + IntBlock intBlock = blockFactory.newIntBlockBuilder(initialSize).appendNull().build(); assertThat(intBlock.getPositionCount(), is(1)); assertThat(intBlock.getValueCount(0), is(0)); assertNull(intBlock.asVector()); EqualsHashCodeTestUtils.checkEqualsAndHashCode(intBlock, this::serializeDeserializeBlock, null, Releasable::close); + intBlock.close(); - LongBlock longBlock = LongBlock.newBlockBuilder(initialSize).appendNull().build(); + LongBlock longBlock = blockFactory.newLongBlockBuilder(initialSize).appendNull().build(); assertThat(longBlock.getPositionCount(), is(1)); assertThat(longBlock.getValueCount(0), is(0)); assertNull(longBlock.asVector()); EqualsHashCodeTestUtils.checkEqualsAndHashCode(longBlock, this::serializeDeserializeBlock, null, Releasable::close); + longBlock.close(); - DoubleBlock doubleBlock = DoubleBlock.newBlockBuilder(initialSize).appendNull().build(); + DoubleBlock doubleBlock = blockFactory.newDoubleBlockBuilder(initialSize).appendNull().build(); assertThat(doubleBlock.getPositionCount(), is(1)); assertThat(doubleBlock.getValueCount(0), is(0)); assertNull(doubleBlock.asVector()); EqualsHashCodeTestUtils.checkEqualsAndHashCode(doubleBlock, this::serializeDeserializeBlock, null, Releasable::close); + doubleBlock.close(); - BytesRefBlock bytesRefBlock = BytesRefBlock.newBlockBuilder(initialSize).appendNull().build(); + BytesRefBlock bytesRefBlock = blockFactory.newBytesRefBlockBuilder(initialSize).appendNull().build(); assertThat(bytesRefBlock.getPositionCount(), is(1)); assertThat(bytesRefBlock.getValueCount(0), is(0)); assertNull(bytesRefBlock.asVector()); EqualsHashCodeTestUtils.checkEqualsAndHashCode(bytesRefBlock, this::serializeDeserializeBlock, null, Releasable::close); + bytesRefBlock.close(); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SerializationTestCase.java 
b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SerializationTestCase.java index b0666e89cf79e..7d3e00845284a 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SerializationTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SerializationTestCase.java @@ -28,7 +28,7 @@ public abstract class SerializationTestCase extends ESTestCase { BigArrays bigArrays; - private BlockFactory blockFactory; + protected BlockFactory blockFactory; NamedWriteableRegistry registry = new NamedWriteableRegistry(Block.getNamedWriteables()); @Before diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/TestBlockBuilder.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/TestBlockBuilder.java index d9377a490368d..a2b074c1403a0 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/TestBlockBuilder.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/TestBlockBuilder.java @@ -29,7 +29,7 @@ public abstract class TestBlockBuilder implements Block.Builder { public abstract TestBlockBuilder endPositionEntry(); public static Block blockFromValues(List> blockValues, ElementType elementType) { - TestBlockBuilder builder = builderOf(elementType); + TestBlockBuilder builder = builderOf(TestBlockFactory.getNonBreakingInstance(), elementType); for (List rowValues : blockValues) { if (rowValues.isEmpty()) { builder.appendNull(); @@ -47,7 +47,7 @@ public static Block blockFromValues(List> blockValues, ElementType // Builds a block of single values. Each value can be null or non-null. // Differs from blockFromValues, as it does not use begin/endPositionEntry public static Block blockFromSingleValues(List blockValues, ElementType elementType) { - TestBlockBuilder builder = builderOf(elementType); + TestBlockBuilder builder = builderOf(TestBlockFactory.getNonBreakingInstance(), elementType); for (Object rowValue : blockValues) { if (rowValue == null) { builder.appendNull(); @@ -58,39 +58,23 @@ public static Block blockFromSingleValues(List blockValues, ElementType return builder.build(); } - static TestBlockBuilder builderOf(ElementType type) { + static TestBlockBuilder builderOf(BlockFactory blockFactory, ElementType type) { return switch (type) { - case INT -> new TestIntBlockBuilder(0); - case LONG -> new TestLongBlockBuilder(0); - case DOUBLE -> new TestDoubleBlockBuilder(0); - case BYTES_REF -> new TestBytesRefBlockBuilder(0); - case BOOLEAN -> new TestBooleanBlockBuilder(0); + case INT -> new TestIntBlockBuilder(blockFactory, 0); + case LONG -> new TestLongBlockBuilder(blockFactory, 0); + case DOUBLE -> new TestDoubleBlockBuilder(blockFactory, 0); + case BYTES_REF -> new TestBytesRefBlockBuilder(blockFactory, 0); + case BOOLEAN -> new TestBooleanBlockBuilder(blockFactory, 0); default -> throw new AssertionError(type); }; } - static TestBlockBuilder ofInt(int estimatedSize) { - return new TestIntBlockBuilder(estimatedSize); - } - - static TestBlockBuilder ofLong(int estimatedSize) { - return new TestLongBlockBuilder(estimatedSize); - } - - static TestBlockBuilder ofDouble(int estimatedSize) { - return new TestDoubleBlockBuilder(estimatedSize); - } - - static TestBlockBuilder ofBytesRef(int estimatedSize) { - return new TestBytesRefBlockBuilder(estimatedSize); - } - private static class TestIntBlockBuilder extends TestBlockBuilder { private final IntBlock.Builder builder; - TestIntBlockBuilder(int estimatedSize) { 
- builder = IntBlock.newBlockBuilder(estimatedSize); + TestIntBlockBuilder(BlockFactory blockFactory, int estimatedSize) { + builder = blockFactory.newIntBlockBuilder(estimatedSize); } @Override @@ -150,8 +134,8 @@ private static class TestLongBlockBuilder extends TestBlockBuilder { private final LongBlock.Builder builder; - TestLongBlockBuilder(int estimatedSize) { - builder = LongBlock.newBlockBuilder(estimatedSize); + TestLongBlockBuilder(BlockFactory blockFactory, int estimatedSize) { + builder = blockFactory.newLongBlockBuilder(estimatedSize); } @Override @@ -211,8 +195,8 @@ private static class TestDoubleBlockBuilder extends TestBlockBuilder { private final DoubleBlock.Builder builder; - TestDoubleBlockBuilder(int estimatedSize) { - builder = DoubleBlock.newBlockBuilder(estimatedSize); + TestDoubleBlockBuilder(BlockFactory blockFactory, int estimatedSize) { + builder = blockFactory.newDoubleBlockBuilder(estimatedSize); } @Override @@ -272,8 +256,8 @@ private static class TestBytesRefBlockBuilder extends TestBlockBuilder { private final BytesRefBlock.Builder builder; - TestBytesRefBlockBuilder(int estimatedSize) { - builder = BytesRefBlock.newBlockBuilder(estimatedSize); + TestBytesRefBlockBuilder(BlockFactory blockFactory, int estimatedSize) { + builder = blockFactory.newBytesRefBlockBuilder(estimatedSize); } @Override @@ -333,8 +317,8 @@ private static class TestBooleanBlockBuilder extends TestBlockBuilder { private final BooleanBlock.Builder builder; - TestBooleanBlockBuilder(int estimatedSize) { - builder = BooleanBlock.newBlockBuilder(estimatedSize); + TestBooleanBlockBuilder(BlockFactory blockFactory, int estimatedSize) { + builder = blockFactory.newBooleanBlockBuilder(estimatedSize); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/TestBlockFactory.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/TestBlockFactory.java new file mode 100644 index 0000000000000..5b7072ab6476d --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/TestBlockFactory.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.util.BigArrays; + +public class TestBlockFactory { + + private static final BlockFactory NON_BREAKING = BlockFactory.getInstance( + new NoopCircuitBreaker("test-noop"), + BigArrays.NON_RECYCLING_INSTANCE + ); + + /** + * Returns the Non-Breaking block factory. 
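The new `TestBlockFactory` replaces the static `newBlockBuilder`-style entry points this diff removes throughout the test suite. A minimal usage sketch, assuming the same test classpath and the explicit `close()` discipline shown elsewhere in this diff:

```java
import org.elasticsearch.compute.data.BlockFactory;
import org.elasticsearch.compute.data.IntBlock;
import org.elasticsearch.compute.data.TestBlockFactory;

class TestBlockFactoryUsage {
    static void example() {
        // One shared factory; its NoopCircuitBreaker never trips, so this is
        // only suitable for tests that don't assert memory accounting.
        BlockFactory factory = TestBlockFactory.getNonBreakingInstance();
        IntBlock block = factory.newIntBlockBuilder(3).appendInt(1).appendInt(2).appendInt(3).build();
        try {
            assert block.getPositionCount() == 3;
        } finally {
            block.close(); // blocks are Releasable; tests in this diff close them explicitly
        }
    }
}
```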
+ */ + public static BlockFactory getNonBreakingInstance() { + return NON_BREAKING; + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/VectorBuilderTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/VectorBuilderTests.java index 04ccf47ea6122..096db174a2580 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/VectorBuilderTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/VectorBuilderTests.java @@ -114,11 +114,11 @@ public void testCranky() { private Vector.Builder vectorBuilder(int estimatedSize, BlockFactory blockFactory) { return switch (elementType) { case NULL, DOC, UNKNOWN -> throw new UnsupportedOperationException(); - case BOOLEAN -> BooleanVector.newVectorBuilder(estimatedSize, blockFactory); - case BYTES_REF -> BytesRefVector.newVectorBuilder(estimatedSize, blockFactory); - case DOUBLE -> DoubleVector.newVectorBuilder(estimatedSize, blockFactory); - case INT -> IntVector.newVectorBuilder(estimatedSize, blockFactory); - case LONG -> LongVector.newVectorBuilder(estimatedSize, blockFactory); + case BOOLEAN -> blockFactory.newBooleanVectorBuilder(estimatedSize); + case BYTES_REF -> blockFactory.newBytesRefVectorBuilder(estimatedSize); + case DOUBLE -> blockFactory.newDoubleVectorBuilder(estimatedSize); + case INT -> blockFactory.newIntVectorBuilder(estimatedSize); + case LONG -> blockFactory.newLongVectorBuilder(estimatedSize); }; } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/VectorFixedBuilderTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/VectorFixedBuilderTests.java index 3c46fef7e5257..cdfc7611ec678 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/VectorFixedBuilderTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/VectorFixedBuilderTests.java @@ -116,10 +116,10 @@ public void testCranky() { private Vector.Builder vectorBuilder(int size, BlockFactory blockFactory) { return switch (elementType) { case NULL, BYTES_REF, DOC, UNKNOWN -> throw new UnsupportedOperationException(); - case BOOLEAN -> BooleanVector.newVectorFixedBuilder(size, blockFactory); - case DOUBLE -> DoubleVector.newVectorFixedBuilder(size, blockFactory); - case INT -> IntVector.newVectorFixedBuilder(size, blockFactory); - case LONG -> LongVector.newVectorFixedBuilder(size, blockFactory); + case BOOLEAN -> blockFactory.newBooleanVectorFixedBuilder(size); + case DOUBLE -> blockFactory.newDoubleVectorFixedBuilder(size); + case INT -> blockFactory.newIntVectorFixedBuilder(size); + case LONG -> blockFactory.newLongVectorFixedBuilder(size); }; } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneCountOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneCountOperatorTests.java index d6edc903607cc..8d401c2099b85 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneCountOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneCountOperatorTests.java @@ -11,13 +11,11 @@ import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import 
org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.elasticsearch.common.breaker.CircuitBreakingException; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; @@ -27,15 +25,11 @@ import org.elasticsearch.compute.operator.OperatorTestCase; import org.elasticsearch.compute.operator.TestResultPageSinkOperator; import org.elasticsearch.core.IOUtils; -import org.elasticsearch.index.cache.query.TrivialQueryCachingPolicy; -import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.indices.CrankyCircuitBreakerService; -import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.search.internal.SearchContext; import org.junit.After; import java.io.IOException; -import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; @@ -44,7 +38,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class LuceneCountOperatorTests extends AnyOperatorTestCase { @@ -57,11 +50,11 @@ public void closeIndex() throws IOException { } @Override - protected LuceneCountOperator.Factory simple(BigArrays bigArrays) { - return simple(bigArrays, randomFrom(DataPartitioning.values()), between(1, 10_000), 100); + protected LuceneCountOperator.Factory simple() { + return simple(randomFrom(DataPartitioning.values()), between(1, 10_000), 100); } - private LuceneCountOperator.Factory simple(BigArrays bigArrays, DataPartitioning dataPartitioning, int numDocs, int limit) { + private LuceneCountOperator.Factory simple(DataPartitioning dataPartitioning, int numDocs, int limit) { boolean enableShortcut = randomBoolean(); int commitEvery = Math.max(1, numDocs / 10); try ( @@ -89,10 +82,8 @@ private LuceneCountOperator.Factory simple(BigArrays bigArrays, DataPartitioning throw new RuntimeException(e); } - SearchContext ctx = mockSearchContext(reader); - SearchExecutionContext ectx = mock(SearchExecutionContext.class); - when(ctx.getSearchExecutionContext()).thenReturn(ectx); - when(ectx.getIndexReader()).thenReturn(reader); + SearchContext ctx = LuceneSourceOperatorTests.mockSearchContext(reader, 0); + when(ctx.getSearchExecutionContext().getIndexReader()).thenReturn(reader); final Query query; if (enableShortcut && randomBoolean()) { query = new MatchAllDocsQuery(); @@ -158,7 +149,7 @@ private void testEmpty(Supplier contexts) { private void testCount(Supplier contexts, int size, int limit) { DataPartitioning dataPartitioning = randomFrom(DataPartitioning.values()); - LuceneCountOperator.Factory factory = simple(contexts.get().bigArrays(), dataPartitioning, size, limit); + LuceneCountOperator.Factory factory = simple(dataPartitioning, size, limit); List results = new CopyOnWriteArrayList<>(); List drivers = new ArrayList<>(); int taskConcurrency = between(1, 8); @@ -185,25 +176,4 @@ private void testCount(Supplier contexts, int size, int limit) { assertThat(totalCount, equalTo((long) size)); } } - - /** - * Creates a mock search context with the given index reader. - * The returned mock search context can be used to test with {@link LuceneOperator}. 
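The duplicate `mockSearchContext` helper deleted here only stubbed `searcher()`. The surviving copy in `LuceneSourceOperatorTests` (later in this diff) takes a shard id and also stubs a `SearchExecutionContext`, because `LuceneSourceOperator.Status` now reports each processed shard as an `index:shardId` string. For reference, the consolidated helper's shape, mirroring the lines added below (assumes the usual static Mockito imports):

```java
// Mirrors the stubbing added to LuceneSourceOperatorTests.mockSearchContext below;
// "test" and "uid" are the index name and UUID that helper hard-codes.
static SearchContext mockSearchContext(ContextIndexSearcher searcher, int shardId) {
    SearchContext searchContext = mock(SearchContext.class);
    when(searchContext.searcher()).thenReturn(searcher);
    SearchExecutionContext searchExecutionContext = mock(SearchExecutionContext.class);
    when(searchContext.getSearchExecutionContext()).thenReturn(searchExecutionContext);
    when(searchExecutionContext.getFullyQualifiedIndex()).thenReturn(new Index("test", "uid"));
    when(searchExecutionContext.getShardId()).thenReturn(shardId);
    return searchContext;
}
```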
- */ - public static SearchContext mockSearchContext(IndexReader reader) { - try { - ContextIndexSearcher searcher = new ContextIndexSearcher( - reader, - IndexSearcher.getDefaultSimilarity(), - IndexSearcher.getDefaultQueryCache(), - TrivialQueryCachingPolicy.NEVER, - true - ); - SearchContext searchContext = mock(SearchContext.class); - when(searchContext.searcher()).thenReturn(searcher); - return searchContext; - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java index fad1f793122d8..01c5273a1e617 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java @@ -7,25 +7,46 @@ package org.elasticsearch.compute.lucene; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.test.ESTestCase; +import java.util.List; +import java.util.Set; +import java.util.TreeSet; + import static org.hamcrest.Matchers.equalTo; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103774") public class LuceneSourceOperatorStatusTests extends AbstractWireSerializingTestCase { public static LuceneSourceOperator.Status simple() { - return new LuceneSourceOperator.Status(0, 0, 1, 5, 123, 99990, 8000); + return new LuceneSourceOperator.Status(2, Set.of("*:*"), new TreeSet<>(List.of("a:0", "a:1")), 0, 1, 5, 123, 99990, 8000); } public static String simpleToJson() { return """ - {"processed_slices":0,"slice_index":0,"total_slices":1,"pages_emitted":5,"slice_min":123,"slice_max":99990,"current":8000}"""; + { + "processed_slices" : 2, + "processed_queries" : [ + "*:*" + ], + "processed_shards" : [ + "a:0", + "a:1" + ], + "slice_index" : 0, + "total_slices" : 1, + "pages_emitted" : 5, + "slice_min" : 123, + "slice_max" : 99990, + "current" : 8000 + }"""; } public void testToXContent() { - assertThat(Strings.toString(simple()), equalTo(simpleToJson())); + assertThat(Strings.toString(simple(), true, true), equalTo(simpleToJson())); } @Override @@ -37,6 +58,8 @@ protected Writeable.Reader instanceReader() { public LuceneSourceOperator.Status createTestInstance() { return new LuceneSourceOperator.Status( randomNonNegativeInt(), + randomProcessedQueries(), + randomProcessedShards(), randomNonNegativeInt(), randomNonNegativeInt(), randomNonNegativeInt(), @@ -46,26 +69,58 @@ public LuceneSourceOperator.Status createTestInstance() { ); } + private static Set randomProcessedQueries() { + int size = between(0, 10); + Set set = new TreeSet<>(); + while (set.size() < size) { + set.add(randomAlphaOfLength(5)); + } + return set; + } + + private static Set randomProcessedShards() { + int size = between(0, 10); + Set set = new TreeSet<>(); + while (set.size() < size) { + set.add(randomAlphaOfLength(3) + ":" + between(0, 10)); + } + return set; + } + @Override protected LuceneSourceOperator.Status mutateInstance(LuceneSourceOperator.Status instance) { int processedSlices = instance.processedSlices(); + Set processedQueries = instance.processedQueries(); + Set processedShards = 
instance.processedShards(); int sliceIndex = instance.sliceIndex(); int totalSlices = instance.totalSlices(); int pagesEmitted = instance.pagesEmitted(); int sliceMin = instance.sliceMin(); int sliceMax = instance.sliceMax(); int current = instance.current(); - switch (between(0, 6)) { + switch (between(0, 8)) { case 0 -> processedSlices = randomValueOtherThan(processedSlices, ESTestCase::randomNonNegativeInt); - case 1 -> sliceIndex = randomValueOtherThan(sliceIndex, ESTestCase::randomNonNegativeInt); - case 2 -> totalSlices = randomValueOtherThan(totalSlices, ESTestCase::randomNonNegativeInt); - case 3 -> pagesEmitted = randomValueOtherThan(pagesEmitted, ESTestCase::randomNonNegativeInt); - case 4 -> sliceMin = randomValueOtherThan(sliceMin, ESTestCase::randomNonNegativeInt); - case 5 -> sliceMax = randomValueOtherThan(sliceMax, ESTestCase::randomNonNegativeInt); - case 6 -> current = randomValueOtherThan(current, ESTestCase::randomNonNegativeInt); + case 1 -> processedQueries = randomValueOtherThan(processedQueries, LuceneSourceOperatorStatusTests::randomProcessedQueries); + case 2 -> processedShards = randomValueOtherThan(processedShards, LuceneSourceOperatorStatusTests::randomProcessedShards); + case 3 -> sliceIndex = randomValueOtherThan(sliceIndex, ESTestCase::randomNonNegativeInt); + case 4 -> totalSlices = randomValueOtherThan(totalSlices, ESTestCase::randomNonNegativeInt); + case 5 -> pagesEmitted = randomValueOtherThan(pagesEmitted, ESTestCase::randomNonNegativeInt); + case 6 -> sliceMin = randomValueOtherThan(sliceMin, ESTestCase::randomNonNegativeInt); + case 7 -> sliceMax = randomValueOtherThan(sliceMax, ESTestCase::randomNonNegativeInt); + case 8 -> current = randomValueOtherThan(current, ESTestCase::randomNonNegativeInt); default -> throw new UnsupportedOperationException(); } ; - return new LuceneSourceOperator.Status(processedSlices, sliceIndex, totalSlices, pagesEmitted, sliceMin, sliceMax, current); + return new LuceneSourceOperator.Status( + processedSlices, + processedQueries, + processedShards, + sliceIndex, + totalSlices, + pagesEmitted, + sliceMin, + sliceMax, + current + ); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorTests.java index 41fe1a93d9c8b..19e16144e11c5 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorTests.java @@ -17,7 +17,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.elasticsearch.common.breaker.CircuitBreakingException; -import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.AnyOperatorTestCase; @@ -27,6 +27,7 @@ import org.elasticsearch.compute.operator.OperatorTestCase; import org.elasticsearch.compute.operator.TestResultPageSinkOperator; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.Index; import org.elasticsearch.index.cache.query.TrivialQueryCachingPolicy; import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -37,6 +38,7 @@ import
org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.support.NestedScope; import org.elasticsearch.indices.CrankyCircuitBreakerService; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.search.internal.SearchContext; import org.junit.After; @@ -69,11 +71,11 @@ public void closeIndex() throws IOException { } @Override - protected LuceneSourceOperator.Factory simple(BigArrays bigArrays) { - return simple(bigArrays, randomFrom(DataPartitioning.values()), between(1, 10_000), 100); + protected LuceneSourceOperator.Factory simple() { + return simple(randomFrom(DataPartitioning.values()), between(1, 10_000), 100); } - private LuceneSourceOperator.Factory simple(BigArrays bigArrays, DataPartitioning dataPartitioning, int numDocs, int limit) { + private LuceneSourceOperator.Factory simple(DataPartitioning dataPartitioning, int numDocs, int limit) { int commitEvery = Math.max(1, numDocs / 10); try ( RandomIndexWriter writer = new RandomIndexWriter( @@ -95,24 +97,23 @@ private LuceneSourceOperator.Factory simple(BigArrays bigArrays, DataPartitionin throw new RuntimeException(e); } - SearchContext ctx = mockSearchContext(reader); - SearchExecutionContext ectx = mock(SearchExecutionContext.class); - when(ctx.getSearchExecutionContext()).thenReturn(ectx); - when(ectx.getFieldType(anyString())).thenAnswer(inv -> { + SearchContext ctx = mockSearchContext(reader, 0); + when(ctx.getSearchExecutionContext().getFieldType(anyString())).thenAnswer(inv -> { String name = inv.getArgument(0); return switch (name) { case "s" -> S_FIELD; default -> throw new IllegalArgumentException("don't support [" + name + "]"); }; }); - when(ectx.getForField(any(), any())).thenAnswer(inv -> { + when(ctx.getSearchExecutionContext().getForField(any(), any())).thenAnswer(inv -> { MappedFieldType ft = inv.getArgument(0); IndexFieldData.Builder builder = ft.fielddataBuilder(FieldDataContext.noRuntimeFields("test")); - return builder.build(new IndexFieldDataCache.None(), bigArrays.breakerService()); + // This breaker is for fielddata from text fields. We don't test fielddata here, so it's fine not to use a real breaker.
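Worth noting about the rewritten stubbing above: `when(ctx.getSearchExecutionContext().getFieldType(...))` only works because `mockSearchContext` already stubs `getSearchExecutionContext()` to return another Mockito mock; chaining `when` through a real object's return value would fail. A self-contained sketch of the idiom, with hypothetical `Outer`/`Inner` types standing in for `SearchContext`/`SearchExecutionContext`:

```java
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

interface Inner { String fieldType(String name); }
interface Outer { Inner inner(); }

class ChainedStubbing {
    static Outer newStubbedOuter() {
        Outer outer = mock(Outer.class);
        Inner inner = mock(Inner.class);
        // Stub the first hop once. After this, outer.inner() always returns the
        // same mock, so chained calls like outer.inner().fieldType("s") can be
        // stubbed and invoked safely.
        when(outer.inner()).thenReturn(inner);
        when(inner.fieldType("s")).thenReturn("long");
        return outer;
    }
}
```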
+ return builder.build(new IndexFieldDataCache.None(), new NoneCircuitBreakerService()); }); - when(ectx.nestedScope()).thenReturn(new NestedScope()); - when(ectx.nestedLookup()).thenReturn(NestedLookup.EMPTY); - when(ectx.getIndexReader()).thenReturn(reader); + when(ctx.getSearchExecutionContext().nestedScope()).thenReturn(new NestedScope()); + when(ctx.getSearchExecutionContext().nestedLookup()).thenReturn(NestedLookup.EMPTY); + when(ctx.getSearchExecutionContext().getIndexReader()).thenReturn(reader); Function queryFunction = c -> new MatchAllDocsQuery(); int maxPageSize = between(10, Math.max(10, numDocs)); return new LuceneSourceOperator.Factory(List.of(ctx), queryFunction, dataPartitioning, 1, maxPageSize, limit); @@ -176,8 +177,8 @@ public void testShardDataPartitioningWithCranky() { } private void testSimple(DriverContext ctx, int size, int limit) { - LuceneSourceOperator.Factory factory = simple(ctx.bigArrays(), DataPartitioning.SHARD, size, limit); - Operator.OperatorFactory readS = ValuesSourceReaderOperatorTests.factory(reader, S_FIELD); + LuceneSourceOperator.Factory factory = simple(DataPartitioning.SHARD, size, limit); + Operator.OperatorFactory readS = ValuesSourceReaderOperatorTests.factory(reader, S_FIELD, ElementType.LONG); List results = new ArrayList<>(); @@ -205,7 +206,7 @@ private void testSimple(DriverContext ctx, int size, int limit) { * Creates a mock search context with the given index reader. * The returned mock search context can be used to test with {@link LuceneOperator}. */ - public static SearchContext mockSearchContext(IndexReader reader) { + public static SearchContext mockSearchContext(IndexReader reader, int shardId) { try { ContextIndexSearcher searcher = new ContextIndexSearcher( reader, @@ -216,6 +217,10 @@ public static SearchContext mockSearchContext(IndexReader reader) { ); SearchContext searchContext = mock(SearchContext.class); when(searchContext.searcher()).thenReturn(searcher); + SearchExecutionContext searchExecutionContext = mock(SearchExecutionContext.class); + when(searchContext.getSearchExecutionContext()).thenReturn(searchExecutionContext); + when(searchExecutionContext.getFullyQualifiedIndex()).thenReturn(new Index("test", "uid")); + when(searchExecutionContext.getShardId()).thenReturn(shardId); return searchContext; } catch (IOException e) { throw new UncheckedIOException(e); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperatorTests.java index d1b9e706750df..5776c45274ad1 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperatorTests.java @@ -16,7 +16,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.elasticsearch.common.breaker.CircuitBreakingException; -import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.AnyOperatorTestCase; @@ -32,9 +32,9 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.mapper.NumberFieldMapper; -import 
org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.support.NestedScope; import org.elasticsearch.indices.CrankyCircuitBreakerService; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortBuilder; @@ -49,7 +49,6 @@ import static org.hamcrest.Matchers.hasSize; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class LuceneTopNSourceOperatorTests extends AnyOperatorTestCase { @@ -63,11 +62,11 @@ public void closeIndex() throws IOException { } @Override - protected LuceneTopNSourceOperator.Factory simple(BigArrays bigArrays) { - return simple(bigArrays, DataPartitioning.SHARD, 10_000, 100); + protected LuceneTopNSourceOperator.Factory simple() { + return simple(DataPartitioning.SHARD, 10_000, 100); } - private LuceneTopNSourceOperator.Factory simple(BigArrays bigArrays, DataPartitioning dataPartitioning, int size, int limit) { + private LuceneTopNSourceOperator.Factory simple(DataPartitioning dataPartitioning, int size, int limit) { int commitEvery = Math.max(1, size / 10); try ( RandomIndexWriter writer = new RandomIndexWriter( @@ -89,24 +88,23 @@ private LuceneTopNSourceOperator.Factory simple(BigArrays bigArrays, DataPartiti throw new RuntimeException(e); } - SearchContext ctx = LuceneSourceOperatorTests.mockSearchContext(reader); - SearchExecutionContext ectx = mock(SearchExecutionContext.class); - when(ctx.getSearchExecutionContext()).thenReturn(ectx); - when(ectx.getFieldType(anyString())).thenAnswer(inv -> { + SearchContext ctx = LuceneSourceOperatorTests.mockSearchContext(reader, 0); + when(ctx.getSearchExecutionContext().getFieldType(anyString())).thenAnswer(inv -> { String name = inv.getArgument(0); return switch (name) { case "s" -> S_FIELD; default -> throw new IllegalArgumentException("don't support [" + name + "]"); }; }); - when(ectx.getForField(any(), any())).thenAnswer(inv -> { + when(ctx.getSearchExecutionContext().getForField(any(), any())).thenAnswer(inv -> { MappedFieldType ft = inv.getArgument(0); IndexFieldData.Builder builder = ft.fielddataBuilder(FieldDataContext.noRuntimeFields("test")); - return builder.build(new IndexFieldDataCache.None(), bigArrays.breakerService()); + // This breaker is used for fielddata but we're not testing that. 
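Same substitution as in `LuceneSourceOperatorTests` above: fielddata construction gets a `NoneCircuitBreakerService` so that, when the test runs under the `CrankyCircuitBreakerService`, a random `CircuitBreakingException` can only originate from the operator under test. A generic sketch of the pattern — names hypothetical, not the Elasticsearch API:

```java
import java.util.Random;

interface Breaker {
    void addEstimateBytes(long bytes);
}

// For incidental dependencies: never throws, so test setup cannot fail.
final class NoopBreaker implements Breaker {
    @Override
    public void addEstimateBytes(long bytes) {}
}

// For the code under test: trips at random, letting the test verify that
// allocation failures are surfaced and resources are still released.
final class CrankyBreaker implements Breaker {
    private final Random random = new Random();

    @Override
    public void addEstimateBytes(long bytes) {
        if (random.nextInt(100) == 0) {
            throw new IllegalStateException("cranky breaker: simulated trip");
        }
    }
}
```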
+ return builder.build(new IndexFieldDataCache.None(), new NoneCircuitBreakerService()); }); - when(ectx.nestedScope()).thenReturn(new NestedScope()); - when(ectx.nestedLookup()).thenReturn(NestedLookup.EMPTY); - when(ectx.getIndexReader()).thenReturn(reader); + when(ctx.getSearchExecutionContext().nestedScope()).thenReturn(new NestedScope()); + when(ctx.getSearchExecutionContext().nestedLookup()).thenReturn(NestedLookup.EMPTY); + when(ctx.getSearchExecutionContext().getIndexReader()).thenReturn(reader); Function queryFunction = c -> new MatchAllDocsQuery(); int taskConcurrency = 0; int maxPageSize = between(10, Math.max(10, size)); @@ -176,8 +174,8 @@ private void testEmpty(DriverContext context) { } private void testSimple(DriverContext ctx, int size, int limit) { - LuceneTopNSourceOperator.Factory factory = simple(ctx.bigArrays(), DataPartitioning.SHARD, size, limit); - Operator.OperatorFactory readS = ValuesSourceReaderOperatorTests.factory(reader, S_FIELD); + LuceneTopNSourceOperator.Factory factory = simple(DataPartitioning.SHARD, size, limit); + Operator.OperatorFactory readS = ValuesSourceReaderOperatorTests.factory(reader, S_FIELD, ElementType.LONG); List results = new ArrayList<>(); OperatorTestCase.runDriver( diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorStatusTests.java index 6f0317b509e3b..1851f7ac948cc 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorStatusTests.java @@ -24,11 +24,16 @@ public static ValuesSourceReaderOperator.Status simple() { public static String simpleToJson() { return """ - {"readers_built":{"ReaderType":3},"pages_processed":123}"""; + { + "readers_built" : { + "ReaderType" : 3 + }, + "pages_processed" : 123 + }"""; } public void testToXContent() { - assertThat(Strings.toString(simple()), equalTo(simpleToJson())); + assertThat(Strings.toString(simple(), true, true), equalTo(simpleToJson())); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java index f6310d826c989..ada0582a2fad8 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java @@ -12,14 +12,11 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedDocValuesField; -import org.apache.lucene.document.SortedNumericDocValuesField; -import org.apache.lucene.document.StoredField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; @@ -27,12 +24,10 @@ import org.apache.lucene.tests.mockfile.HandleLimitFS; import 
org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.NumericUtils; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; @@ -42,6 +37,7 @@ import org.elasticsearch.compute.data.DocBlock; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; @@ -55,26 +51,29 @@ import org.elasticsearch.compute.operator.PageConsumerOperator; import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.core.IOUtils; -import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.BlockLoader; -import org.elasticsearch.index.mapper.BooleanFieldMapper; +import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.NumberFieldMapper; -import org.elasticsearch.index.mapper.ProvidedIdFieldMapper; -import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.MapperServiceTestCase; +import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceLoader; +import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.index.mapper.TextSearchInfo; import org.elasticsearch.index.mapper.TsidExtractingIdFieldMapper; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; import org.hamcrest.Matcher; import org.junit.After; +import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -82,6 +81,9 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.stream.Collectors; import java.util.stream.IntStream; import static org.elasticsearch.compute.lucene.LuceneSourceOperatorTests.mockSearchContext; @@ -111,6 +113,7 @@ public class ValuesSourceReaderOperatorTests extends OperatorTestCase { { false, false, true, true } }; private Directory directory = newDirectory(); + private MapperService mapperService; private IndexReader reader; private static final Map keyToTags = new HashMap<>(); @@ -120,7 +123,7 @@ public void closeIndex() throws IOException { } @Override - protected Operator.OperatorFactory simple(BigArrays bigArrays) { + protected Operator.OperatorFactory simple() { if (reader == null) { // Init a reader if one hasn't been built, so things don't blow up try { @@ -129,19 +132,20 @@ protected Operator.OperatorFactory simple(BigArrays bigArrays) { throw new 
RuntimeException(e); } } - return factory(reader, docValuesNumberField("long", NumberFieldMapper.NumberType.LONG)); + return factory(reader, mapperService.fieldType("long"), ElementType.LONG); } - static Operator.OperatorFactory factory(IndexReader reader, MappedFieldType ft) { - return factory(reader, ft.name(), ft.blockLoader(null)); + static Operator.OperatorFactory factory(IndexReader reader, MappedFieldType ft, ElementType elementType) { + return factory(reader, ft.name(), elementType, ft.blockLoader(null)); } - static Operator.OperatorFactory factory(IndexReader reader, String name, BlockLoader loader) { - return new ValuesSourceReaderOperator.Factory( - List.of(new ValuesSourceReaderOperator.FieldInfo(name, List.of(loader))), - List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), - 0 - ); + static Operator.OperatorFactory factory(IndexReader reader, String name, ElementType elementType, BlockLoader loader) { + return new ValuesSourceReaderOperator.Factory(List.of(new ValuesSourceReaderOperator.FieldInfo(name, elementType, shardIdx -> { + if (shardIdx != 0) { + fail("unexpected shardIdx [" + shardIdx + "]"); + } + return loader; + })), List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), 0); } @Override @@ -160,7 +164,7 @@ private SourceOperator simpleInput(DriverContext context, int size, int commitEv throw new RuntimeException(e); } var luceneFactory = new LuceneSourceOperator.Factory( - List.of(mockSearchContext(reader)), + List.of(mockSearchContext(reader, 0)), ctx -> new MatchAllDocsQuery(), DataPartitioning.SHARD, randomIntBetween(1, 10), @@ -170,8 +174,46 @@ private SourceOperator simpleInput(DriverContext context, int size, int commitEv return luceneFactory.get(context); } + private void initMapping() throws IOException { + mapperService = new MapperServiceTestCase() { + }.createMapperService(MapperServiceTestCase.mapping(b -> { + fieldExamples(b, "key", "integer"); + fieldExamples(b, "int", "integer"); + fieldExamples(b, "short", "short"); + fieldExamples(b, "byte", "byte"); + fieldExamples(b, "long", "long"); + fieldExamples(b, "double", "double"); + fieldExamples(b, "bool", "boolean"); + fieldExamples(b, "kwd", "keyword"); + b.startObject("stored_kwd").field("type", "keyword").field("store", true).endObject(); + b.startObject("mv_stored_kwd").field("type", "keyword").field("store", true).endObject(); + + simpleField(b, "missing_text", "text"); + b.startObject("source_text").field("type", "text").field("store", false).endObject(); + b.startObject("mv_source_text").field("type", "text").field("store", false).endObject(); + b.startObject("stored_text").field("type", "text").field("store", true).endObject(); + b.startObject("mv_stored_text").field("type", "text").field("store", true).endObject(); + + for (String n : new String[] { "text_with_delegate", "mv_text_with_delegate", "missing_text_with_delegate" }) { + b.startObject(n); + { + b.field("type", "text"); + b.startObject("fields"); + b.startObject("kwd").field("type", "keyword").endObject(); + b.endObject(); + } + b.endObject(); + } + })); + } + private void initIndex(int size, int commitEvery) throws IOException { + initMapping(); keyToTags.clear(); + reader = initIndex(directory, size, commitEvery); + } + + private IndexReader initIndex(Directory directory, int size, int commitEvery) throws IOException { try ( IndexWriter writer = new IndexWriter( directory, @@ -179,68 +221,149 @@ private void initIndex(int size, int 
commitEvery) throws IOException { ) ) { for (int d = 0; d < size; d++) { - List doc = new ArrayList<>(); - doc.add(IdFieldMapper.standardIdField("id")); - doc.add(new SortedNumericDocValuesField("key", d)); - doc.add(new SortedNumericDocValuesField("int", d)); - doc.add(new SortedNumericDocValuesField("short", (short) d)); - doc.add(new SortedNumericDocValuesField("byte", (byte) d)); - doc.add(new SortedNumericDocValuesField("long", d)); + XContentBuilder source = JsonXContent.contentBuilder(); + source.startObject(); + source.field("key", d); + + source.field("long", d); + source.startArray("mv_long"); + for (int v = 0; v <= d % 3; v++) { + source.value((long) (-1_000 * d + v)); + } + source.endArray(); + source.field("source_long", (long) d); + source.startArray("mv_source_long"); + for (int v = 0; v <= d % 3; v++) { + source.value((long) (-1_000 * d + v)); + } + source.endArray(); + + source.field("int", d); + source.startArray("mv_int"); + for (int v = 0; v <= d % 3; v++) { + source.value(1_000 * d + v); + } + source.endArray(); + source.field("source_int", d); + source.startArray("mv_source_int"); + for (int v = 0; v <= d % 3; v++) { + source.value(1_000 * d + v); + } + source.endArray(); + + source.field("short", (short) d); + source.startArray("mv_short"); + for (int v = 0; v <= d % 3; v++) { + source.value((short) (2_000 * d + v)); + } + source.endArray(); + source.field("source_short", (short) d); + source.startArray("mv_source_short"); + for (int v = 0; v <= d % 3; v++) { + source.value((short) (2_000 * d + v)); + } + source.endArray(); + + source.field("byte", (byte) d); + source.startArray("mv_byte"); + for (int v = 0; v <= d % 3; v++) { + source.value((byte) (3_000 * d + v)); + } + source.endArray(); + source.field("source_byte", (byte) d); + source.startArray("mv_source_byte"); + for (int v = 0; v <= d % 3; v++) { + source.value((byte) (3_000 * d + v)); + } + source.endArray(); + + source.field("double", d / 123_456d); + source.startArray("mv_double"); + for (int v = 0; v <= d % 3; v++) { + source.value(d / 123_456d + v); + } + source.endArray(); + source.field("source_double", d / 123_456d); + source.startArray("mv_source_double"); + for (int v = 0; v <= d % 3; v++) { + source.value(d / 123_456d + v); + } + source.endArray(); + + source.field("bool", d % 2 == 0); + source.startArray("mv_bool"); + for (int v = 0; v <= d % 3; v++) { + source.value(v % 2 == 0); + } + source.endArray(); + source.field("source_bool", d % 2 == 0); + source.startArray("source_mv_bool"); + for (int v = 0; v <= d % 3; v++) { + source.value(v % 2 == 0); + } + source.endArray(); + String tag = keyToTags.computeIfAbsent(d, k -> "tag-" + randomIntBetween(1, 5)); - doc.add(new KeywordFieldMapper.KeywordField("kwd", new BytesRef(tag), KeywordFieldMapper.Defaults.FIELD_TYPE)); - doc.add(new StoredField("stored_kwd", new BytesRef(Integer.toString(d)))); - doc.add(new StoredField("stored_text", Integer.toString(d))); - doc.add(new SortedNumericDocValuesField("bool", d % 2 == 0 ? 1 : 0)); - doc.add(new SortedNumericDocValuesField("double", NumericUtils.doubleToSortableLong(d / 123_456d))); + source.field("kwd", tag); + source.startArray("mv_kwd"); for (int v = 0; v <= d % 3; v++) { - doc.add(new SortedNumericDocValuesField("mv_bool", v % 2 == 0 ? 
1 : 0)); - doc.add(new SortedNumericDocValuesField("mv_int", 1_000 * d + v)); - doc.add(new SortedNumericDocValuesField("mv_short", (short) (2_000 * d + v))); - doc.add(new SortedNumericDocValuesField("mv_byte", (byte) (3_000 * d + v))); - doc.add(new SortedNumericDocValuesField("mv_long", -1_000 * d + v)); - doc.add(new SortedNumericDocValuesField("mv_double", NumericUtils.doubleToSortableLong(d / 123_456d + v))); - doc.add( - new KeywordFieldMapper.KeywordField("mv_kwd", new BytesRef(PREFIX[v] + d), KeywordFieldMapper.Defaults.FIELD_TYPE) - ); - doc.add(new StoredField("mv_stored_kwd", new BytesRef(PREFIX[v] + d))); - doc.add(new StoredField("mv_stored_text", PREFIX[v] + d)); + source.value(PREFIX[v] + d); } - XContentBuilder source = JsonXContent.contentBuilder(); - source.startObject(); + source.endArray(); + source.field("stored_kwd", Integer.toString(d)); + source.startArray("mv_stored_kwd"); + for (int v = 0; v <= d % 3; v++) { + source.value(PREFIX[v] + d); + } + source.endArray(); source.field("source_kwd", Integer.toString(d)); source.startArray("mv_source_kwd"); for (int v = 0; v <= d % 3; v++) { source.value(PREFIX[v] + d); } source.endArray(); - source.field("source_text", Integer.toString(d)); - source.startArray("mv_source_text"); + + source.field("text", Integer.toString(d)); + source.startArray("mv_text"); for (int v = 0; v <= d % 3; v++) { source.value(PREFIX[v] + d); } source.endArray(); - source.field("source_long", (long) d); - source.startArray("mv_source_long"); + source.field("stored_text", Integer.toString(d)); + source.startArray("mv_stored_text"); for (int v = 0; v <= d % 3; v++) { - source.value((long) (-1_000 * d + v)); + source.value(PREFIX[v] + d); } source.endArray(); - source.field("source_int", d); - source.startArray("mv_source_int"); + source.field("source_text", Integer.toString(d)); + source.startArray("mv_source_text"); for (int v = 0; v <= d % 3; v++) { - source.value(1_000 * d + v); + source.value(PREFIX[v] + d); + } + source.endArray(); + + source.field("text_with_delegate", Integer.toString(d)); + source.startArray("mv_text_with_delegate"); + for (int v = 0; v <= d % 3; v++) { + source.value(PREFIX[v] + d); } source.endArray(); source.endObject(); - doc.add(new StoredField(SourceFieldMapper.NAME, BytesReference.bytes(source).toBytesRef())); - writer.addDocument(doc); + + ParsedDocument doc = mapperService.documentParser() + .parseDocument( + new SourceToParse("id" + d, BytesReference.bytes(source), XContentType.JSON), + mapperService.mappingLookup() + ); + writer.addDocuments(doc.docs()); + if (d % commitEvery == commitEvery - 1) { writer.commit(); } } } - reader = DirectoryReader.open(directory); + return DirectoryReader.open(directory); } @Override @@ -272,9 +395,9 @@ protected void assertSimpleOutput(List input, List results) { } @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - assumeTrue("doesn't use big arrays so can't break", false); - return null; + protected ByteSizeValue enoughMemoryForSimple() { + assumeFalse("strange exception in the test, fix soon", true); + return ByteSizeValue.ofKb(1); } public void testLoadAll() { @@ -308,12 +431,13 @@ public void testManySingleDocPages() { Checks checks = new Checks(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING); FieldCase testCase = new FieldCase( new KeywordFieldMapper.KeywordFieldType("kwd"), + ElementType.BYTES_REF, checks::tags, StatusChecks::keywordsFromDocValues ); operators.add( new ValuesSourceReaderOperator.Factory( - List.of(testCase.info, 
fieldInfo(docValuesNumberField("key", NumberFieldMapper.NumberType.INTEGER))), + List.of(testCase.info, fieldInfo(mapperService.fieldType("key"), ElementType.INT)), List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), 0 ).get(driverContext) @@ -356,8 +480,17 @@ public void testLoadAllInOnePageShuffled() { loadSimpleAndAssert(driverContext, List.of(source), Block.MvOrdering.UNORDERED); } - private static ValuesSourceReaderOperator.FieldInfo fieldInfo(MappedFieldType ft) { - return new ValuesSourceReaderOperator.FieldInfo(ft.name(), List.of(ft.blockLoader(new MappedFieldType.BlockLoaderContext() { + private static ValuesSourceReaderOperator.FieldInfo fieldInfo(MappedFieldType ft, ElementType elementType) { + return new ValuesSourceReaderOperator.FieldInfo(ft.name(), elementType, shardIdx -> { + if (shardIdx != 0) { + fail("unexpected shardIdx [" + shardIdx + "]"); + } + return ft.blockLoader(blContext()); + }); + } + + private static MappedFieldType.BlockLoaderContext blContext() { + return new MappedFieldType.BlockLoaderContext() { @Override public String indexName() { return "test_index"; @@ -377,7 +510,12 @@ public Set sourcePaths(String name) { public String parentField(String field) { return null; } - }))); + + @Override + public FieldNamesFieldMapper.FieldNamesFieldType fieldNames() { + return FieldNamesFieldMapper.FieldNamesFieldType.get(true); + } + }; } private void loadSimpleAndAssert(DriverContext driverContext, List input, Block.MvOrdering docValuesMvOrdering) { @@ -386,7 +524,7 @@ private void loadSimpleAndAssert(DriverContext driverContext, List input, List operators = new ArrayList<>(); operators.add( new ValuesSourceReaderOperator.Factory( - List.of(fieldInfo(docValuesNumberField("key", NumberFieldMapper.NumberType.INTEGER))), + List.of(fieldInfo(mapperService.fieldType("key"), ElementType.INT)), List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), 0 ).get(driverContext) @@ -439,13 +577,14 @@ interface CheckReadersWithName { } record FieldCase(ValuesSourceReaderOperator.FieldInfo info, CheckResults checkResults, CheckReadersWithName checkReaders) { - FieldCase(MappedFieldType ft, CheckResults checkResults, CheckReadersWithName checkReaders) { - this(fieldInfo(ft), checkResults, checkReaders); + FieldCase(MappedFieldType ft, ElementType elementType, CheckResults checkResults, CheckReadersWithName checkReaders) { + this(fieldInfo(ft, elementType), checkResults, checkReaders); } - FieldCase(MappedFieldType ft, CheckResults checkResults, CheckReaders checkReaders) { + FieldCase(MappedFieldType ft, ElementType elementType, CheckResults checkResults, CheckReaders checkReaders) { this( ft, + elementType, checkResults, (name, forcedRowByRow, pageCount, segmentCount, readersBuilt) -> checkReaders.check( forcedRowByRow, @@ -505,148 +644,118 @@ private void testLoadAllStatus(boolean allInOnePage) { private List infoAndChecksForEachType(Block.MvOrdering docValuesMvOrdering) { Checks checks = new Checks(docValuesMvOrdering); List r = new ArrayList<>(); - r.add( - new FieldCase(docValuesNumberField("long", NumberFieldMapper.NumberType.LONG), checks::longs, StatusChecks::longsFromDocValues) - ); + r.add(new FieldCase(mapperService.fieldType(IdFieldMapper.NAME), ElementType.BYTES_REF, checks::ids, StatusChecks::id)); + r.add(new FieldCase(TsidExtractingIdFieldMapper.INSTANCE.fieldType(), ElementType.BYTES_REF, checks::ids, StatusChecks::id)); + r.add(new FieldCase(mapperService.fieldType("long"), 
ElementType.LONG, checks::longs, StatusChecks::longsFromDocValues)); r.add( new FieldCase( - docValuesNumberField("mv_long", NumberFieldMapper.NumberType.LONG), + mapperService.fieldType("mv_long"), + ElementType.LONG, checks::mvLongsFromDocValues, StatusChecks::mvLongsFromDocValues ) ); + r.add(new FieldCase(mapperService.fieldType("missing_long"), ElementType.LONG, checks::constantNulls, StatusChecks::constantNulls)); + r.add(new FieldCase(mapperService.fieldType("source_long"), ElementType.LONG, checks::longs, StatusChecks::longsFromSource)); r.add( new FieldCase( - docValuesNumberField("missing_long", NumberFieldMapper.NumberType.LONG), - checks::constantNulls, - StatusChecks::constantNulls - ) - ); - r.add( - new FieldCase(sourceNumberField("source_long", NumberFieldMapper.NumberType.LONG), checks::longs, StatusChecks::longsFromSource) - ); - r.add( - new FieldCase( - sourceNumberField("mv_source_long", NumberFieldMapper.NumberType.LONG), + mapperService.fieldType("mv_source_long"), + ElementType.LONG, checks::mvLongsUnordered, StatusChecks::mvLongsFromSource ) ); - r.add( - new FieldCase(docValuesNumberField("int", NumberFieldMapper.NumberType.INTEGER), checks::ints, StatusChecks::intsFromDocValues) - ); + r.add(new FieldCase(mapperService.fieldType("int"), ElementType.INT, checks::ints, StatusChecks::intsFromDocValues)); r.add( new FieldCase( - docValuesNumberField("mv_int", NumberFieldMapper.NumberType.INTEGER), + mapperService.fieldType("mv_int"), + ElementType.INT, checks::mvIntsFromDocValues, StatusChecks::mvIntsFromDocValues ) ); + r.add(new FieldCase(mapperService.fieldType("missing_int"), ElementType.INT, checks::constantNulls, StatusChecks::constantNulls)); + r.add(new FieldCase(mapperService.fieldType("source_int"), ElementType.INT, checks::ints, StatusChecks::intsFromSource)); r.add( new FieldCase( - docValuesNumberField("missing_int", NumberFieldMapper.NumberType.INTEGER), - checks::constantNulls, - StatusChecks::constantNulls - ) - ); - r.add( - new FieldCase(sourceNumberField("source_int", NumberFieldMapper.NumberType.INTEGER), checks::ints, StatusChecks::intsFromSource) - ); - r.add( - new FieldCase( - sourceNumberField("mv_source_int", NumberFieldMapper.NumberType.INTEGER), + mapperService.fieldType("mv_source_int"), + ElementType.INT, checks::mvIntsUnordered, StatusChecks::mvIntsFromSource ) ); + r.add(new FieldCase(mapperService.fieldType("short"), ElementType.INT, checks::shorts, StatusChecks::shortsFromDocValues)); + r.add(new FieldCase(mapperService.fieldType("mv_short"), ElementType.INT, checks::mvShorts, StatusChecks::mvShortsFromDocValues)); + r.add(new FieldCase(mapperService.fieldType("missing_short"), ElementType.INT, checks::constantNulls, StatusChecks::constantNulls)); + r.add(new FieldCase(mapperService.fieldType("byte"), ElementType.INT, checks::bytes, StatusChecks::bytesFromDocValues)); + r.add(new FieldCase(mapperService.fieldType("mv_byte"), ElementType.INT, checks::mvBytes, StatusChecks::mvBytesFromDocValues)); + r.add(new FieldCase(mapperService.fieldType("missing_byte"), ElementType.INT, checks::constantNulls, StatusChecks::constantNulls)); + r.add(new FieldCase(mapperService.fieldType("double"), ElementType.DOUBLE, checks::doubles, StatusChecks::doublesFromDocValues)); r.add( - new FieldCase( - docValuesNumberField("short", NumberFieldMapper.NumberType.SHORT), - checks::shorts, - StatusChecks::shortsFromDocValues - ) - ); - r.add( - new FieldCase( - docValuesNumberField("mv_short", NumberFieldMapper.NumberType.SHORT), - checks::mvShorts, - 
StatusChecks::mvShortsFromDocValues - ) + new FieldCase(mapperService.fieldType("mv_double"), ElementType.DOUBLE, checks::mvDoubles, StatusChecks::mvDoublesFromDocValues) ); r.add( - new FieldCase( - docValuesNumberField("missing_short", NumberFieldMapper.NumberType.SHORT), - checks::constantNulls, - StatusChecks::constantNulls - ) + new FieldCase(mapperService.fieldType("missing_double"), ElementType.DOUBLE, checks::constantNulls, StatusChecks::constantNulls) ); + r.add(new FieldCase(mapperService.fieldType("bool"), ElementType.BOOLEAN, checks::bools, StatusChecks::boolFromDocValues)); + r.add(new FieldCase(mapperService.fieldType("mv_bool"), ElementType.BOOLEAN, checks::mvBools, StatusChecks::mvBoolFromDocValues)); r.add( - new FieldCase(docValuesNumberField("byte", NumberFieldMapper.NumberType.BYTE), checks::bytes, StatusChecks::bytesFromDocValues) + new FieldCase(mapperService.fieldType("missing_bool"), ElementType.BOOLEAN, checks::constantNulls, StatusChecks::constantNulls) ); + r.add(new FieldCase(mapperService.fieldType("kwd"), ElementType.BYTES_REF, checks::tags, StatusChecks::keywordsFromDocValues)); r.add( new FieldCase( - docValuesNumberField("mv_byte", NumberFieldMapper.NumberType.BYTE), - checks::mvBytes, - StatusChecks::mvBytesFromDocValues + mapperService.fieldType("mv_kwd"), + ElementType.BYTES_REF, + checks::mvStringsFromDocValues, + StatusChecks::mvKeywordsFromDocValues ) ); r.add( - new FieldCase( - docValuesNumberField("missing_byte", NumberFieldMapper.NumberType.BYTE), - checks::constantNulls, - StatusChecks::constantNulls - ) + new FieldCase(mapperService.fieldType("missing_kwd"), ElementType.BYTES_REF, checks::constantNulls, StatusChecks::constantNulls) ); + r.add(new FieldCase(storedKeywordField("stored_kwd"), ElementType.BYTES_REF, checks::strings, StatusChecks::keywordsFromStored)); r.add( new FieldCase( - docValuesNumberField("double", NumberFieldMapper.NumberType.DOUBLE), - checks::doubles, - StatusChecks::doublesFromDocValues + storedKeywordField("mv_stored_kwd"), + ElementType.BYTES_REF, + checks::mvStringsUnordered, + StatusChecks::mvKeywordsFromStored ) ); r.add( - new FieldCase( - docValuesNumberField("mv_double", NumberFieldMapper.NumberType.DOUBLE), - checks::mvDoubles, - StatusChecks::mvDoublesFromDocValues - ) + new FieldCase(mapperService.fieldType("source_kwd"), ElementType.BYTES_REF, checks::strings, StatusChecks::keywordsFromSource) ); r.add( new FieldCase( - docValuesNumberField("missing_double", NumberFieldMapper.NumberType.DOUBLE), - checks::constantNulls, - StatusChecks::constantNulls + mapperService.fieldType("mv_source_kwd"), + ElementType.BYTES_REF, + checks::mvStringsUnordered, + StatusChecks::mvKeywordsFromSource ) ); - r.add(new FieldCase(new BooleanFieldMapper.BooleanFieldType("bool"), checks::bools, StatusChecks::boolFromDocValues)); - r.add(new FieldCase(new BooleanFieldMapper.BooleanFieldType("mv_bool"), checks::mvBools, StatusChecks::mvBoolFromDocValues)); - r.add(new FieldCase(new BooleanFieldMapper.BooleanFieldType("missing_bool"), checks::constantNulls, StatusChecks::constantNulls)); - r.add(new FieldCase(new KeywordFieldMapper.KeywordFieldType("kwd"), checks::tags, StatusChecks::keywordsFromDocValues)); + r.add(new FieldCase(mapperService.fieldType("source_text"), ElementType.BYTES_REF, checks::strings, StatusChecks::textFromSource)); r.add( new FieldCase( - new KeywordFieldMapper.KeywordFieldType("mv_kwd"), - checks::mvStringsFromDocValues, - StatusChecks::mvKeywordsFromDocValues + mapperService.fieldType("mv_source_text"), + 
ElementType.BYTES_REF, + checks::mvStringsUnordered, + StatusChecks::mvTextFromSource ) ); - r.add(new FieldCase(new KeywordFieldMapper.KeywordFieldType("missing_kwd"), checks::constantNulls, StatusChecks::constantNulls)); - r.add(new FieldCase(storedKeywordField("stored_kwd"), checks::strings, StatusChecks::keywordsFromStored)); - r.add(new FieldCase(storedKeywordField("mv_stored_kwd"), checks::mvStringsUnordered, StatusChecks::mvKeywordsFromStored)); - r.add(new FieldCase(sourceKeywordField("source_kwd"), checks::strings, StatusChecks::keywordsFromSource)); - r.add(new FieldCase(sourceKeywordField("mv_source_kwd"), checks::mvStringsUnordered, StatusChecks::mvKeywordsFromSource)); - r.add(new FieldCase(new TextFieldMapper.TextFieldType("source_text", false), checks::strings, StatusChecks::textFromSource)); + r.add(new FieldCase(storedTextField("stored_text"), ElementType.BYTES_REF, checks::strings, StatusChecks::textFromStored)); r.add( new FieldCase( - new TextFieldMapper.TextFieldType("mv_source_text", false), + storedTextField("mv_stored_text"), + ElementType.BYTES_REF, checks::mvStringsUnordered, - StatusChecks::mvTextFromSource + StatusChecks::mvTextFromStored ) ); - r.add(new FieldCase(storedTextField("stored_text"), checks::strings, StatusChecks::textFromStored)); - r.add(new FieldCase(storedTextField("mv_stored_text"), checks::mvStringsUnordered, StatusChecks::mvTextFromStored)); r.add( new FieldCase( textFieldWithDelegate("text_with_delegate", new KeywordFieldMapper.KeywordFieldType("kwd")), + ElementType.BYTES_REF, checks::tags, StatusChecks::textWithDelegate ) @@ -654,6 +763,7 @@ private List infoAndChecksForEachType(Block.MvOrdering docValuesMvOrd r.add( new FieldCase( textFieldWithDelegate("mv_text_with_delegate", new KeywordFieldMapper.KeywordFieldType("mv_kwd")), + ElementType.BYTES_REF, checks::mvStringsFromDocValues, StatusChecks::mvTextWithDelegate ) @@ -661,22 +771,25 @@ private List infoAndChecksForEachType(Block.MvOrdering docValuesMvOrd r.add( new FieldCase( textFieldWithDelegate("missing_text_with_delegate", new KeywordFieldMapper.KeywordFieldType("missing_kwd")), + ElementType.BYTES_REF, checks::constantNulls, StatusChecks::constantNullTextWithDelegate ) ); - r.add(new FieldCase(new ProvidedIdFieldMapper(() -> false).fieldType(), checks::ids, StatusChecks::id)); - r.add(new FieldCase(TsidExtractingIdFieldMapper.INSTANCE.fieldType(), checks::ids, StatusChecks::id)); r.add( new FieldCase( - new ValuesSourceReaderOperator.FieldInfo("constant_bytes", List.of(BlockLoader.constantBytes(new BytesRef("foo")))), + new ValuesSourceReaderOperator.FieldInfo( + "constant_bytes", + ElementType.BYTES_REF, + shardIdx -> BlockLoader.constantBytes(new BytesRef("foo")) + ), checks::constantBytes, StatusChecks::constantBytes ) ); r.add( new FieldCase( - new ValuesSourceReaderOperator.FieldInfo("null", List.of(BlockLoader.CONSTANT_NULLS)), + new ValuesSourceReaderOperator.FieldInfo("null", ElementType.NULL, shardIdx -> BlockLoader.CONSTANT_NULLS), checks::constantNulls, StatusChecks::constantNulls ) @@ -728,7 +841,7 @@ void bools(Block block, int position, int key) { void ids(Block block, int position, int key) { BytesRefVector ids = ((BytesRefBlock) block).asVector(); - assertThat(ids.getBytesRef(position, new BytesRef()).utf8ToString(), equalTo("id")); + assertThat(ids.getBytesRef(position, new BytesRef()).utf8ToString(), equalTo("id" + key)); } void constantBytes(Block block, int position, int key) { @@ -1119,9 +1232,15 @@ static void constantNulls(String name, boolean 
forcedRowByRow, int pageCount, in } public void testWithNulls() throws IOException { - MappedFieldType intFt = docValuesNumberField("i", NumberFieldMapper.NumberType.INTEGER); - MappedFieldType longFt = docValuesNumberField("j", NumberFieldMapper.NumberType.LONG); - MappedFieldType doubleFt = docValuesNumberField("d", NumberFieldMapper.NumberType.DOUBLE); + mapperService = new MapperServiceTestCase() { + }.createMapperService(MapperServiceTestCase.mapping(b -> { + fieldExamples(b, "i", "integer"); + fieldExamples(b, "j", "long"); + fieldExamples(b, "d", "double"); + })); + MappedFieldType intFt = mapperService.fieldType("i"); + MappedFieldType longFt = mapperService.fieldType("j"); + MappedFieldType doubleFt = mapperService.fieldType("d"); MappedFieldType kwFt = new KeywordFieldMapper.KeywordFieldType("kw"); NumericDocValuesField intField = new NumericDocValuesField(intFt.name(), 0); @@ -1149,7 +1268,7 @@ public void testWithNulls() throws IOException { DriverContext driverContext = driverContext(); var luceneFactory = new LuceneSourceOperator.Factory( - List.of(mockSearchContext(reader)), + List.of(mockSearchContext(reader, 0)), ctx -> new MatchAllDocsQuery(), randomFrom(DataPartitioning.values()), randomIntBetween(1, 10), @@ -1161,10 +1280,10 @@ public void testWithNulls() throws IOException { driverContext, luceneFactory.get(driverContext), List.of( - factory(reader, intFt).get(driverContext), - factory(reader, longFt).get(driverContext), - factory(reader, doubleFt).get(driverContext), - factory(reader, kwFt).get(driverContext) + factory(reader, intFt, ElementType.INT).get(driverContext), + factory(reader, longFt, ElementType.LONG).get(driverContext), + factory(reader, doubleFt, ElementType.DOUBLE).get(driverContext), + factory(reader, kwFt, ElementType.BYTES_REF).get(driverContext) ), new PageConsumerOperator(page -> { try { @@ -1195,25 +1314,20 @@ public void testWithNulls() throws IOException { assertDriverContext(driverContext); } - private NumberFieldMapper.NumberFieldType docValuesNumberField(String name, NumberFieldMapper.NumberType type) { - return new NumberFieldMapper.NumberFieldType(name, type); + private XContentBuilder fieldExamples(XContentBuilder builder, String name, String type) throws IOException { + simpleField(builder, name, type); + simpleField(builder, "mv_" + name, type); + simpleField(builder, "missing_" + name, type); + sourceField(builder, "source_" + name, type); + return sourceField(builder, "mv_source_" + name, type); } - private NumberFieldMapper.NumberFieldType sourceNumberField(String name, NumberFieldMapper.NumberType type) { - return new NumberFieldMapper.NumberFieldType( - name, - type, - randomBoolean(), - false, - false, - randomBoolean(), - null, - Map.of(), - null, - false, - null, - randomFrom(IndexMode.values()) - ); + private XContentBuilder simpleField(XContentBuilder builder, String name, String type) throws IOException { + return builder.startObject(name).field("type", type).endObject(); + } + + private XContentBuilder sourceField(XContentBuilder builder, String name, String type) throws IOException { + return builder.startObject(name).field("type", type).field("store", false).field("doc_values", false).endObject(); } private KeywordFieldMapper.KeywordFieldType storedKeywordField(String name) { @@ -1232,22 +1346,6 @@ private KeywordFieldMapper.KeywordFieldType storedKeywordField(String name) { ); } - private KeywordFieldMapper.KeywordFieldType sourceKeywordField(String name) { - FieldType ft = new 
FieldType(KeywordFieldMapper.Defaults.FIELD_TYPE); - ft.setDocValuesType(DocValuesType.NONE); - ft.setStored(false); - ft.freeze(); - return new KeywordFieldMapper.KeywordFieldType( - name, - ft, - Lucene.KEYWORD_ANALYZER, - Lucene.KEYWORD_ANALYZER, - Lucene.KEYWORD_ANALYZER, - new KeywordFieldMapper.Builder(name, IndexVersion.current()).docValues(false), - false - ); - } - private TextFieldMapper.TextFieldType storedTextField(String name) { return new TextFieldMapper.TextFieldType( name, @@ -1286,8 +1384,8 @@ public void testNullsShared() { List.of( new ValuesSourceReaderOperator.Factory( List.of( - new ValuesSourceReaderOperator.FieldInfo("null1", List.of(BlockLoader.CONSTANT_NULLS)), - new ValuesSourceReaderOperator.FieldInfo("null2", List.of(BlockLoader.CONSTANT_NULLS)) + new ValuesSourceReaderOperator.FieldInfo("null1", ElementType.NULL, shardIdx -> BlockLoader.CONSTANT_NULLS), + new ValuesSourceReaderOperator.FieldInfo("null2", ElementType.NULL, shardIdx -> BlockLoader.CONSTANT_NULLS) ), List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), 0 @@ -1313,26 +1411,27 @@ public void testNullsShared() { assertDriverContext(driverContext); } - public void testSequentialStoredFieldsTooSmall() { + public void testSequentialStoredFieldsTooSmall() throws IOException { testSequentialStoredFields(false, between(1, ValuesSourceReaderOperator.SEQUENTIAL_BOUNDARY - 1)); } - public void testSequentialStoredFieldsBigEnough() { + public void testSequentialStoredFieldsBigEnough() throws IOException { testSequentialStoredFields( true, between(ValuesSourceReaderOperator.SEQUENTIAL_BOUNDARY, ValuesSourceReaderOperator.SEQUENTIAL_BOUNDARY * 2) ); } - private void testSequentialStoredFields(boolean sequential, int docCount) { + private void testSequentialStoredFields(boolean sequential, int docCount) throws IOException { + initMapping(); DriverContext driverContext = driverContext(); List source = CannedSourceOperator.collectPages(simpleInput(driverContext, docCount, docCount, docCount)); assertThat(source, hasSize(1)); // We want one page for simpler assertions, and we want them all in one segment assertTrue(source.get(0).getBlock(0).asVector().singleSegmentNonDecreasing()); Operator op = new ValuesSourceReaderOperator.Factory( List.of( - fieldInfo(docValuesNumberField("key", NumberFieldMapper.NumberType.INTEGER)), - fieldInfo(storedTextField("stored_text")) + fieldInfo(mapperService.fieldType("key"), ElementType.INT), + fieldInfo(storedTextField("stored_text"), ElementType.BYTES_REF) ), List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), 0 @@ -1355,7 +1454,8 @@ private void testSequentialStoredFields(boolean sequential, int docCount) { assertDriverContext(driverContext); } - public void testDescriptionOfMany() { + public void testDescriptionOfMany() throws IOException { + initIndex(1, 1); List cases = infoAndChecksForEachType(randomFrom(Block.MvOrdering.values())); ValuesSourceReaderOperator.Factory factory = new ValuesSourceReaderOperator.Factory( @@ -1368,4 +1468,64 @@ public void testDescriptionOfMany() { assertThat(op.toString(), equalTo("ValuesSourceReaderOperator[fields = [" + cases.size() + " fields]]")); } } + + public void testManyShards() throws IOException { + initMapping(); + int shardCount = between(2, 10); + int size = between(100, 1000); + Directory[] dirs = new Directory[shardCount]; + IndexReader[] readers = new IndexReader[shardCount]; + Closeable[] closeMe = new Closeable[shardCount * 2]; + 
Set seenShards = new TreeSet<>(); + Map keyCounts = new TreeMap<>(); + try { + for (int d = 0; d < dirs.length; d++) { + closeMe[d * 2 + 1] = dirs[d] = newDirectory(); + closeMe[d * 2] = readers[d] = initIndex(dirs[d], size, between(10, size * 2)); + } + List contexts = new ArrayList<>(); + List readerShardContexts = new ArrayList<>(); + for (int s = 0; s < shardCount; s++) { + contexts.add(mockSearchContext(readers[s], s)); + readerShardContexts.add(new ValuesSourceReaderOperator.ShardContext(readers[s], () -> SourceLoader.FROM_STORED_SOURCE)); + } + var luceneFactory = new LuceneSourceOperator.Factory( + contexts, + ctx -> new MatchAllDocsQuery(), + DataPartitioning.SHARD, + randomIntBetween(1, 10), + 1000, + LuceneOperator.NO_LIMIT + ); + MappedFieldType ft = mapperService.fieldType("key"); + var readerFactory = new ValuesSourceReaderOperator.Factory( + List.of(new ValuesSourceReaderOperator.FieldInfo("key", ElementType.INT, shardIdx -> { + seenShards.add(shardIdx); + return ft.blockLoader(blContext()); + })), + readerShardContexts, + 0 + ); + DriverContext driverContext = driverContext(); + List results = drive( + readerFactory.get(driverContext), + CannedSourceOperator.collectPages(luceneFactory.get(driverContext)).iterator(), + driverContext + ); + assertThat(seenShards, equalTo(IntStream.range(0, shardCount).boxed().collect(Collectors.toCollection(TreeSet::new)))); + for (Page p : results) { + IntBlock keyBlock = p.getBlock(1); + IntVector keys = keyBlock.asVector(); + for (int i = 0; i < keys.getPositionCount(); i++) { + keyCounts.merge(keys.getInt(i), 1, (prev, one) -> prev + one); + } + } + assertThat(keyCounts.keySet(), hasSize(size)); + for (int k = 0; k < size; k++) { + assertThat(keyCounts.get(k), equalTo(shardCount)); + } + } finally { + IOUtils.close(closeMe); + } + } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AggregationOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AggregationOperatorTests.java index 784d5134e9608..884b702a3b703 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AggregationOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AggregationOperatorTests.java @@ -7,8 +7,6 @@ package org.elasticsearch.compute.operator; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.aggregation.AggregatorMode; import org.elasticsearch.compute.aggregation.MaxLongAggregatorFunction; import org.elasticsearch.compute.aggregation.MaxLongAggregatorFunctionSupplier; @@ -35,7 +33,7 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { } @Override - protected Operator.OperatorFactory simpleWithMode(BigArrays bigArrays, AggregatorMode mode) { + protected Operator.OperatorFactory simpleWithMode(AggregatorMode mode) { List sumChannels, maxChannels; if (mode.isInputPartial()) { int sumInterChannelCount = SumLongAggregatorFunction.intermediateStateDesc().size(); @@ -48,8 +46,8 @@ protected Operator.OperatorFactory simpleWithMode(BigArrays bigArrays, Aggregato return new AggregationOperator.AggregationOperatorFactory( List.of( - new SumLongAggregatorFunctionSupplier(bigArrays, sumChannels).aggregatorFactory(mode), - new MaxLongAggregatorFunctionSupplier(bigArrays, maxChannels).aggregatorFactory(mode) + new SumLongAggregatorFunctionSupplier(sumChannels).aggregatorFactory(mode), + new 
MaxLongAggregatorFunctionSupplier(maxChannels).aggregatorFactory(mode) ), mode ); @@ -81,10 +79,4 @@ protected void assertSimpleOutput(List input, List results) { sum.assertSimpleOutput(input.stream().map(p -> p.getBlock(0)).toList(), sums); max.assertSimpleOutput(input.stream().map(p -> p.getBlock(0)).toList(), maxs); } - - @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - assumeTrue("doesn't use big array so never breaks", false); - return null; - } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java index 290756e81cfae..25d79d0808741 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java @@ -8,21 +8,8 @@ package org.elasticsearch.compute.operator; import org.elasticsearch.common.breaker.CircuitBreaker; -import org.elasticsearch.common.breaker.CircuitBreakingException; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.MockBigArrays; -import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; import org.elasticsearch.compute.data.BlockFactory; -import org.elasticsearch.compute.data.MockBlockFactory; -import org.elasticsearch.indices.CrankyCircuitBreakerService; -import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.test.ESTestCase; -import org.junit.After; - -import java.util.ArrayList; -import java.util.List; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.matchesPattern; @@ -30,12 +17,12 @@ /** * Superclass for testing any {@link Operator}, including {@link SourceOperator}s. */ -public abstract class AnyOperatorTestCase extends ESTestCase { +public abstract class AnyOperatorTestCase extends ComputeTestCase { /** * The operator configured a "simple" or basic way, used for smoke testing - * descriptions and {@link BigArrays} and scatter/gather. + * descriptions, {@link CircuitBreaker}s, and scatter/gather. */ - protected abstract Operator.OperatorFactory simple(BigArrays bigArrays); // TODO remove BigArrays - that's part of the context + protected abstract Operator.OperatorFactory simple(); /** * The description of the operator produced by {@link #simple}. @@ -66,7 +53,7 @@ public abstract class AnyOperatorTestCase extends ESTestCase { * Makes sure the description of {@link #simple} matches the {@link #expectedDescriptionOfSimple}. */ public final void testSimpleDescription() { - Operator.OperatorFactory factory = simple(nonBreakingBigArrays()); + Operator.OperatorFactory factory = simple(); String description = factory.describe(); assertThat(description, equalTo(expectedDescriptionOfSimple())); try (Operator op = factory.get(driverContext())) { @@ -82,63 +69,21 @@ public final void testSimpleDescription() { * Makes sure the description of {@link #simple} matches the {@link #expectedDescriptionOfSimple}. 
*/ public final void testSimpleToString() { - try (Operator operator = simple(nonBreakingBigArrays()).get(driverContext())) { + try (Operator operator = simple().get(driverContext())) { assertThat(operator.toString(), equalTo(expectedToStringOfSimple())); } } - /** - * A {@link BigArrays} that won't throw {@link CircuitBreakingException}. - *
<p> - * Rather than using the {@link NoneCircuitBreakerService} we use a - * very large limit so tests can call {@link CircuitBreaker#getUsed()}. - * </p>
- */ - protected final BigArrays nonBreakingBigArrays() { - return new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofBytes(Integer.MAX_VALUE)).withCircuitBreaking(); - } - /** * A {@link DriverContext} with a nonBreakingBigArrays. */ protected DriverContext driverContext() { // TODO make this final once all operators support memory tracking - BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofGb(1)).withCircuitBreaking(); - CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); - breakers.add(breaker); - BlockFactory factory = new MockBlockFactory(breaker, bigArrays); - blockFactories.add(factory); - return new DriverContext(bigArrays, factory); - } - - protected final DriverContext nonBreakingDriverContext() { // TODO drop this once the driverContext method isn't overrideable - return new DriverContext(nonBreakingBigArrays(), BlockFactory.getNonBreakingInstance()); + BlockFactory blockFactory = blockFactory(); + return new DriverContext(blockFactory.bigArrays(), blockFactory); } - private final List breakers = new ArrayList<>(); - private final List blockFactories = new ArrayList<>(); - protected final DriverContext crankyDriverContext() { - CrankyCircuitBreakerService cranky = new CrankyCircuitBreakerService(); - BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, cranky).withCircuitBreaking(); - CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); - breakers.add(breaker); - BlockFactory blockFactory = new MockBlockFactory(breaker, bigArrays); - blockFactories.add(blockFactory); - return new DriverContext(bigArrays, blockFactory); - } - - @After - public void allBreakersEmpty() throws Exception { - // first check that all big arrays are released, which can affect breakers - MockBigArrays.ensureAllArraysAreReleased(); - - for (CircuitBreaker breaker : breakers) { - for (var factory : blockFactories) { - if (factory instanceof MockBlockFactory mockBlockFactory) { - mockBlockFactory.ensureAllBlocksAreReleased(); - } - } - assertThat("Unexpected used in breaker: " + breaker, breaker.getUsed(), equalTo(0L)); - } + BlockFactory blockFactory = crankyBlockFactory(); + return new DriverContext(blockFactory.bigArrays(), blockFactory); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java index 8cd7116677fd0..a4370face45ad 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java @@ -43,6 +43,8 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.LongStream; @@ -76,7 +78,7 @@ public void testBasic() { final DriverContext driverContext; if (randomBoolean()) { localBreaker = new LocalCircuitBreaker(globalBlockFactory.breaker(), between(0, 1024), between(0, 4096)); - BlockFactory localFactory = new BlockFactory(localBreaker, globalBlockFactory.bigArrays()); + BlockFactory localFactory = globalBlockFactory.newChildFactory(localBreaker); driverContext = new 
DriverContext(globalBlockFactory.bigArrays(), localFactory); } else { driverContext = new DriverContext(globalBlockFactory.bigArrays(), globalBlockFactory); @@ -213,7 +215,7 @@ public void testFailure() throws Exception { final DriverContext driverContext; if (randomBoolean()) { localBreaker = new LocalCircuitBreaker(globalBlockFactory.breaker(), between(0, 1024), between(0, 4096)); - BlockFactory localFactory = new BlockFactory(localBreaker, globalBlockFactory.bigArrays()); + BlockFactory localFactory = globalBlockFactory.newChildFactory(localBreaker); driverContext = new DriverContext(globalBlockFactory.bigArrays(), localFactory); } else { driverContext = new DriverContext(globalBlockFactory.bigArrays(), globalBlockFactory); @@ -270,6 +272,53 @@ protected void doClose() { } } + public void testIsFinished() { + int iters = iterations(10, 10_000); + BlockFactory blockFactory = blockFactory(); + for (int i = 0; i < iters; i++) { + DriverContext driverContext = new DriverContext(blockFactory.bigArrays(), blockFactory); + CyclicBarrier barrier = new CyclicBarrier(2); + AsyncOperator asyncOperator = new AsyncOperator(driverContext, between(1, 10)) { + @Override + protected void performAsync(Page inputPage, ActionListener<Page> listener) { + ActionRunnable<Page> command = new ActionRunnable<>(listener) { + @Override + protected void doRun() { + try { + barrier.await(10, TimeUnit.SECONDS); + } catch (Exception e) { + throw new AssertionError(e); + } + listener.onFailure(new ElasticsearchException("simulated")); + } + }; + threadPool.executor(ESQL_TEST_EXECUTOR).execute(command); + } + + @Override + protected void doClose() { + + } + }; + asyncOperator.addInput(new Page(blockFactory.newConstantIntBlockWith(randomInt(), between(1, 10)))); + asyncOperator.finish(); + try { + barrier.await(10, TimeUnit.SECONDS); + } catch (Exception e) { + throw new AssertionError(e); + } + int numChecks = between(10, 100); + while (--numChecks >= 0) { + try { + assertFalse("must not be finished or failed", asyncOperator.isFinished()); + } catch (ElasticsearchException e) { + assertThat(e.getMessage(), equalTo("simulated")); + break; + } + } + } + } + static class LookupService { private final ThreadPool threadPool; private final Map dict; diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/CannedSourceOperator.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/CannedSourceOperator.java index 47febc09e45f5..01f51b32edb1d 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/CannedSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/CannedSourceOperator.java @@ -9,7 +9,9 @@ import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -47,7 +49,7 @@ public static Page mergePages(List<Page> pages) { Block.Builder[] builders = new Block.Builder[first.getBlockCount()]; try { for (int b = 0; b < builders.length; b++) { - builders[b] = first.getBlock(b).elementType().newBlockBuilder(totalPositions); + builders[b] = first.getBlock(b).elementType().newBlockBuilder(totalPositions, TestBlockFactory.getNonBreakingInstance()); } for (Page p : pages) { for (int b = 0; b < builders.length; b++) { @@ -79,11 +81,12 @@ public static
Page mergePages(List pages) { */ public static List deepCopyOf(List pages) { List out = new ArrayList<>(pages.size()); + BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); for (Page p : pages) { Block[] blocks = new Block[p.getBlockCount()]; for (int b = 0; b < blocks.length; b++) { Block orig = p.getBlock(b); - Block.Builder builder = orig.elementType().newBlockBuilder(p.getPositionCount()); + Block.Builder builder = orig.elementType().newBlockBuilder(p.getPositionCount(), blockFactory); builder.copyFrom(orig, 0, p.getPositionCount()); blocks[b] = builder.build(); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnExtractOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnExtractOperatorTests.java index 485610f5842bb..2a8c259f069b4 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnExtractOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnExtractOperatorTests.java @@ -9,8 +9,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BytesRefBlock; @@ -48,7 +46,7 @@ public String toString() { } @Override - protected Operator.OperatorFactory simple(BigArrays bigArrays) { + protected Operator.OperatorFactory simple() { Supplier expEval = () -> new FirstWord(0); return new ColumnExtractOperator.Factory( new ElementType[] { ElementType.BYTES_REF }, @@ -58,7 +56,7 @@ public Block eval(Page page) { BytesRefBlock input = page.getBlock(0); for (int i = 0; i < input.getPositionCount(); i++) { if (input.getBytesRef(i, new BytesRef()).utf8ToString().startsWith("no_")) { - return Block.constantNullBlock(input.getPositionCount(), input.blockFactory()); + return input.blockFactory().newConstantNullBlock(input.getPositionCount()); } } input.incRef(); @@ -96,18 +94,13 @@ protected void assertSimpleOutput(List input, List results) { } } - @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - return ByteSizeValue.ofBytes(between(1, 32)); - } - public void testAllNullValues() { DriverContext driverContext = driverContext(); BytesRef scratch = new BytesRef(); - Block input1 = BytesRefBlock.newBlockBuilder(1, driverContext.blockFactory()).appendBytesRef(new BytesRef("can_match")).build(); - Block input2 = BytesRefBlock.newBlockBuilder(1, driverContext.blockFactory()).appendBytesRef(new BytesRef("no_match")).build(); + Block input1 = driverContext.blockFactory().newBytesRefBlockBuilder(1).appendBytesRef(new BytesRef("can_match")).build(); + Block input2 = driverContext.blockFactory().newBytesRefBlockBuilder(1).appendBytesRef(new BytesRef("no_match")).build(); List inputPages = List.of(new Page(input1), new Page(input2)); - List outputPages = drive(simple(driverContext.bigArrays()).get(driverContext), inputPages.iterator(), driverContext); + List outputPages = drive(simple().get(driverContext), inputPages.iterator(), driverContext); BytesRefBlock output1 = outputPages.get(0).getBlock(1); BytesRefBlock output2 = outputPages.get(1).getBlock(1); assertThat(output1.getBytesRef(0, scratch), equalTo(new BytesRef("can_match"))); diff --git 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ComputeTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ComputeTestCase.java new file mode 100644 index 0000000000000..ce62fb9896eba --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ComputeTestCase.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.MockBlockFactory; +import org.elasticsearch.indices.CrankyCircuitBreakerService; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.test.ESTestCase; +import org.junit.After; + +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +/** + * Superclass for testing with blocks and operators + */ +public abstract class ComputeTestCase extends ESTestCase { + + private final List breakers = new ArrayList<>(); + private final List blockFactories = new ArrayList<>(); + + /** + * A {@link BigArrays} that won't throw {@link CircuitBreakingException}. + *
<p> + * Rather than using the {@link NoneCircuitBreakerService} we use a + * very large limit so tests can call {@link CircuitBreaker#getUsed()}. + * </p>
+ */ + protected final BigArrays nonBreakingBigArrays() { + return new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofBytes(Integer.MAX_VALUE)).withCircuitBreaking(); + } + + /** + * Build a {@link BlockFactory} with a huge limit. + */ + protected final BlockFactory blockFactory() { + return blockFactory(ByteSizeValue.ofGb(1)); + } + + /** + * Build a {@link BlockFactory} with a configured limit. + */ + protected final BlockFactory blockFactory(ByteSizeValue limit) { + BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, limit).withCircuitBreaking(); + CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); + breakers.add(breaker); + BlockFactory factory = new MockBlockFactory(breaker, bigArrays); + blockFactories.add(factory); + return factory; + } + + /** + * Build a {@link BlockFactory} that randomly fails. + */ + protected final BlockFactory crankyBlockFactory() { + CrankyCircuitBreakerService cranky = new CrankyCircuitBreakerService(); + BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, cranky).withCircuitBreaking(); + CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); + breakers.add(breaker); + BlockFactory blockFactory = new MockBlockFactory(breaker, bigArrays); + blockFactories.add(blockFactory); + return blockFactory; + } + + @After + public final void allBreakersEmpty() throws Exception { + // first check that all big arrays are released, which can affect breakers + MockBigArrays.ensureAllArraysAreReleased(); + for (var factory : blockFactories) { + if (factory instanceof MockBlockFactory mockBlockFactory) { + mockBlockFactory.ensureAllBlocksAreReleased(); + } + } + for (CircuitBreaker breaker : breakers) { + assertThat("Unexpected used in breaker: " + breaker, breaker.getUsed(), equalTo(0L)); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverContextTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverContextTests.java index 27076c2adf2d2..a3af5aafcbee3 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverContextTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverContextTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -167,7 +167,7 @@ static class AssertingDriverContext extends DriverContext { AssertingDriverContext() { super( new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService()), - BlockFactory.getNonBreakingInstance() + TestBlockFactory.getNonBreakingInstance() ); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java index f6b4fbc817940..ec9952cdce022 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java +++ 
b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java @@ -30,19 +30,23 @@ public void testToXContent() { new DriverStatus.OperatorStatus("ValuesSourceReader", ValuesSourceReaderOperatorStatusTests.simple()) ) ); - assertThat( - Strings.toString(status), - equalTo( - """ - {"operators":[""" - + """ - {"operator":"LuceneSource","status":""" - + LuceneSourceOperatorStatusTests.simpleToJson() - + "},{\"operator\":\"ValuesSourceReader\",\"status\":" - + ValuesSourceReaderOperatorStatusTests.simpleToJson() - + "}]}" - ) - ); + assertThat(Strings.toString(status, true, true), equalTo(""" + { + "operators" : [ + { + "operator" : "LuceneSource", + "status" : + """.stripTrailing() + " " + LuceneSourceOperatorStatusTests.simpleToJson().replace("\n", "\n ") + """ + + }, + { + "operator" : "ValuesSourceReader", + "status" : + """.stripTrailing() + " " + ValuesSourceReaderOperatorStatusTests.simpleToJson().replace("\n", "\n ") + """ + + } + ] + }""")); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java index cdae4283540c4..c10bcf8d49ca4 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java @@ -39,15 +39,34 @@ public void testToXContent() { ), List.of(new DriverStatus.OperatorStatus("ExchangeSink", ExchangeSinkOperatorStatusTests.simple())) ); - assertThat(Strings.toString(status), equalTo(""" - {"sessionId":"ABC:123","last_updated":"1973-11-29T09:27:23.214Z","status":"running", - """.trim() + """ - "completed_operators":[{"operator":"LuceneSource","status": - """.trim() + LuceneSourceOperatorStatusTests.simpleToJson() + """ - },{"operator":"ValuesSourceReader","status": - """.trim() + ValuesSourceReaderOperatorStatusTests.simpleToJson() + """ - }],"active_operators":[{"operator":"ExchangeSink","status": - """.trim() + ExchangeSinkOperatorStatusTests.simpleToJson() + "}]}")); + assertThat(Strings.toString(status, true, true), equalTo(""" + { + "sessionId" : "ABC:123", + "last_updated" : "1973-11-29T09:27:23.214Z", + "status" : "running", + "completed_operators" : [ + { + "operator" : "LuceneSource", + "status" : + """.trim() + " " + LuceneSourceOperatorStatusTests.simpleToJson().replace("\n", "\n ") + """ + + }, + { + "operator" : "ValuesSourceReader", + "status" : + """.stripTrailing() + " " + ValuesSourceReaderOperatorStatusTests.simpleToJson().replace("\n", "\n ") + """ + + } + ], + "active_operators" : [ + { + "operator" : "ExchangeSink", + "status" : + """.stripTrailing() + " " + ExchangeSinkOperatorStatusTests.simpleToJson().replace("\n", "\n ") + """ + + } + ] + }""")); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/EvalOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/EvalOperatorTests.java index c755c5eafe08d..0894e665b8fed 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/EvalOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/EvalOperatorTests.java @@ -7,8 +7,6 @@ package org.elasticsearch.compute.operator; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.BigArrays; import 
org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.LongBlock; @@ -37,7 +35,7 @@ record Addition(DriverContext driverContext, int lhs, int rhs) implements EvalOp public Block eval(Page page) { LongVector lhsVector = page.getBlock(0).asVector(); LongVector rhsVector = page.getBlock(1).asVector(); - try (LongVector.FixedBuilder result = LongVector.newVectorFixedBuilder(page.getPositionCount(), driverContext.blockFactory())) { + try (LongVector.FixedBuilder result = driverContext.blockFactory().newLongVectorFixedBuilder(page.getPositionCount())) { for (int p = 0; p < page.getPositionCount(); p++) { result.appendLong(lhsVector.getLong(p) + rhsVector.getLong(p)); } @@ -67,7 +65,7 @@ public void close() {} } @Override - protected Operator.OperatorFactory simple(BigArrays bigArrays) { + protected Operator.OperatorFactory simple() { return new EvalOperator.EvalOperatorFactory(new EvalOperator.ExpressionEvaluator.Factory() { @Override public EvalOperator.ExpressionEvaluator get(DriverContext context) { @@ -115,9 +113,4 @@ public void testReadFromBlock() { results.forEach(Page::releaseBlocks); assertThat(context.breaker().getUsed(), equalTo(0L)); } - - @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - return ByteSizeValue.ofBytes(between(1, 8000)); - } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FilterOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FilterOperatorTests.java index d067435ba9aaa..d68e03203b9af 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FilterOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FilterOperatorTests.java @@ -7,8 +7,6 @@ package org.elasticsearch.compute.operator; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; @@ -36,7 +34,7 @@ record SameLastDigit(DriverContext context, int lhs, int rhs) implements EvalOpe public Block eval(Page page) { LongVector lhsVector = page.getBlock(0).asVector(); LongVector rhsVector = page.getBlock(1).asVector(); - BooleanVector.FixedBuilder result = BooleanVector.newVectorFixedBuilder(page.getPositionCount(), context.blockFactory()); + BooleanVector.FixedBuilder result = context.blockFactory().newBooleanVectorFixedBuilder(page.getPositionCount()); for (int p = 0; p < page.getPositionCount(); p++) { result.appendBoolean(lhsVector.getLong(p) % 10 == rhsVector.getLong(p) % 10); } @@ -53,7 +51,7 @@ public void close() {} } @Override - protected Operator.OperatorFactory simple(BigArrays bigArrays) { + protected Operator.OperatorFactory simple() { return new FilterOperator.FilterOperatorFactory(dvrCtx -> new SameLastDigit(dvrCtx, 0, 1)); } @@ -114,9 +112,4 @@ public void testReadFromBlock() { results.forEach(Page::releaseBlocks); assertThat(context.breaker().getUsed(), equalTo(0L)); } - - @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - return ByteSizeValue.ofBytes(between(1, 600)); - } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java index 9403d22f2b4c4..87675e3139a43 100644 --- 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java
@@ -11,12 +11,11 @@
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.compute.aggregation.AggregatorMode;
-import org.elasticsearch.compute.data.BlockFactory;
 import org.elasticsearch.compute.data.BlockTestUtils;
 import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.compute.data.TestBlockFactory;
 import org.elasticsearch.compute.operator.exchange.ExchangeSinkHandler;
 import org.elasticsearch.compute.operator.exchange.ExchangeSinkOperator;
 import org.elasticsearch.compute.operator.exchange.ExchangeSourceHandler;
@@ -50,27 +49,23 @@ public abstract class ForkingOperatorTestCase extends OperatorTestCase {
     private static final String ESQL_TEST_EXECUTOR = "esql_test_executor";

-    protected abstract Operator.OperatorFactory simpleWithMode(BigArrays bigArrays, AggregatorMode mode);
+    protected abstract Operator.OperatorFactory simpleWithMode(AggregatorMode mode);

     @Override
-    protected final Operator.OperatorFactory simple(BigArrays bigArrays) {
-        return simpleWithMode(bigArrays, AggregatorMode.SINGLE);
+    protected final Operator.OperatorFactory simple() {
+        return simpleWithMode(AggregatorMode.SINGLE);
     }

     public final void testInitialFinal() {
-        BigArrays bigArrays = nonBreakingBigArrays();
         DriverContext driverContext = driverContext();
         List<Page> input = CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(1_000, 100_000)));
-        List<Page> origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance());
+        List<Page> origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance());
         List<Page> results = new ArrayList<>();
         try (
             Driver d = new Driver(
                 driverContext,
                 new CannedSourceOperator(input.iterator()),
-                List.of(
-                    simpleWithMode(bigArrays, AggregatorMode.INITIAL).get(driverContext),
-                    simpleWithMode(bigArrays, AggregatorMode.FINAL).get(driverContext)
-                ),
+                List.of(simpleWithMode(AggregatorMode.INITIAL).get(driverContext), simpleWithMode(AggregatorMode.FINAL).get(driverContext)),
                 new TestResultPageSinkOperator(page -> results.add(page)),
                 () -> {}
             )
@@ -82,17 +77,16 @@ public final void testInitialFinal() {
     }

     public final void testManyInitialFinal() {
-        BigArrays bigArrays = nonBreakingBigArrays();
         DriverContext driverContext = driverContext();
         List<Page> input = CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(1_000, 100_000)));
-        List<Page> origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance());
-        List<Page> partials = oneDriverPerPage(input, () -> List.of(simpleWithMode(bigArrays, AggregatorMode.INITIAL).get(driverContext)));
+        List<Page> origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance());
+        List<Page> partials = oneDriverPerPage(input, () -> List.of(simpleWithMode(AggregatorMode.INITIAL).get(driverContext)));
         List<Page> results = new ArrayList<>();
         try (
             Driver d = new Driver(
                 driverContext,
                 new CannedSourceOperator(partials.iterator()),
-                List.of(simpleWithMode(bigArrays, AggregatorMode.FINAL).get(driverContext)),
+                List.of(simpleWithMode(AggregatorMode.FINAL).get(driverContext)),
                 new TestResultPageSinkOperator(results::add),
                 () -> {}
             )
@@ -104,10 +98,9 @@ public final void testManyInitialFinal() {
     }

     public final void testInitialIntermediateFinal() {
-        BigArrays bigArrays = nonBreakingBigArrays();
         DriverContext driverContext = driverContext();
         List<Page> input = CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(1_000, 100_000)));
-        List<Page> origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance());
+        List<Page> origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance());
         List<Page> results = new ArrayList<>();

         try (
@@ -115,9 +108,9 @@ public final void testInitialIntermediateFinal() {
                 driverContext,
                 new CannedSourceOperator(input.iterator()),
                 List.of(
-                    simpleWithMode(bigArrays, AggregatorMode.INITIAL).get(driverContext),
-                    simpleWithMode(bigArrays, AggregatorMode.INTERMEDIATE).get(driverContext),
-                    simpleWithMode(bigArrays, AggregatorMode.FINAL).get(driverContext)
+                    simpleWithMode(AggregatorMode.INITIAL).get(driverContext),
+                    simpleWithMode(AggregatorMode.INTERMEDIATE).get(driverContext),
+                    simpleWithMode(AggregatorMode.FINAL).get(driverContext)
                 ),
                 new TestResultPageSinkOperator(page -> results.add(page)),
                 () -> {}
@@ -130,16 +123,15 @@ public final void testInitialIntermediateFinal() {
     }

     public final void testManyInitialManyPartialFinal() {
-        BigArrays bigArrays = nonBreakingBigArrays();
         DriverContext driverContext = driverContext();
         List<Page> input = CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(1_000, 100_000)));
-        List<Page> origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance());
+        List<Page> origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance());

-        List<Page> partials = oneDriverPerPage(input, () -> List.of(simpleWithMode(bigArrays, AggregatorMode.INITIAL).get(driverContext)));
+        List<Page> partials = oneDriverPerPage(input, () -> List.of(simpleWithMode(AggregatorMode.INITIAL).get(driverContext)));
         Collections.shuffle(partials, random());
         List<Page> intermediates = oneDriverPerPageList(
             randomSplits(partials).iterator(),
-            () -> List.of(simpleWithMode(bigArrays, AggregatorMode.INTERMEDIATE).get(driverContext))
+            () -> List.of(simpleWithMode(AggregatorMode.INTERMEDIATE).get(driverContext))
         );

         List<Page> results = new ArrayList<>();
@@ -147,7 +139,7 @@ public final void testManyInitialManyPartialFinal() {
             Driver d = new Driver(
                 driverContext,
                 new CannedSourceOperator(intermediates.iterator()),
-                List.of(simpleWithMode(bigArrays, AggregatorMode.FINAL).get(driverContext)),
+                List.of(simpleWithMode(AggregatorMode.FINAL).get(driverContext)),
                 new TestResultPageSinkOperator(results::add),
                 () -> {}
             )
@@ -161,11 +153,10 @@ public final void testManyInitialManyPartialFinal() {
     }
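For reference, this is the shape a concrete test now gives the abstract simpleWithMode hook above. A minimal sketch modeled on the HashAggregationOperatorTests hunk further down in this patch, with an illustrative sum-only channel list rather than that test's real wiring:

    @Override
    protected Operator.OperatorFactory simpleWithMode(AggregatorMode mode) {
        // group by channel 0 and sum channel 1; the real test also wires a max aggregator
        // and derives the channel lists from the mode, as its hunk below shows. Memory now
        // comes from the DriverContext at get() time instead of an injected BigArrays.
        return new HashAggregationOperator.HashAggregationOperatorFactory(
            List.of(new HashAggregationOperator.GroupSpec(0, ElementType.LONG)),
            List.of(new SumLongAggregatorFunctionSupplier(List.of(1)).groupingAggregatorFactory(mode)),
            randomPageSize()
        );
    }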
    // Similar to testManyInitialManyPartialFinal, but uses with the DriverRunner infrastructure
    // to move the data through the pipeline.
    public final void testManyInitialManyPartialFinalRunner() {
-        BigArrays bigArrays = nonBreakingBigArrays();
         List<Page> input = CannedSourceOperator.collectPages(simpleInput(driverContext().blockFactory(), between(1_000, 100_000)));
-        List<Page> origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance());
+        List<Page> origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance());
         List<Page> results = new ArrayList<>();
-        List<Driver> drivers = createDriversForInput(bigArrays, input, results, false /* no throwing ops */);
+        List<Driver> drivers = createDriversForInput(input, results, false /* no throwing ops */);
         var runner = new DriverRunner(threadPool.getThreadContext()) {
             @Override
             protected void start(Driver driver, ActionListener<Void> listener) {
@@ -185,11 +176,10 @@ protected void start(Driver driver, ActionListener<Void> listener) {
     // @com.carrotsearch.randomizedtesting.annotations.Repeat(iterations = 100)
     public final void testManyInitialManyPartialFinalRunnerThrowing() throws Exception {
         DriverContext driverContext = driverContext();
-        BigArrays bigArrays = nonBreakingBigArrays();
         List<Page> input = CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(1_000, 100_000)));
         List<Page> results = new ArrayList<>();
-        List<Driver> drivers = createDriversForInput(bigArrays, input, results, true /* one throwing op */);
+        List<Driver> drivers = createDriversForInput(input, results, true /* one throwing op */);
         var runner = new DriverRunner(threadPool.getThreadContext()) {
             @Override
             protected void start(Driver driver, ActionListener<Void> listener) {
@@ -209,7 +199,7 @@ protected void start(Driver driver, ActionListener<Void> listener) {
     // intermediate results. The second is a single operator that consumes intermediate input and
     // produces the final results. The throwingOp param allows to construct a pipeline that will
     // fail by throwing an exception in one of the operators.
-    List<Driver> createDriversForInput(BigArrays bigArrays, List<Page> input, List<Page> results, boolean throwingOp) {
+    List<Driver> createDriversForInput(List<Page> input, List<Page> results, boolean throwingOp) {
         Collection<List<Page>> splitInput = randomSplits(input, randomIntBetween(2, 4));

         ExchangeSinkHandler sinkExchanger = new ExchangeSinkHandler(randomIntBetween(2, 10), threadPool::relativeTimeInMillis);
@@ -233,9 +223,9 @@ List<Driver> createDriversForInput(BigArrays bigArrays, List<Page> input, List<

diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashAggregationOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashAggregationOperatorTests.java
sumChannels, maxChannels; if (mode.isInputPartial()) { int sumChannelCount = SumLongAggregatorFunction.intermediateStateDesc().size(); @@ -55,11 +53,10 @@ protected Operator.OperatorFactory simpleWithMode(BigArrays bigArrays, Aggregato return new HashAggregationOperator.HashAggregationOperatorFactory( List.of(new HashAggregationOperator.GroupSpec(0, ElementType.LONG)), List.of( - new SumLongAggregatorFunctionSupplier(bigArrays, sumChannels).groupingAggregatorFactory(mode), - new MaxLongAggregatorFunctionSupplier(bigArrays, maxChannels).groupingAggregatorFactory(mode) + new SumLongAggregatorFunctionSupplier(sumChannels).groupingAggregatorFactory(mode), + new MaxLongAggregatorFunctionSupplier(maxChannels).groupingAggregatorFactory(mode) ), - randomPageSize(), - bigArrays + randomPageSize() ); } @@ -93,10 +90,4 @@ protected void assertSimpleOutput(List input, List results) { max.assertSimpleGroup(input, maxs, i, group); } } - - @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - return ByteSizeValue.ofBytes(between(1, 32)); - } - } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/LimitOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/LimitOperatorTests.java index 8c85f5927196f..e366646ecd0f5 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/LimitOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/LimitOperatorTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.compute.operator; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.BasicBlockTests; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; @@ -24,7 +23,7 @@ public class LimitOperatorTests extends OperatorTestCase { @Override - protected LimitOperator.Factory simple(BigArrays bigArrays) { + protected LimitOperator.Factory simple() { return new LimitOperator.Factory(100); } @@ -51,14 +50,14 @@ protected void assertSimpleOutput(List input, List results) { } @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - assumeFalse("doesn't use big arrays", true); + protected ByteSizeValue enoughMemoryForSimple() { + assumeFalse("doesn't allocate, just filters", true); return null; } public void testStatus() { BlockFactory blockFactory = driverContext().blockFactory(); - LimitOperator op = simple(BigArrays.NON_RECYCLING_INSTANCE).get(driverContext()); + LimitOperator op = simple().get(driverContext()); LimitOperator.Status status = op.status(); assertThat(status.limit(), equalTo(100)); @@ -80,7 +79,7 @@ public void testStatus() { public void testNeedInput() { BlockFactory blockFactory = driverContext().blockFactory(); - try (LimitOperator op = simple(BigArrays.NON_RECYCLING_INSTANCE).get(driverContext())) { + try (LimitOperator op = simple().get(driverContext())) { assertTrue(op.needsInput()); Page p = new Page(blockFactory.newConstantNullBlock(10)); op.addInput(p); @@ -95,7 +94,7 @@ public void testNeedInput() { public void testBlockBiggerThanRemaining() { BlockFactory blockFactory = driverContext().blockFactory(); for (int i = 0; i < 100; i++) { - try (var op = simple(BigArrays.NON_RECYCLING_INSTANCE).get(driverContext())) { + try (var op = simple().get(driverContext())) { assertTrue(op.needsInput()); Page p = new Page(randomBlock(blockFactory, 200)); // test doesn't close because operator returns a view 
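// (that is: with limit 100 and 200 input positions, the operator's output page wraps the
// same underlying memory as p rather than copying it, so the test releases only the
// output; releasing p here as well would release the shared block twice)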
op.addInput(p); @@ -115,7 +114,7 @@ public void testBlockBiggerThanRemaining() { public void testBlockPreciselyRemaining() { BlockFactory blockFactory = driverContext().blockFactory(); for (int i = 0; i < 100; i++) { - try (var op = simple(BigArrays.NON_RECYCLING_INSTANCE).get(driverContext())) { + try (var op = simple().get(driverContext())) { assertTrue(op.needsInput()); Page p = new Page(randomBlock(blockFactory, 100)); // test doesn't close because operator returns same page op.addInput(p); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MultivalueDedupeTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MultivalueDedupeTests.java index 50b20a2ffdcff..b67076d635993 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MultivalueDedupeTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MultivalueDedupeTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.core.Releasables; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matcher; @@ -55,7 +56,7 @@ public class MultivalueDedupeTests extends ESTestCase { public static List supportedTypes() { List supported = new ArrayList<>(); for (ElementType elementType : ElementType.values()) { - if (elementType == ElementType.UNKNOWN || elementType == ElementType.NULL || elementType == ElementType.DOC) { + if (oneOf(elementType, ElementType.UNKNOWN, ElementType.NULL, ElementType.DOC)) { continue; } supported.add(elementType); @@ -63,11 +64,20 @@ public static List supportedTypes() { return supported; } + private static boolean oneOf(ElementType elementType, ElementType... others) { + for (ElementType other : others) { + if (elementType == other) { + return true; + } + } + return false; + } + @ParametersFactory public static List params() { List params = new ArrayList<>(); for (ElementType elementType : supportedTypes()) { - if (elementType == ElementType.UNKNOWN || elementType == ElementType.NULL || elementType == ElementType.DOC) { + if (oneOf(elementType, ElementType.UNKNOWN, ElementType.NULL, ElementType.DOC)) { continue; } for (boolean nullAllowed : new boolean[] { false, true }) { @@ -180,6 +190,7 @@ public void testHashWithPreviousValues() { assertBooleanHash(previousValues, b); } case BYTES_REF -> { + // TODO: Also test spatial WKB int prevSize = between(1, 10000); Set previousValues = new HashSet<>(prevSize); while (previousValues.size() < prevSize) { @@ -346,7 +357,7 @@ private void assertHash( for (int i = start; i < end; i++) { actualValues.add(lookup.apply(hashes.getInt(i) - 1)); } - assertThat(actualValues, containsInAnyOrder(v.stream().collect(Collectors.toSet()).stream().sorted().toArray())); + assertThat(new HashSet<>(actualValues), containsInAnyOrder(new HashSet<>(v).toArray())); allValues.addAll(v); } @@ -374,7 +385,7 @@ private int assertEncodedPosition(BasicBlockTests.RandomBlock b, BatchEncoder en * This produces a block with a single value per position, but it's good enough * for comparison. 
*/ - Block.Builder builder = elementType.newBlockBuilder(encoder.valueCount(offset)); + Block.Builder builder = elementType.newBlockBuilder(encoder.valueCount(offset), TestBlockFactory.getNonBreakingInstance()); BytesRef[] toDecode = new BytesRef[encoder.valueCount(offset)]; for (int i = 0; i < toDecode.length; i++) { BytesRefBuilder dest = new BytesRefBuilder(); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorTests.java index 3572dc620287d..165e5b80b9a58 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorTests.java @@ -8,13 +8,11 @@ package org.elasticsearch.compute.operator; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.ElementType; -import org.elasticsearch.compute.data.IntBlock; -import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.data.TestBlockFactory; import java.util.Iterator; import java.util.List; @@ -48,7 +46,7 @@ protected Page createPage(int positionOffset, int length) { } @Override - protected Operator.OperatorFactory simple(BigArrays bigArrays) { + protected Operator.OperatorFactory simple() { return new MvExpandOperator.Factory(0, randomIntBetween(1, 1000)); } @@ -202,16 +200,17 @@ protected void assertSimpleOutput(List input, List results) { } @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - assumeTrue("doesn't use big arrays so can't break", false); - return null; + protected ByteSizeValue enoughMemoryForSimple() { + assumeFalse("doesn't throw in tests but probably should", true); + return ByteSizeValue.ofBytes(1); } public void testNoopStatus() { + BlockFactory blockFactory = blockFactory(); MvExpandOperator op = new MvExpandOperator(0, randomIntBetween(1, 1000)); List result = drive( op, - List.of(new Page(IntVector.newVectorBuilder(2).appendInt(1).appendInt(2).build().asBlock())).iterator(), + List.of(new Page(blockFactory.newIntVectorBuilder(2).appendInt(1).appendInt(2).build().asBlock())).iterator(), driverContext() ); assertThat(result, hasSize(1)); @@ -224,7 +223,8 @@ public void testNoopStatus() { public void testExpandStatus() { MvExpandOperator op = new MvExpandOperator(0, randomIntBetween(1, 1)); - var builder = IntBlock.newBlockBuilder(2).beginPositionEntry().appendInt(1).appendInt(2).endPositionEntry(); + BlockFactory blockFactory = blockFactory(); + var builder = blockFactory.newIntBlockBuilder(2).beginPositionEntry().appendInt(1).appendInt(2).endPositionEntry(); List result = drive(op, List.of(new Page(builder.build())).iterator(), driverContext()); assertThat(result, hasSize(1)); assertThat(valuesAtPositions(result.get(0).getBlock(0), 0, 2), equalTo(List.of(List.of(1), List.of(2)))); @@ -232,6 +232,7 @@ public void testExpandStatus() { assertThat(status.pagesIn(), equalTo(1)); assertThat(status.pagesOut(), equalTo(1)); assertThat(status.noops(), equalTo(0)); + result.forEach(Page::releaseBlocks); } public void testExpandWithBytesRefs() { @@ -253,7 +254,7 @@ protected Page createPage(int positionOffset, int length) { ); } }); - List origInput = 
deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List origInput = deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); List results = drive(new MvExpandOperator(0, randomIntBetween(1, 1000)), input.iterator(), context); assertSimpleOutput(origInput, results); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java index 2f1cc2981766e..0890ba669f0a2 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java @@ -24,13 +24,14 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockTestUtils; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; import org.elasticsearch.indices.CrankyCircuitBreakerService; +import org.elasticsearch.test.BreakerTestUtil; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.junit.AssumptionViolatedException; import java.util.ArrayList; import java.util.Iterator; @@ -56,13 +57,6 @@ public abstract class OperatorTestCase extends AnyOperatorTestCase { */ protected abstract void assertSimpleOutput(List input, List results); - /** - * A {@link ByteSizeValue} that is so small any input to the operator - * will cause it to circuit break. If the operator can't break then - * throw an {@link AssumptionViolatedException}. - */ - protected abstract ByteSizeValue smallEnoughToCircuitBreak(); - /** * Test a small input set against {@link #simple}. Smaller input sets * are more likely to discover accidental behavior for clumped inputs. @@ -79,40 +73,64 @@ public final void testSimpleLargeInput() { } /** - * Run {@link #simple} with a circuit breaker configured by - * {@link #smallEnoughToCircuitBreak} and assert that it breaks - * in a sane way. + * Enough memory for {@link #simple} not to throw a {@link CircuitBreakingException}. + * It's fine if this is much more memory than {@linkplain #simple} needs. + * When we want to make {@linkplain #simple} throw we'll find the precise amount of memory + * that'll make it throw with a binary search. + */ + protected ByteSizeValue enoughMemoryForSimple() { + return ByteSizeValue.ofGb(1); + } + + /** + * Run {@link #simple} with a circuit breaker many times, making sure all blocks + * are properly released. In particular, we perform a binary search to find the + * largest amount of memory that'll throw a {@link CircuitBreakingException} with + * starting bounds of {@code 0b} and {@link #enoughMemoryForSimple}. Then we pick + * a random amount of memory between {@code 0b} and the maximum and run that, + * asserting both that this throws a {@link CircuitBreakingException} and releases + * all pages. */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/101824") public final void testSimpleCircuitBreaking() { - /* - * We build two CircuitBreakers - one for the input blocks and one for the operation itself. - * The input blocks don't count against the memory usage for the limited operator that we - * build. 
- */ + ByteSizeValue memoryLimitForSimple = enoughMemoryForSimple(); + Operator.OperatorFactory simple = simple(); DriverContext inputFactoryContext = driverContext(); - BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, smallEnoughToCircuitBreak()) - .withCircuitBreaking(); List input = CannedSourceOperator.collectPages(simpleInput(inputFactoryContext.blockFactory(), between(1_000, 10_000))); + try { + ByteSizeValue limit = BreakerTestUtil.findBreakerLimit( + memoryLimitForSimple, + l -> runWithLimit(simple, CannedSourceOperator.deepCopyOf(input), l) + ); + ByteSizeValue testWithSize = ByteSizeValue.ofBytes(randomLongBetween(0, limit.getBytes())); + logger.info("testing with {} against a limit of {}", testWithSize, limit); + Exception e = expectThrows( + CircuitBreakingException.class, + () -> runWithLimit(simple, CannedSourceOperator.deepCopyOf(input), testWithSize) + ); + assertThat(e.getMessage(), equalTo(MockBigArrays.ERROR_MESSAGE)); + } finally { + Releasables.closeExpectNoException(Releasables.wrap(() -> Iterators.map(input.iterator(), p -> p::releaseBlocks))); + } + assertThat(inputFactoryContext.breaker().getUsed(), equalTo(0L)); + } + + private void runWithLimit(Operator.OperatorFactory factory, List input, ByteSizeValue limit) { + BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, limit).withCircuitBreaking(); CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); BlockFactory blockFactory = BlockFactory.getInstance(breaker, bigArrays); DriverContext driverContext = new DriverContext(bigArrays, blockFactory); - boolean[] driverStarted = new boolean[1]; - Exception e = expectThrows(CircuitBreakingException.class, () -> { - var operator = simple(bigArrays).get(driverContext); - driverStarted[0] = true; + boolean driverStarted = false; + try { + var operator = factory.get(driverContext); + driverStarted = true; drive(operator, input.iterator(), driverContext); - }); - if (driverStarted[0] == false) { - // if drive hasn't even started then we need to release the input pages - Releasables.closeExpectNoException(Releasables.wrap(() -> Iterators.map(input.iterator(), p -> p::releaseBlocks))); + } finally { + if (driverStarted == false) { + // if drive hasn't even started then we need to release the input pages manually + Releasables.closeExpectNoException(Releasables.wrap(() -> Iterators.map(input.iterator(), p -> p::releaseBlocks))); + } + assertThat(bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST).getUsed(), equalTo(0L)); } - assertThat(e.getMessage(), equalTo(MockBigArrays.ERROR_MESSAGE)); - assertThat(bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST).getUsed(), equalTo(0L)); - - // Note the lack of try/finally here - we're asserting that when the driver throws an exception we clear the breakers. 
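The binary search that the rewritten testSimpleCircuitBreaking delegates to BreakerTestUtil.findBreakerLimit can be pictured like this. A standalone sketch of the idea only, using ES test types (ByteSizeValue, CircuitBreakingException, CheckedConsumer); the real helper's signature may differ:

    // Invariant: a zero-byte limit always breaks and `enough` never does (that is the
    // contract of enoughMemoryForSimple()), so the largest breaking limit lies between
    // them. `attempt` is assumed to run simple() under a MockBigArrays capped at the
    // given limit, throwing CircuitBreakingException when the cap is hit.
    static ByteSizeValue largestBreakingLimit(ByteSizeValue enough, CheckedConsumer<ByteSizeValue, Exception> attempt) throws Exception {
        long breaks = 0;                       // largest limit known to break
        long succeeds = enough.getBytes();     // smallest limit known to succeed
        while (succeeds - breaks > 1) {
            long mid = (breaks + succeeds) >>> 1;
            try {
                attempt.accept(ByteSizeValue.ofBytes(mid));
                succeeds = mid;                // ran clean: the boundary is below mid
            } catch (CircuitBreakingException e) {
                breaks = mid;                  // still breaks: the boundary is at or above mid
            }
        }
        return ByteSizeValue.ofBytes(breaks);
    }

The test then picks a random limit at or below that boundary and asserts both that the run trips the breaker and that every page is released afterwards.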
- assertThat(bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST).getUsed(), equalTo(0L)); - assertThat(inputFactoryContext.breaker().getUsed(), equalTo(0L)); } /** @@ -129,7 +147,7 @@ public final void testSimpleWithCranky() { boolean driverStarted = false; try { - Operator operator = simple(driverContext.bigArrays()).get(driverContext); + Operator operator = simple().get(driverContext); driverStarted = true; drive(operator, input.iterator(), driverContext); // Either we get lucky and cranky doesn't throw and the test completes or we don't and it throws @@ -186,10 +204,10 @@ protected final void assertSimple(DriverContext context, int size) { } // Clone the input so that the operator can close it, then, later, we can read it again to build the assertion. - List origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); BigArrays bigArrays = context.bigArrays().withCircuitBreaking(); - List results = drive(simple(bigArrays).get(context), input.iterator(), context); + List results = drive(simple().get(context), input.iterator(), context); assertSimpleOutput(origInput, results); assertThat(bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST).getUsed(), equalTo(0L)); @@ -225,7 +243,7 @@ public void testSimpleFinishClose() { // eventually, when driverContext always returns a tracking factory, we can enable this assertion // assertThat(driverContext.blockFactory().breaker().getUsed(), greaterThan(0L)); Page page = input.get(0); - try (var operator = simple(driverContext.bigArrays()).get(driverContext)) { + try (var operator = simple().get(driverContext)) { assert operator.needsInput(); operator.addInput(page); operator.finish(); @@ -270,10 +288,10 @@ public static void runDriver(List drivers) { drivers.add( new Driver( "dummy-session", - new DriverContext(BigArrays.NON_RECYCLING_INSTANCE, BlockFactory.getNonBreakingInstance()), + new DriverContext(BigArrays.NON_RECYCLING_INSTANCE, TestBlockFactory.getNonBreakingInstance()), () -> "dummy-driver", new SequenceLongBlockSourceOperator( - BlockFactory.getNonBreakingInstance(), + TestBlockFactory.getNonBreakingInstance(), LongStream.range(0, between(1, 100)), between(1, 100) ), diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OutputOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OutputOperatorTests.java index bccd5c1b57d81..4aae5fb0dca90 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OutputOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OutputOperatorTests.java @@ -7,8 +7,6 @@ package org.elasticsearch.compute.operator; -import org.elasticsearch.common.util.BigArrays; - import java.util.List; import java.util.stream.IntStream; @@ -16,7 +14,7 @@ public class OutputOperatorTests extends AnyOperatorTestCase { @Override - protected Operator.OperatorFactory simple(BigArrays bigArrays) { + protected Operator.OperatorFactory simple() { return new OutputOperator.OutputOperatorFactory(List.of("a"), p -> p, p -> {}); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ProjectOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ProjectOperatorTests.java index 30f3bfda27d5e..572657c7c8226 100644 --- 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ProjectOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ProjectOperatorTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.compute.operator; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.IntBlock; @@ -66,7 +65,7 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { } @Override - protected Operator.OperatorFactory simple(BigArrays bigArrays) { + protected Operator.OperatorFactory simple() { return new ProjectOperator.ProjectOperatorFactory(Arrays.asList(1)); } @@ -97,8 +96,8 @@ protected void assertSimpleOutput(List input, List results) { } @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - assumeTrue("doesn't use big arrays so can't break", false); + protected ByteSizeValue enoughMemoryForSimple() { + assumeTrue("doesn't allocate", false); return null; } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/RowOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/RowOperatorTests.java index c8250eba5703a..cd8a49939fbb5 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/RowOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/RowOperatorTests.java @@ -11,12 +11,12 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; @@ -28,7 +28,7 @@ public class RowOperatorTests extends ESTestCase { final DriverContext driverContext = new DriverContext( new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService()).withCircuitBreaking(), - BlockFactory.getNonBreakingInstance() + TestBlockFactory.getNonBreakingInstance() ); public void testBoolean() { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/SequenceBooleanBlockSourceOperator.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/SequenceBooleanBlockSourceOperator.java index 7c1c62aea6ab9..b92c6d01e5077 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/SequenceBooleanBlockSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/SequenceBooleanBlockSourceOperator.java @@ -37,12 +37,13 @@ public SequenceBooleanBlockSourceOperator(BlockFactory blockFactory, List @Override protected Page createPage(int positionOffset, int length) { - DoubleVector.FixedBuilder builder = DoubleVector.newVectorFixedBuilder(length, blockFactory); + DoubleVector.FixedBuilder builder = blockFactory.newDoubleVectorFixedBuilder(length); for (int i = 0; i < length; i++) { builder.appendDouble(values[positionOffset + i]); 
}

diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/StringExtractOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/StringExtractOperatorTests.java
index 70ef2118fcef0..c8a329be7b72a 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/StringExtractOperatorTests.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/StringExtractOperatorTests.java
@@ -8,8 +8,6 @@ package org.elasticsearch.compute.operator;

 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.compute.data.Block;
 import org.elasticsearch.compute.data.BlockFactory;
 import org.elasticsearch.compute.data.BytesRefBlock;
@@ -41,7 +39,7 @@ public Map<String, String> apply(String s) {
     }

     @Override
-    protected Operator.OperatorFactory simple(BigArrays bigArrays) {
+    protected Operator.OperatorFactory simple() {
         Supplier<Function<String, Map<String, String>>> expEval = () -> new FirstWord("test");
         return new StringExtractOperator.StringExtractOperatorFactory(
             new String[] { "test" },
@@ -84,11 +82,6 @@ protected void assertSimpleOutput(List<Page> input, List<Page> results) {
         }
     }

-    @Override
-    protected ByteSizeValue smallEnoughToCircuitBreak() {
-        return ByteSizeValue.ofBytes(between(1, 32));
-    }
-
     public void testMultivalueDissectInput() {
         StringExtractOperator operator = new StringExtractOperator(new String[] { "test" }, new EvalOperator.ExpressionEvaluator() {
@@ -103,8 +96,9 @@ public Block eval(Page page) {
             public void close() {}
         }, new FirstWord("test"), driverContext());

-        Page result = null;
-        try (BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(1)) {
+        BlockFactory blockFactory = blockFactory();
+        final Page result;
+        try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(1)) {
             builder.beginPositionEntry();
             builder.appendBytesRef(new BytesRef("foo1 bar1"));
             builder.appendBytesRef(new BytesRef("foo2 bar2"));

diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/TestResultPageSinkOperator.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/TestResultPageSinkOperator.java
index aaa3a6ac8a3c8..e2cb0e21938e2 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/TestResultPageSinkOperator.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/TestResultPageSinkOperator.java
@@ -7,9 +7,9 @@

 package org.elasticsearch.compute.operator;

-import org.elasticsearch.compute.data.BlockFactory;
 import org.elasticsearch.compute.data.BlockTestUtils;
 import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.compute.data.TestBlockFactory;

 import java.util.function.Consumer;

@@ -21,7 +21,7 @@ public class TestResultPageSinkOperator extends PageConsumerOperator {

     public TestResultPageSinkOperator(Consumer<Page> pageConsumer) {
         super(page -> {
-            Page copy = BlockTestUtils.deepCopyOf(page, BlockFactory.getNonBreakingInstance());
+            Page copy = BlockTestUtils.deepCopyOf(page, TestBlockFactory.getNonBreakingInstance());
             page.releaseBlocks();
             pageConsumer.accept(copy);
         });

diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java
index 74e83017e03bf..1b1801c63017d 100644
---
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java @@ -24,8 +24,8 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; -import org.elasticsearch.compute.data.ConstantIntVector; import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.MockBlockFactory; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.Driver; import org.elasticsearch.compute.operator.DriverContext; @@ -41,6 +41,7 @@ import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.transport.AbstractSimpleTransportTestCase; +import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; @@ -53,8 +54,6 @@ import java.util.Collections; import java.util.List; import java.util.Set; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; import java.util.function.Supplier; @@ -85,19 +84,17 @@ public void shutdownThreadPool() { } public void testBasic() throws Exception { + BlockFactory blockFactory = blockFactory(); Page[] pages = new Page[7]; for (int i = 0; i < pages.length; i++) { - pages[i] = new Page(new ConstantIntVector(i, 2).asBlock()); + pages[i] = new Page(blockFactory.newConstantIntBlockWith(i, 2)); } ExchangeSinkHandler sinkExchanger = new ExchangeSinkHandler(2, threadPool::relativeTimeInMillis); ExchangeSink sink1 = sinkExchanger.createExchangeSink(); ExchangeSink sink2 = sinkExchanger.createExchangeSink(); ExchangeSourceHandler sourceExchanger = new ExchangeSourceHandler(3, threadPool.executor(ESQL_TEST_EXECUTOR)); - assertThat(sourceExchanger.refCount(), equalTo(1)); ExchangeSource source = sourceExchanger.createExchangeSource(); - assertThat(sourceExchanger.refCount(), equalTo(2)); sourceExchanger.addRemoteSink(sinkExchanger::fetchPageAsync, 1); - assertThat(sourceExchanger.refCount(), equalTo(3)); SubscribableListener waitForReading = source.waitForReading(); assertFalse(waitForReading.isDone()); assertNull(source.pollPage()); @@ -135,14 +132,11 @@ public void testBasic() throws Exception { sink2.finish(); assertTrue(sink2.isFinished()); assertTrue(source.isFinished()); - assertBusy(() -> assertThat(sourceExchanger.refCount(), equalTo(2))); source.finish(); - assertThat(sourceExchanger.refCount(), equalTo(1)); - CountDownLatch latch = new CountDownLatch(1); - sourceExchanger.addCompletionListener(ActionListener.releasing(latch::countDown)); - sourceExchanger.decRef(); - assertTrue(latch.await(1, TimeUnit.SECONDS)); ESTestCase.terminate(threadPool); + for (Page page : pages) { + page.releaseBlocks(); + } } /** @@ -180,14 +174,15 @@ public Page getOutput() { return null; } int size = randomIntBetween(1, 10); - IntBlock.Builder builder = IntBlock.newBlockBuilder(size); - for (int i = 0; i < size; i++) { - int seqNo = nextSeqNo.incrementAndGet(); - if (seqNo < maxInputSeqNo) { - builder.appendInt(seqNo); + try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(size)) { + for (int i = 0; i < size; i++) { + int seqNo = 
nextSeqNo.incrementAndGet(); + if (seqNo < maxInputSeqNo) { + builder.appendInt(seqNo); + } } + return new Page(builder.build()); } - return new Page(builder.build()); } @Override @@ -338,8 +333,9 @@ public void testConcurrentWithHandlers() { } public void testEarlyTerminate() { - IntBlock block1 = new ConstantIntVector(1, 2).asBlock(); - IntBlock block2 = new ConstantIntVector(1, 2).asBlock(); + BlockFactory blockFactory = blockFactory(); + IntBlock block1 = blockFactory.newConstantIntBlockWith(1, 2); + IntBlock block2 = blockFactory.newConstantIntBlockWith(1, 2); Page p1 = new Page(block1); Page p2 = new Page(block2); ExchangeSinkHandler sinkExchanger = new ExchangeSinkHandler(2, threadPool::relativeTimeInMillis); @@ -368,9 +364,10 @@ public void testConcurrentWithTransportActions() throws Exception { try (exchange0; exchange1; node0; node1) { String exchangeId = "exchange"; Task task = new Task(1, "", "", "", null, Collections.emptyMap()); - ExchangeSourceHandler sourceHandler = exchange0.createSourceHandler(exchangeId, randomExchangeBuffer(), ESQL_TEST_EXECUTOR); + var sourceHandler = new ExchangeSourceHandler(randomExchangeBuffer(), threadPool.executor(ESQL_TEST_EXECUTOR)); ExchangeSinkHandler sinkHandler = exchange1.createSinkHandler(exchangeId, randomExchangeBuffer()); - sourceHandler.addRemoteSink(exchange0.newRemoteSink(task, exchangeId, node0, node1.getLocalNode()), randomIntBetween(1, 5)); + Transport.Connection connection = node0.getConnection(node1.getLocalNode()); + sourceHandler.addRemoteSink(exchange0.newRemoteSink(task, exchangeId, node0, connection), randomIntBetween(1, 5)); final int maxInputSeqNo = rarely() ? -1 : randomIntBetween(0, 50_000); final int maxOutputSeqNo = rarely() ? -1 : randomIntBetween(0, 50_000); runConcurrentTest(maxInputSeqNo, maxOutputSeqNo, sourceHandler::createExchangeSource, sinkHandler::createExchangeSink); @@ -410,8 +407,8 @@ public void sendResponse(TransportResponse transportResponse) throws IOException } } } - ExchangeResponse newResp = new ExchangeResponse(page, origResp.finished()); origResp.decRef(); + ExchangeResponse newResp = new ExchangeResponse(page, origResp.finished()); super.sendResponse(newResp); } }; @@ -421,9 +418,10 @@ public void sendResponse(TransportResponse transportResponse) throws IOException try (exchange0; exchange1; node0; node1) { String exchangeId = "exchange"; Task task = new Task(1, "", "", "", null, Collections.emptyMap()); - ExchangeSourceHandler sourceHandler = exchange0.createSourceHandler(exchangeId, randomIntBetween(1, 128), ESQL_TEST_EXECUTOR); + var sourceHandler = new ExchangeSourceHandler(randomIntBetween(1, 128), threadPool.executor(ESQL_TEST_EXECUTOR)); ExchangeSinkHandler sinkHandler = exchange1.createSinkHandler(exchangeId, randomIntBetween(1, 128)); - sourceHandler.addRemoteSink(exchange0.newRemoteSink(task, exchangeId, node0, node1.getLocalNode()), randomIntBetween(1, 5)); + Transport.Connection connection = node0.getConnection(node1.getLocalDiscoNode()); + sourceHandler.addRemoteSink(exchange0.newRemoteSink(task, exchangeId, node0, connection), randomIntBetween(1, 5)); Exception err = expectThrows( Exception.class, () -> runConcurrentTest(maxSeqNo, maxSeqNo, sourceHandler::createExchangeSource, sinkHandler::createExchangeSink) @@ -431,6 +429,7 @@ public void sendResponse(TransportResponse transportResponse) throws IOException Throwable cause = ExceptionsHelper.unwrap(err, IOException.class); assertNotNull(cause); assertThat(cause.getMessage(), equalTo("page is too large")); + 
sinkHandler.onFailure(new RuntimeException(cause)); } } @@ -468,11 +467,6 @@ public String getProfileName() { return in.getProfileName(); } - @Override - public String getChannelType() { - return in.getChannelType(); - } - @Override public void sendResponse(TransportResponse response) throws IOException { in.sendResponse(response); @@ -495,11 +489,18 @@ private BlockFactory blockFactory() { MockBigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofGb(1)); CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); breakers.add(breaker); - return new BlockFactory(breaker, bigArrays); + MockBlockFactory factory = new MockBlockFactory(breaker, bigArrays); + blockFactories.add(factory); + return factory; } + private final List blockFactories = new ArrayList<>(); + @After public void allMemoryReleased() { + for (MockBlockFactory blockFactory : blockFactories) { + blockFactory.ensureAllBlocksAreReleased(); + } for (CircuitBreaker breaker : breakers) { assertThat(breaker.getUsed(), equalTo(0L)); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperatorStatusTests.java index 7438055284b14..369913c7d152c 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperatorStatusTests.java @@ -17,7 +17,7 @@ public class ExchangeSinkOperatorStatusTests extends AbstractWireSerializingTestCase { public void testToXContent() { - assertThat(Strings.toString(simple()), equalTo(simpleToJson())); + assertThat(Strings.toString(simple(), true, true), equalTo(simpleToJson())); } public static ExchangeSinkOperator.Status simple() { @@ -26,7 +26,9 @@ public static ExchangeSinkOperator.Status simple() { public static String simpleToJson() { return """ - {"pages_accepted":10}"""; + { + "pages_accepted" : 10 + }"""; } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/ExtractorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/ExtractorTests.java index 7c8f68549c8a4..24b682d67127d 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/ExtractorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/ExtractorTests.java @@ -19,7 +19,7 @@ import org.elasticsearch.compute.data.BlockUtils; import org.elasticsearch.compute.data.DocVector; import org.elasticsearch.compute.data.ElementType; -import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; import org.elasticsearch.test.ESTestCase; @@ -34,6 +34,7 @@ public class ExtractorTests extends ESTestCase { @ParametersFactory public static Iterable parameters() { + BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); List cases = new ArrayList<>(); for (ElementType e : ElementType.values()) { switch (e) { @@ -71,6 +72,15 @@ public static Iterable parameters() { () -> randomList(2, 10, () -> new BytesRef(InetAddressPoint.encode(randomIp(randomBoolean())))) ) ); + cases.add(valueTestCase("single point", e, TopNEncoder.DEFAULT_UNSORTABLE, 
TopNEncoderTests::randomPointAsWKB)); + cases.add( + valueTestCase( + "many points", + e, + TopNEncoder.DEFAULT_UNSORTABLE, + () -> randomList(2, 10, TopNEncoderTests::randomPointAsWKB) + ) + ); } case DOC -> cases.add( new Object[] { @@ -79,9 +89,9 @@ public static Iterable parameters() { e, TopNEncoder.DEFAULT_UNSORTABLE, () -> new DocVector( - IntBlock.newConstantBlockWith(randomInt(), 1).asVector(), - IntBlock.newConstantBlockWith(randomInt(), 1).asVector(), - IntBlock.newConstantBlockWith(randomInt(), 1).asVector(), + blockFactory.newConstantIntBlockWith(randomInt(), 1).asVector(), + blockFactory.newConstantIntBlockWith(randomInt(), 1).asVector(), + blockFactory.newConstantIntBlockWith(randomInt(), 1).asVector(), randomBoolean() ? null : randomBoolean() ).asBlock() ) } @@ -109,7 +119,7 @@ static Object[] valueTestCase(String name, ElementType type, TopNEncoder encoder name, type, encoder, - () -> BlockUtils.fromListRow(BlockFactory.getNonBreakingInstance(), Arrays.asList(value.get()))[0] + () -> BlockUtils.fromListRow(TestBlockFactory.getNonBreakingInstance(), Arrays.asList(value.get()))[0] ) }; } @@ -150,7 +160,7 @@ public void testNotInKey() { assertThat(valuesBuilder.length(), greaterThan(0)); ResultBuilder result = ResultBuilder.resultBuilderFor( - BlockFactory.getNonBreakingInstance(), + TestBlockFactory.getNonBreakingInstance(), testCase.type, testCase.encoder.toUnsortable(), false, @@ -164,7 +174,7 @@ public void testNotInKey() { } public void testInKey() { - assumeFalse("can't sort on _doc", testCase.type == ElementType.DOC); + assumeFalse("can't sort with un-sortable encoder", testCase.encoder == TopNEncoder.DEFAULT_UNSORTABLE); Block value = testCase.value.get(); BreakingBytesRefBuilder keysBuilder = nonBreakingBytesRefBuilder(); @@ -177,7 +187,7 @@ public void testInKey() { assertThat(valuesBuilder.length(), greaterThan(0)); ResultBuilder result = ResultBuilder.resultBuilderFor( - BlockFactory.getNonBreakingInstance(), + TestBlockFactory.getNonBreakingInstance(), testCase.type, testCase.encoder.toUnsortable(), true, diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNEncoderTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNEncoderTests.java index 75fad6fffb5de..6cd65a8c3c4a0 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNEncoderTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNEncoderTests.java @@ -12,9 +12,14 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.geo.ShapeTestUtils; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.utils.WellKnownBinary; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.versionfield.Version; +import java.nio.ByteOrder; import java.util.List; import static org.hamcrest.Matchers.equalTo; @@ -104,6 +109,11 @@ public void testVersion() { roundTripBytesRef(randomVersion().toBytesRef()); } + public void testPointAsWKB() { + assumeTrue("unsupported", encoder == TopNEncoder.DEFAULT_UNSORTABLE); + roundTripBytesRef(randomPointAsWKB()); + } + public void testIp() { assumeTrue("unsupported", encoder == TopNEncoder.IP); roundTripBytesRef(new BytesRef(InetAddressPoint.encode(randomIp(randomBoolean())))); @@ -128,4 +138,10 @@ static 
Version randomVersion() { default -> throw new IllegalArgumentException(); }; } + + static BytesRef randomPointAsWKB() { + Point point = randomBoolean() ? GeometryTestUtils.randomPoint() : ShapeTestUtils.randomPoint(); + byte[] wkb = WellKnownBinary.toWKB(point, ByteOrder.LITTLE_ENDIAN); + return new BytesRef(wkb); + } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorTests.java index be3e75fcce2a2..ba4f547d80ce1 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorTests.java @@ -14,19 +14,15 @@ import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; -import org.elasticsearch.compute.data.BooleanBlock; -import org.elasticsearch.compute.data.BytesRefBlock; -import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.ElementType; -import org.elasticsearch.compute.data.IntArrayVector; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.data.TestBlockBuilder; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.compute.operator.CannedSourceOperator; import org.elasticsearch.compute.operator.Driver; import org.elasticsearch.compute.operator.DriverContext; @@ -73,6 +69,7 @@ import static org.elasticsearch.compute.operator.topn.TopNEncoder.DEFAULT_SORTABLE; import static org.elasticsearch.compute.operator.topn.TopNEncoder.DEFAULT_UNSORTABLE; import static org.elasticsearch.compute.operator.topn.TopNEncoder.UTF8; +import static org.elasticsearch.compute.operator.topn.TopNEncoderTests.randomPointAsWKB; import static org.elasticsearch.core.Tuple.tuple; import static org.elasticsearch.test.ListMatcher.matchesList; import static org.elasticsearch.test.MapMatcher.assertMap; @@ -126,7 +123,7 @@ public class TopNOperatorTests extends OperatorTestCase { ); @Override - protected TopNOperator.TopNOperatorFactory simple(BigArrays bigArrays) { + protected TopNOperator.TopNOperatorFactory simple() { return new TopNOperator.TopNOperatorFactory( 4, List.of(LONG), @@ -180,15 +177,6 @@ protected void assertSimpleOutput(List input, List results) { ); } - @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - /* - * 775 causes us to blow up while collecting values and 780 doesn't - * trip the breaker. So 775 is the max on this range. 
- */ - return ByteSizeValue.ofBytes(between(1, 775)); - } - public void testRamBytesUsed() { RamUsageTester.Accumulator acc = new RamUsageTester.Accumulator() { @Override @@ -305,14 +293,14 @@ private List topNLong(List inputValues, int limit, boolean ascending } public void testCompareInts() { + BlockFactory blockFactory = blockFactory(); testCompare( new Page( - new Block[] { - IntBlock.newBlockBuilder(2).appendInt(Integer.MIN_VALUE).appendInt(randomIntBetween(-1000, -1)).build(), - IntBlock.newBlockBuilder(2).appendInt(randomIntBetween(-1000, -1)).appendInt(0).build(), - IntBlock.newBlockBuilder(2).appendInt(0).appendInt(randomIntBetween(1, 1000)).build(), - IntBlock.newBlockBuilder(2).appendInt(randomIntBetween(1, 1000)).appendInt(Integer.MAX_VALUE).build(), - IntBlock.newBlockBuilder(2).appendInt(0).appendInt(Integer.MAX_VALUE).build() } + blockFactory.newIntBlockBuilder(2).appendInt(Integer.MIN_VALUE).appendInt(randomIntBetween(-1000, -1)).build(), + blockFactory.newIntBlockBuilder(2).appendInt(randomIntBetween(-1000, -1)).appendInt(0).build(), + blockFactory.newIntBlockBuilder(2).appendInt(0).appendInt(randomIntBetween(1, 1000)).build(), + blockFactory.newIntBlockBuilder(2).appendInt(randomIntBetween(1, 1000)).appendInt(Integer.MAX_VALUE).build(), + blockFactory.newIntBlockBuilder(2).appendInt(0).appendInt(Integer.MAX_VALUE).build() ), INT, DEFAULT_SORTABLE @@ -320,14 +308,14 @@ public void testCompareInts() { } public void testCompareLongs() { + BlockFactory blockFactory = blockFactory(); testCompare( new Page( - new Block[] { - LongBlock.newBlockBuilder(2).appendLong(Long.MIN_VALUE).appendLong(randomLongBetween(-1000, -1)).build(), - LongBlock.newBlockBuilder(2).appendLong(randomLongBetween(-1000, -1)).appendLong(0).build(), - LongBlock.newBlockBuilder(2).appendLong(0).appendLong(randomLongBetween(1, 1000)).build(), - LongBlock.newBlockBuilder(2).appendLong(randomLongBetween(1, 1000)).appendLong(Long.MAX_VALUE).build(), - LongBlock.newBlockBuilder(2).appendLong(0).appendLong(Long.MAX_VALUE).build() } + blockFactory.newLongBlockBuilder(2).appendLong(Long.MIN_VALUE).appendLong(randomLongBetween(-1000, -1)).build(), + blockFactory.newLongBlockBuilder(2).appendLong(randomLongBetween(-1000, -1)).appendLong(0).build(), + blockFactory.newLongBlockBuilder(2).appendLong(0).appendLong(randomLongBetween(1, 1000)).build(), + blockFactory.newLongBlockBuilder(2).appendLong(randomLongBetween(1, 1000)).appendLong(Long.MAX_VALUE).build(), + blockFactory.newLongBlockBuilder(2).appendLong(0).appendLong(Long.MAX_VALUE).build() ), LONG, DEFAULT_SORTABLE @@ -335,17 +323,17 @@ public void testCompareLongs() { } public void testCompareDoubles() { + BlockFactory blockFactory = blockFactory(); testCompare( new Page( - new Block[] { - DoubleBlock.newBlockBuilder(2) - .appendDouble(-Double.MAX_VALUE) - .appendDouble(randomDoubleBetween(-1000, -1, true)) - .build(), - DoubleBlock.newBlockBuilder(2).appendDouble(randomDoubleBetween(-1000, -1, true)).appendDouble(0.0).build(), - DoubleBlock.newBlockBuilder(2).appendDouble(0).appendDouble(randomDoubleBetween(1, 1000, true)).build(), - DoubleBlock.newBlockBuilder(2).appendDouble(randomLongBetween(1, 1000)).appendDouble(Double.MAX_VALUE).build(), - DoubleBlock.newBlockBuilder(2).appendDouble(0.0).appendDouble(Double.MAX_VALUE).build() } + blockFactory.newDoubleBlockBuilder(2) + .appendDouble(-Double.MAX_VALUE) + .appendDouble(randomDoubleBetween(-1000, -1, true)) + .build(), + blockFactory.newDoubleBlockBuilder(2).appendDouble(randomDoubleBetween(-1000, -1, 
true)).appendDouble(0.0).build(), + blockFactory.newDoubleBlockBuilder(2).appendDouble(0).appendDouble(randomDoubleBetween(1, 1000, true)).build(), + blockFactory.newDoubleBlockBuilder(2).appendDouble(randomLongBetween(1, 1000)).appendDouble(Double.MAX_VALUE).build(), + blockFactory.newDoubleBlockBuilder(2).appendDouble(0.0).appendDouble(Double.MAX_VALUE).build() ), DOUBLE, DEFAULT_SORTABLE @@ -353,10 +341,10 @@ public void testCompareDoubles() { } public void testCompareUtf8() { + BlockFactory blockFactory = blockFactory(); testCompare( new Page( - new Block[] { - BytesRefBlock.newBlockBuilder(2).appendBytesRef(new BytesRef("bye")).appendBytesRef(new BytesRef("hello")).build() } + blockFactory.newBytesRefBlockBuilder(2).appendBytesRef(new BytesRef("bye")).appendBytesRef(new BytesRef("hello")).build() ), BYTES_REF, UTF8 @@ -364,15 +352,16 @@ public void testCompareUtf8() { } public void testCompareBooleans() { + BlockFactory blockFactory = blockFactory(); testCompare( - new Page(new Block[] { BooleanBlock.newBlockBuilder(2).appendBoolean(false).appendBoolean(true).build() }), + new Page(blockFactory.newBooleanBlockBuilder(2).appendBoolean(false).appendBoolean(true).build()), BOOLEAN, DEFAULT_SORTABLE ); } private void testCompare(Page page, ElementType elementType, TopNEncoder encoder) { - Block nullBlock = Block.constantNullBlock(1); + Block nullBlock = TestBlockFactory.getNonBreakingInstance().newConstantNullBlock(1); Page nullPage = new Page(new Block[] { nullBlock, nullBlock, nullBlock, nullBlock, nullBlock }); for (int b = 0; b < page.getBlockCount(); b++) { @@ -423,6 +412,7 @@ private void testCompare(Page page, ElementType elementType, TopNEncoder encoder assertThat(TopNOperator.compareRows(r2, r1), greaterThan(0)); } } + page.releaseBlocks(); } private TopNOperator.Row row( @@ -974,17 +964,26 @@ public void testRandomMultiValuesTopN() { Function randomValueSupplier = (blockType) -> randomValue(blockType); if (e == BYTES_REF) { if (rarely()) { - if (randomBoolean()) { - // deal with IP fields (BytesRef block) like ES does and properly encode the ip addresses - randomValueSupplier = (blockType) -> new BytesRef(InetAddressPoint.encode(randomIp(randomBoolean()))); - // use the right BytesRef encoder (don't touch the bytes) - encoders.add(TopNEncoder.IP); - } else { - // create a valid Version - randomValueSupplier = (blockType) -> randomVersion().toBytesRef(); - // use the right BytesRef encoder (don't touch the bytes) - encoders.add(TopNEncoder.VERSION); - } + randomValueSupplier = switch (randomInt(2)) { + case 0 -> { + // use the right BytesRef encoder (don't touch the bytes) + encoders.add(TopNEncoder.IP); + // deal with IP fields (BytesRef block) like ES does and properly encode the ip addresses + yield (blockType) -> new BytesRef(InetAddressPoint.encode(randomIp(randomBoolean()))); + } + case 1 -> { + // use the right BytesRef encoder (don't touch the bytes) + encoders.add(TopNEncoder.VERSION); + // create a valid Version + yield (blockType) -> randomVersion().toBytesRef(); + } + default -> { + // use the right BytesRef encoder (don't touch the bytes) + encoders.add(DEFAULT_UNSORTABLE); + // create a valid geo_point + yield (blockType) -> randomPointAsWKB(); + } + }; } else { encoders.add(UTF8); } @@ -1386,7 +1385,7 @@ public void testCloseWithoutCompleting() { randomPageSize() ) ) { - op.addInput(new Page(new IntArrayVector(new int[] { 1 }, 1).asBlock())); + op.addInput(new Page(blockFactory().newIntArrayVector(new int[] { 1 }, 1).asBlock())); } } diff --git 
a/x-pack/plugin/esql/qa/security/build.gradle b/x-pack/plugin/esql/qa/security/build.gradle index 44a4f5a27efea..33371320b865d 100644 --- a/x-pack/plugin/esql/qa/security/build.gradle +++ b/x-pack/plugin/esql/qa/security/build.gradle @@ -1,16 +1,5 @@ -apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' -testClusters.configureEach { - testDistribution = 'DEFAULT' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.monitoring.collection.enabled', 'false' - setting 'xpack.security.enabled', 'true' - numberOfNodes = 1 - extraConfigFile 'roles.yml', file('roles.yml') - user username: "test-admin", password: 'x-pack-test-password', role: "test-admin" - user username: "user1", password: 'x-pack-test-password', role: "user1" - user username: "user2", password: 'x-pack-test-password', role: "user2" - user username: "user3", password: 'x-pack-test-password', role: "user3" - user username: "user4", password: 'x-pack-test-password', role: "user4" - user username: "user5", password: 'x-pack-test-password', role: "user5" +tasks.named('javaRestTest') { + usesDefaultDistribution() } diff --git a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java index 10c77a05af49b..98ec411569af5 100644 --- a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java +++ b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java @@ -17,10 +17,14 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.Before; +import org.junit.ClassRule; import java.io.IOException; import java.util.List; @@ -31,6 +35,26 @@ public class EsqlSecurityIT extends ESRestTestCase { + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .nodes(2) + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.security.enabled", "true") + .rolesFile(Resource.fromClasspath("roles.yml")) + .user("test-admin", "x-pack-test-password", "test-admin", false) + .user("user1", "x-pack-test-password", "user1", false) + .user("user2", "x-pack-test-password", "user2", false) + .user("user3", "x-pack-test-password", "user3", false) + .user("user4", "x-pack-test-password", "user4", false) + .user("user5", "x-pack-test-password", "user5", false) + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + @Override protected Settings restClientSettings() { String token = basicAuthHeaderValue("test-admin", new SecureString("x-pack-test-password".toCharArray())); diff --git a/x-pack/plugin/esql/qa/security/roles.yml b/x-pack/plugin/esql/qa/security/src/javaRestTest/resources/roles.yml similarity index 100% rename from x-pack/plugin/esql/qa/security/roles.yml rename to x-pack/plugin/esql/qa/security/src/javaRestTest/resources/roles.yml 
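The security suite above shows the migration pattern this PR applies across the ESQL QA projects: cluster provisioning moves out of the legacy `testClusters { ... }` Gradle block and into the test class itself as a junit `@ClassRule`, with `getTestRestCluster()` pointing the REST client at the rule-managed cluster. A minimal sketch of that shape, assuming only the test-cluster API calls already visible in this diff (the class name, node count, and settings are illustrative, not part of the change):

```java
import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.junit.ClassRule;

public class ExampleRestIT extends ESRestTestCase {
    // The rule starts the cluster once for the class and tears it down afterwards.
    @ClassRule
    public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
        .distribution(DistributionType.DEFAULT)
        .nodes(1)
        .setting("xpack.security.enabled", "false")
        .setting("xpack.license.self_generated.type", "trial")
        .build();

    // Point the REST client at the rule-managed cluster instead of a
    // Gradle-provisioned one.
    @Override
    protected String getTestRestCluster() {
        return cluster.getHttpAddresses();
    }
}
```

The corresponding build.gradle then shrinks to declaring the distribution, e.g. `tasks.named('javaRestTest') { usesDefaultDistribution() }`, as in the files that follow.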
diff --git a/x-pack/plugin/esql/qa/server/build.gradle b/x-pack/plugin/esql/qa/server/build.gradle index 12c3a9d951383..ff7ace533fb3a 100644 --- a/x-pack/plugin/esql/qa/server/build.gradle +++ b/x-pack/plugin/esql/qa/server/build.gradle @@ -9,50 +9,3 @@ dependencies { api project(xpackModule('ql:test-fixtures')) api project(xpackModule('esql:qa:testFixtures')) } - -subprojects { - if (subprojects.isEmpty()) { - // leaf project - } else { - apply plugin: 'elasticsearch.java' - apply plugin: 'elasticsearch.standalone-rest-test' - } - - - if (project.name != 'security' && project.name != 'mixed-cluster' ) { - // The security project just configures its subprojects - apply plugin: 'elasticsearch.legacy-java-rest-test' - - testClusters.matching { it.name == "javaRestTest" }.configureEach { - testDistribution = 'DEFAULT' - setting 'xpack.ml.enabled', 'false' - setting 'xpack.watcher.enabled', 'false' - } - - - dependencies { - configurations.javaRestTestRuntimeClasspath { - resolutionStrategy.force "org.slf4j:slf4j-api:1.7.25" - } - configurations.javaRestTestRuntimeOnly { - // This is also required to make resolveAllDependencies work - resolutionStrategy.force "org.slf4j:slf4j-api:1.7.25" - } - - /* Since we're a standalone rest test we actually get transitive - * dependencies but we don't really want them because they cause - * all kinds of trouble with the jar hell checks. So we suppress - * them explicitly for non-es projects. */ - javaRestTestImplementation(project(':x-pack:plugin:esql:qa:server')) { - transitive = false - } - javaRestTestImplementation project(":test:framework") - javaRestTestRuntimeOnly project(xpackModule('ql:test-fixtures')) - - javaRestTestRuntimeOnly "org.slf4j:slf4j-api:1.7.25" - javaRestTestRuntimeOnly "net.sf.supercsv:super-csv:${versions.supercsv}" - - javaRestTestImplementation project(path: xpackModule('ql:test-fixtures')) - } - } -} diff --git a/x-pack/plugin/esql/qa/server/heap-attack/build.gradle b/x-pack/plugin/esql/qa/server/heap-attack/build.gradle index de88fdecf2b14..75fc42c275508 100644 --- a/x-pack/plugin/esql/qa/server/heap-attack/build.gradle +++ b/x-pack/plugin/esql/qa/server/heap-attack/build.gradle @@ -1,19 +1,9 @@ -apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' dependencies { - javaRestTestImplementation project(xpackModule('esql:qa:testFixtures')) + javaRestTestImplementation project(xpackModule('esql:qa:server')) } -restResources { - restApi { - include '_common', 'bulk', 'indices', 'esql', 'xpack', 'enrich' - } -} - -testClusters.configureEach { - numberOfNodes = 1 - testDistribution = 'DEFAULT' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.monitoring.collection.enabled', 'true' - setting 'xpack.security.enabled', 'false' +tasks.named('javaRestTest') { + usesDefaultDistribution() } diff --git a/x-pack/plugin/esql/qa/server/heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/heap_attack/HeapAttackIT.java b/x-pack/plugin/esql/qa/server/heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/heap_attack/HeapAttackIT.java index 37f2c86dbc251..270fc96975401 100644 --- a/x-pack/plugin/esql/qa/server/heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/heap_attack/HeapAttackIT.java +++ b/x-pack/plugin/esql/qa/server/heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/heap_attack/HeapAttackIT.java @@ -9,6 +9,7 @@ import org.apache.http.client.config.RequestConfig; import org.apache.http.util.EntityUtils; +import 
org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -19,12 +20,15 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ListMatcher; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase; import org.junit.After; import org.junit.Before; +import org.junit.ClassRule; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -47,7 +51,22 @@ * Tests that run ESQL queries that have, in the past, used so much memory they * crash Elasticsearch. */ +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103527") public class HeapAttackIT extends ESRestTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .nodes(1) + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + /** * This used to fail, but we've since compacted top n so it actually succeeds now. */ @@ -344,7 +363,6 @@ public void testFetchMvLongs() throws IOException { assertMap(map, matchesMap().entry("columns", columns)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100528") public void testFetchTooManyMvLongs() throws IOException { initMvLongsIndex(500, 100, 1000); assertCircuitBreaks(() -> fetchMvLongs()); diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle b/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle index 01955adb3af0c..51c4a0250a74d 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle @@ -1,18 +1,12 @@ import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.VersionProperties +import org.elasticsearch.gradle.util.GradleUtils import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask -apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.bwc-test' -apply plugin: 'elasticsearch.rest-resources' - -dependencies { - testImplementation project(xpackModule('esql:qa:testFixtures')) - testImplementation project(xpackModule('esql:qa:server')) -} restResources { restApi { @@ -23,31 +17,36 @@ restResources { } } -BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> - - if (bwcVersion != VersionProperties.getElasticsearchVersion() && bwcVersion.onOrAfter(Version.fromString("8.11.0"))) { - /* This project runs the ESQL spec tests against a 4 node cluster where two of the nodes has a different minor. 
*/ - def baseCluster = testClusters.register(baseName) { - versions = [bwcVersion.toString(), bwcVersion.toString(), project.version, project.version] - numberOfNodes = 4 - testDistribution = 'DEFAULT' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.security.enabled', 'false' - } - - tasks.register("${baseName}#mixedClusterTest", StandaloneRestIntegTestTask) { - useCluster baseCluster - mustRunAfter("precommit") - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) - nonInputProperties.systemProperty('tests.clustername', baseName) - systemProperty 'tests.bwc_nodes_version', bwcVersion.toString().replace('-SNAPSHOT', '') - systemProperty 'tests.new_nodes_version', project.version.toString().replace('-SNAPSHOT', '') - onlyIf("BWC tests disabled") { project.bwc_tests_enabled } - } - - tasks.register(bwcTaskName(bwcVersion)) { - dependsOn "${baseName}#mixedClusterTest" - } +dependencies { + javaRestTestImplementation project(xpackModule('esql:qa:testFixtures')) + javaRestTestImplementation project(xpackModule('esql:qa:server')) +} + +GradleUtils.extendSourceSet(project, "javaRestTest", "yamlRestTest") + +def supportedVersion = bwcVersion -> { + // ESQL is available in 8.11 or later + return bwcVersion.onOrAfter(Version.fromString("8.11.0")); +} + +BuildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> + def javaRestTest = tasks.register("v${bwcVersion}#javaRestTest", StandaloneRestIntegTestTask) { + usesBwcDistribution(bwcVersion) + systemProperty("tests.old_cluster_version", bwcVersion) + } + + def yamlRestTest = tasks.register("v${bwcVersion}#yamlRestTest", StandaloneRestIntegTestTask) { + usesBwcDistribution(bwcVersion) + systemProperty("tests.old_cluster_version", bwcVersion) + testClassesDirs = sourceSets.yamlRestTest.output.classesDirs + classpath = sourceSets.yamlRestTest.runtimeClasspath + } + + tasks.register(bwcTaskName(bwcVersion)) { + dependsOn javaRestTest, yamlRestTest } } +tasks.named("yamlRestTest") { + enabled = false +} diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/Clusters.java b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/Clusters.java new file mode 100644 index 0000000000000..8a55624ed3a6e --- /dev/null +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/Clusters.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.qa.mixed; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.Version; + +public class Clusters { + public static ElasticsearchCluster mixedVersionCluster() { + Version oldVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); + return ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .withNode(node -> node.version(oldVersion)) + .withNode(node -> node.version(Version.CURRENT)) + .withNode(node -> node.version(oldVersion)) + .withNode(node -> node.version(Version.CURRENT)) + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .setting("cluster.routing.rebalance.enable", "none") // disable relocation until we have retry in ESQL + .build(); + } +} diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/src/test/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java similarity index 55% rename from x-pack/plugin/esql/qa/server/mixed-cluster/src/test/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java rename to x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java index b8dab3641c2a0..9d22045522d19 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/src/test/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java @@ -8,23 +8,40 @@ package org.elasticsearch.xpack.esql.qa.mixed; import org.elasticsearch.Version; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase; import org.elasticsearch.xpack.ql.CsvSpecReader.CsvTestCase; +import org.junit.ClassRule; import static org.elasticsearch.xpack.esql.CsvTestUtils.isEnabled; +import static org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.ASYNC; public class MixedClusterEsqlSpecIT extends EsqlSpecTestCase { + @ClassRule + public static ElasticsearchCluster cluster = Clusters.mixedVersionCluster(); - static final Version bwcVersion = Version.fromString(System.getProperty("tests.bwc_nodes_version")); - static final Version newVersion = Version.fromString(System.getProperty("tests.new_nodes_version")); + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + static final Version bwcVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); - public MixedClusterEsqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { - super(fileName, groupName, testName, lineNumber, testCase); + public MixedClusterEsqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase, Mode mode) { + super(fileName, groupName, testName, lineNumber, testCase, mode); } @Override protected void shouldSkipTest(String testName) { + super.shouldSkipTest(testName); assumeTrue("Test " + testName + " is skipped on " + bwcVersion, isEnabled(testName, bwcVersion)); - assumeTrue("Test " + testName + " is skipped on " + newVersion, isEnabled(testName, newVersion)); + if (mode == ASYNC) { + 
assumeTrue("Async is not supported on " + bwcVersion, supportsAsync()); + } + } + + @Override + protected boolean supportsAsync() { + return bwcVersion.onOrAfter(Version.V_8_13_0); } } diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/src/test/java/org/elasticsearch/xpack/esql/qa/mixed/EsqlClientYamlIT.java b/x-pack/plugin/esql/qa/server/mixed-cluster/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/EsqlClientYamlIT.java similarity index 78% rename from x-pack/plugin/esql/qa/server/mixed-cluster/src/test/java/org/elasticsearch/xpack/esql/qa/mixed/EsqlClientYamlIT.java rename to x-pack/plugin/esql/qa/server/mixed-cluster/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/EsqlClientYamlIT.java index 0965c5506c6a1..2c9833ba0793e 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/src/test/java/org/elasticsearch/xpack/esql/qa/mixed/EsqlClientYamlIT.java +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/EsqlClientYamlIT.java @@ -9,13 +9,22 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase; import org.junit.After; import org.junit.Before; +import org.junit.ClassRule; public class EsqlClientYamlIT extends ESClientYamlSuiteTestCase { + @ClassRule + public static ElasticsearchCluster cluster = Clusters.mixedVersionCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } public EsqlClientYamlIT(final ClientYamlTestCandidate testCandidate) { super(testCandidate); diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/build.gradle b/x-pack/plugin/esql/qa/server/multi-clusters/build.gradle new file mode 100644 index 0000000000000..7008bd8b7aa01 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/multi-clusters/build.gradle @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.internal.info.BuildParams +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask + +apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.bwc-test' + +dependencies { + javaRestTestImplementation project(xpackModule('esql:qa:testFixtures')) + javaRestTestImplementation project(xpackModule('esql:qa:server')) +} + +def supportedVersion = bwcVersion -> { + // This test is less restricted than the actual CCS compatibility matrix that we are supporting. 
+  // CCQ is available on 8.13 or later
+  return bwcVersion.onOrAfter(Version.fromString("8.13.0"));
+}
+
+BuildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName ->
+  tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) {
+    usesBwcDistribution(bwcVersion)
+    systemProperty("tests.old_cluster_version", bwcVersion)
+    maxParallelForks = 1
+  }
+}
diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/Clusters.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/Clusters.java
new file mode 100644
index 0000000000000..f20d758132cbb
--- /dev/null
+++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/Clusters.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.ccq;
+
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.cluster.local.distribution.DistributionType;
+import org.elasticsearch.test.cluster.util.Version;
+
+public class Clusters {
+    public static ElasticsearchCluster remoteCluster() {
+        return ElasticsearchCluster.local()
+            .name("remote_cluster")
+            .distribution(DistributionType.DEFAULT)
+            .version(Version.fromString(System.getProperty("tests.old_cluster_version")))
+            .nodes(2)
+            .setting("node.roles", "[data,ingest,master]")
+            .setting("xpack.security.enabled", "false")
+            .setting("xpack.license.self_generated.type", "trial")
+            .shared(true)
+            .setting("cluster.routing.rebalance.enable", "none")
+            .build();
+    }
+
+    public static ElasticsearchCluster localCluster(ElasticsearchCluster remoteCluster) {
+        return ElasticsearchCluster.local()
+            .name("local_cluster")
+            .distribution(DistributionType.DEFAULT)
+            .version(Version.CURRENT)
+            .nodes(2)
+            .setting("xpack.security.enabled", "false")
+            .setting("xpack.license.self_generated.type", "trial")
+            .setting("node.roles", "[data,ingest,master,remote_cluster_client]")
+            .setting("cluster.remote.remote_cluster.seeds", () -> "\"" + remoteCluster.getTransportEndpoint(0) + "\"")
+            .setting("cluster.remote.connections_per_cluster", "1")
+            .shared(true)
+            .setting("cluster.routing.rebalance.enable", "none")
+            .build();
+    }
+
+    public static org.elasticsearch.Version oldVersion() {
+        return org.elasticsearch.Version.fromString(System.getProperty("tests.old_cluster_version"));
+    }
+}
diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java
new file mode 100644
index 0000000000000..fac888dff37fa
--- /dev/null
+++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java
@@ -0,0 +1,214 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.ccq;
+
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.core.IOUtils;
+import org.elasticsearch.test.TestClustersThreadFilter;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase;
+import org.elasticsearch.xpack.ql.CsvSpecReader;
+import org.elasticsearch.xpack.ql.CsvSpecReader.CsvTestCase;
+import org.elasticsearch.xpack.ql.SpecReader;
+import org.junit.ClassRule;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Locale;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import static org.elasticsearch.xpack.esql.CsvTestUtils.isEnabled;
+import static org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.SYNC;
+import static org.elasticsearch.xpack.ql.CsvSpecReader.specParser;
+import static org.elasticsearch.xpack.ql.TestUtils.classpathResources;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
+
+/**
+ * This suite loads the data into either the local cluster or the remote cluster, then runs spec tests with CCQ.
+ * TODO: Some spec tests prevent us from splitting data across multiple shards/indices/clusters
+ */
+@ThreadLeakFilters(filters = TestClustersThreadFilter.class)
+public class MultiClusterSpecIT extends EsqlSpecTestCase {
+
+    static ElasticsearchCluster remoteCluster = Clusters.remoteCluster();
+    static ElasticsearchCluster localCluster = Clusters.localCluster(remoteCluster);
+
+    @ClassRule
+    public static TestRule clusterRule = RuleChain.outerRule(remoteCluster).around(localCluster);
+
+    @ParametersFactory(argumentFormatting = "%2$s.%3$s")
+    public static List<Object[]> readScriptSpec() throws Exception {
+        List<URL> urls = classpathResources("/*.csv-spec");
+        assertTrue("Not enough specs found " + urls, urls.size() > 0);
+        List<Object[]> specs = SpecReader.readScriptSpec(urls, specParser());
+
+        int len = specs.get(0).length;
+        List<Object[]> testcases = new ArrayList<>();
+        for (var spec : specs) {
+            for (Mode mode : List.of(SYNC)) { // No async, for now
+                Object[] obj = new Object[len + 1];
+                System.arraycopy(spec, 0, obj, 0, len);
+                obj[len] = mode;
+                testcases.add(obj);
+            }
+        }
+        return testcases;
+    }
+
+    public MultiClusterSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase, Mode mode) {
+        super(fileName, groupName, testName, lineNumber, convertToRemoteIndices(testCase), mode);
+    }
+
+    @Override
+    protected void shouldSkipTest(String testName) {
+        super.shouldSkipTest(testName);
+        assumeFalse("CCQ doesn't support enrich yet", hasEnrich(testCase.query));
+        assumeFalse("can't test with _index metadata", hasIndexMetadata(testCase.query));
+        assumeTrue("Test " + testName + " is skipped on " + Clusters.oldVersion(), isEnabled(testName, Clusters.oldVersion()));
+    }
+
+    @Override
+    protected String getTestRestCluster() {
+        return localCluster.getHttpAddresses();
+    }
+
+    @Override
+    protected RestClient buildClient(Settings settings, HttpHost[] localHosts) throws IOException {
+        RestClient localClient = super.buildClient(settings, localHosts);
+        HttpHost[] remoteHosts = parseClusterHosts(remoteCluster.getHttpAddresses()).toArray(HttpHost[]::new);
+        RestClient remoteClient = super.buildClient(settings, remoteHosts);
+        return twoClients(localClient, remoteClient);
+    }
+
+    /**
+     * Creates a new mock client that dispatches every request to both the local and remote clusters, excluding _bulk and _query requests.
+     * - '_bulk' requests are randomly sent to either the local or remote cluster to populate data. Some spec tests, such as AVG,
+     * prevent the splitting of bulk requests.
+     * - '_query' requests are dispatched to the local cluster only, as we are testing cross-cluster queries.
+ */ + static RestClient twoClients(RestClient localClient, RestClient remoteClient) throws IOException { + RestClient twoClients = mock(RestClient.class); + // write to a single cluster for now due to the precision of some functions such as avg and tests related to updates + final RestClient bulkClient = randomFrom(localClient, remoteClient); + when(twoClients.performRequest(any())).then(invocation -> { + Request request = invocation.getArgument(0); + String endpoint = request.getEndpoint(); + if (endpoint.startsWith("/_query")) { + return localClient.performRequest(request); + } else if (endpoint.contains("_bulk")) { + return bulkClient.performRequest(request); + } else { + Request[] clones = cloneRequests(request, 2); + Response resp1 = remoteClient.performRequest(clones[0]); + Response resp2 = localClient.performRequest(clones[1]); + assertEquals(resp1.getStatusLine().getStatusCode(), resp2.getStatusLine().getStatusCode()); + return resp2; + } + }); + doAnswer(invocation -> { + IOUtils.close(localClient, remoteClient); + return null; + }).when(twoClients).close(); + return twoClients; + } + + static Request[] cloneRequests(Request orig, int numClones) throws IOException { + Request[] clones = new Request[numClones]; + for (int i = 0; i < clones.length; i++) { + clones[i] = new Request(orig.getMethod(), orig.getEndpoint()); + clones[i].addParameters(orig.getParameters()); + } + HttpEntity entity = orig.getEntity(); + if (entity != null) { + byte[] bytes = entity.getContent().readAllBytes(); + entity.getContent().close(); + for (Request clone : clones) { + ByteArrayInputStream cloneInput = new ByteArrayInputStream(bytes); + HttpEntity cloneEntity = spy(entity); + when(cloneEntity.getContent()).thenReturn(cloneInput); + clone.setEntity(cloneEntity); + } + } + return clones; + } + + static CsvSpecReader.CsvTestCase convertToRemoteIndices(CsvSpecReader.CsvTestCase testCase) { + String query = testCase.query; + String[] commands = query.split("\\|"); + String first = commands[0].trim(); + if (commands[0].toLowerCase(Locale.ROOT).startsWith("from")) { + String[] parts = commands[0].split("\\["); + assert parts.length >= 1 : parts; + String fromStatement = parts[0]; + String[] localIndices = fromStatement.substring("FROM ".length()).split(","); + String remoteIndices = Arrays.stream(localIndices) + .map(index -> "*:" + index.trim() + "," + index.trim()) + .collect(Collectors.joining(",")); + var newFrom = "FROM " + remoteIndices + commands[0].substring(fromStatement.length()); + testCase.query = newFrom + " " + query.substring(first.length()); + } + int offset = testCase.query.length() - query.length(); + if (offset != 0) { + final String pattern = "Line (\\d+):(\\d+):"; + final Pattern regex = Pattern.compile(pattern); + testCase.adjustExpectedWarnings(warning -> { + Matcher matcher = regex.matcher(warning); + if (matcher.find()) { + int line = Integer.parseInt(matcher.group(1)); + if (line == 1) { + int position = Integer.parseInt(matcher.group(2)); + int newPosition = position + offset; + return warning.replaceFirst(pattern, "Line " + line + ":" + newPosition + ":"); + } + } + return warning; + }); + } + return testCase; + } + + static boolean hasEnrich(String query) { + String[] commands = query.split("\\|"); + for (int i = 0; i < commands.length; i++) { + commands[i] = commands[i].trim(); + if (commands[i].toLowerCase(Locale.ROOT).startsWith("enrich")) { + return true; + } + } + return false; + } + + static boolean hasIndexMetadata(String query) { + String[] commands = query.split("\\|"); 
+ if (commands[0].trim().toLowerCase(Locale.ROOT).startsWith("from")) { + String[] parts = commands[0].split("\\["); + return parts.length > 1 && parts[1].contains("_index"); + } + return false; + } +} diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClustersIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClustersIT.java new file mode 100644 index 0000000000000..f79de820ae48d --- /dev/null +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClustersIT.java @@ -0,0 +1,203 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.ccq; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.elasticsearch.test.MapMatcher.assertMap; +import static org.elasticsearch.test.MapMatcher.matchesMap; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class MultiClustersIT extends ESRestTestCase { + static ElasticsearchCluster remoteCluster = Clusters.remoteCluster(); + static ElasticsearchCluster localCluster = Clusters.localCluster(remoteCluster); + + @ClassRule + public static TestRule clusterRule = RuleChain.outerRule(remoteCluster).around(localCluster); + + @Override + protected String getTestRestCluster() { + return localCluster.getHttpAddresses(); + } + + record Doc(int id, String color, long data) { + + } + + final String localIndex = "test-local-index"; + List localDocs = List.of(); + final String remoteIndex = "test-remote-index"; + List remoteDocs = List.of(); + + @Before + public void setUpIndices() throws Exception { + final String mapping = """ + "properties": { + "data": { "type": "long" }, + "color": { "type": "keyword" } + } + """; + RestClient localClient = client(); + localDocs = IntStream.range(0, between(1, 500)) + .mapToObj(n -> new Doc(n, randomFrom("red", "yellow", "green"), randomIntBetween(1, 1000))) + .toList(); + createIndex( + localClient, + localIndex, + Settings.builder().put("index.number_of_shards", randomIntBetween(1, 5)).build(), + mapping, + null + ); + indexDocs(localClient, localIndex, localDocs); + + remoteDocs = IntStream.range(0, between(1, 500)) + .mapToObj(n -> new Doc(n, randomFrom("red", "yellow", "green"), randomIntBetween(1, 1000))) + .toList(); + try (RestClient remoteClient = remoteClusterClient()) { + createIndex( + remoteClient, + remoteIndex, + Settings.builder().put("index.number_of_shards", 
randomIntBetween(1, 5)).build(), + mapping, + null + ); + indexDocs(remoteClient, remoteIndex, remoteDocs); + } + } + + @After + public void wipeIndices() throws Exception { + try (RestClient remoteClient = remoteClusterClient()) { + deleteIndex(remoteClient, remoteIndex); + } + } + + void indexDocs(RestClient client, String index, List docs) throws IOException { + logger.info("--> indexing {} docs to index {}", docs.size(), index); + long total = 0; + for (Doc doc : docs) { + Request createDoc = new Request("POST", "/" + index + "/_doc/id_" + doc.id); + if (randomInt(100) < 10) { + createDoc.addParameter("refresh", "true"); + } + createDoc.setJsonEntity(Strings.format(""" + { "color": "%s", "data": %s} + """, doc.color, doc.data)); + assertOK(client.performRequest(createDoc)); + total += doc.data; + } + logger.info("--> index={} total={}", index, total); + refresh(client, index); + } + + private Map run(String query) throws IOException { + Map resp = runEsql(new RestEsqlTestCase.RequestObjectBuilder().query(query).build()); + logger.info("--> query {} response {}", query, resp); + return resp; + } + + protected boolean supportsAsync() { + return false; // TODO: Version.CURRENT.onOrAfter(Version.V_8_13_0); ?? // the Async API was introduced in 8.13.0 + } + + private Map runEsql(RestEsqlTestCase.RequestObjectBuilder requestObject) throws IOException { + if (supportsAsync()) { + return RestEsqlTestCase.runEsqlAsync(requestObject, NO_WARNINGS); + } else { + return RestEsqlTestCase.runEsqlSync(requestObject, NO_WARNINGS); + } + } + + private static final List NO_WARNINGS = List.of(); + + public void testCount() throws Exception { + { + Map result = run("FROM test-local-index,*:test-remote-index | STATS c = COUNT(*)"); + var columns = List.of(Map.of("name", "c", "type", "long")); + var values = List.of(List.of(localDocs.size() + remoteDocs.size())); + assertMap(result, matchesMap().entry("columns", columns).entry("values", values)); + } + { + Map result = run("FROM *:test-remote-index | STATS c = COUNT(*)"); + var columns = List.of(Map.of("name", "c", "type", "long")); + var values = List.of(List.of(remoteDocs.size())); + assertMap(result, matchesMap().entry("columns", columns).entry("values", values)); + } + } + + public void testUngroupedAggs() throws Exception { + { + Map result = run("FROM test-local-index,*:test-remote-index | STATS total = SUM(data)"); + var columns = List.of(Map.of("name", "total", "type", "long")); + long sum = Stream.concat(localDocs.stream(), remoteDocs.stream()).mapToLong(d -> d.data).sum(); + var values = List.of(List.of(Math.toIntExact(sum))); + assertMap(result, matchesMap().entry("columns", columns).entry("values", values)); + } + { + Map result = run("FROM *:test-remote-index | STATS total = SUM(data)"); + var columns = List.of(Map.of("name", "total", "type", "long")); + long sum = remoteDocs.stream().mapToLong(d -> d.data).sum(); + var values = List.of(List.of(Math.toIntExact(sum))); + assertMap(result, matchesMap().entry("columns", columns).entry("values", values)); + } + } + + public void testGroupedAggs() throws Exception { + { + Map result = run("FROM test-local-index,*:test-remote-index | STATS total = SUM(data) BY color | SORT color"); + var columns = List.of(Map.of("name", "total", "type", "long"), Map.of("name", "color", "type", "keyword")); + var values = Stream.concat(localDocs.stream(), remoteDocs.stream()) + .collect(Collectors.toMap(d -> d.color, Doc::data, Long::sum)) + .entrySet() + .stream() + .sorted(Map.Entry.comparingByKey()) + .map(e -> 
List.of(Math.toIntExact(e.getValue()), e.getKey())) + .toList(); + assertMap(result, matchesMap().entry("columns", columns).entry("values", values)); + } + { + Map result = run("FROM *:test-remote-index | STATS total = SUM(data) by color | SORT color"); + var columns = List.of(Map.of("name", "total", "type", "long"), Map.of("name", "color", "type", "keyword")); + var values = remoteDocs.stream() + .collect(Collectors.toMap(d -> d.color, Doc::data, Long::sum)) + .entrySet() + .stream() + .sorted(Map.Entry.comparingByKey()) + .map(e -> List.of(Math.toIntExact(e.getValue()), e.getKey())) + .toList(); + assertMap(result, matchesMap().entry("columns", columns).entry("values", values)); + } + } + + private RestClient remoteClusterClient() throws IOException { + var clusterHosts = parseClusterHosts(remoteCluster.getHttpAddresses()); + return buildClient(restClientSettings(), clusterHosts.toArray(new HttpHost[0])); + } +} diff --git a/x-pack/plugin/esql/qa/server/multi-node/build.gradle b/x-pack/plugin/esql/qa/server/multi-node/build.gradle index 300ed4df92bc2..e7ef204d77dbb 100644 --- a/x-pack/plugin/esql/qa/server/multi-node/build.gradle +++ b/x-pack/plugin/esql/qa/server/multi-node/build.gradle @@ -1,19 +1,11 @@ -apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' dependencies { javaRestTestImplementation project(xpackModule('esql:qa:testFixtures')) + javaRestTestImplementation project(xpackModule('esql:qa:server')) } -restResources { - restApi { - include '_common', 'bulk', 'indices', 'esql', 'xpack' - } -} -testClusters.configureEach { - numberOfNodes = 2 - testDistribution = 'DEFAULT' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.monitoring.collection.enabled', 'true' - setting 'xpack.security.enabled', 'false' +tasks.named('javaRestTest') { + usesDefaultDistribution() } diff --git a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java index eab26b565f93d..d73f66ab00107 100644 --- a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java @@ -7,11 +7,27 @@ package org.elasticsearch.xpack.esql.qa.multi_node; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase; import org.elasticsearch.xpack.ql.CsvSpecReader.CsvTestCase; +import org.junit.ClassRule; public class EsqlSpecIT extends EsqlSpecTestCase { - public EsqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { - super(fileName, groupName, testName, lineNumber, testCase); + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .nodes(2) + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public EsqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase, Mode mode) { + super(fileName, groupName, testName, 
lineNumber, testCase, mode); } } diff --git a/x-pack/plugin/esql/qa/server/single-node/build.gradle b/x-pack/plugin/esql/qa/server/single-node/build.gradle index 2d430965efb21..1932faa49fcba 100644 --- a/x-pack/plugin/esql/qa/server/single-node/build.gradle +++ b/x-pack/plugin/esql/qa/server/single-node/build.gradle @@ -1,7 +1,9 @@ -apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' dependencies { javaRestTestImplementation project(xpackModule('esql:qa:testFixtures')) + javaRestTestImplementation project(xpackModule('esql:qa:server')) yamlRestTestImplementation project(xpackModule('esql:qa:server')) } @@ -14,9 +16,12 @@ restResources { } } -testClusters.configureEach { - testDistribution = 'DEFAULT' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.monitoring.collection.enabled', 'true' - setting 'xpack.security.enabled', 'false' +tasks.named('javaRestTest') { + usesDefaultDistribution() + maxParallelForks = 1 +} + +tasks.named('yamlRestTest') { + usesDefaultDistribution() + maxParallelForks = 1 } diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/Clusters.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/Clusters.java new file mode 100644 index 0000000000000..f0724a411e3c5 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/Clusters.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.qa.single_node; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; + +public class Clusters { + public static ElasticsearchCluster testCluster() { + return ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .nodes(1) + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .shared(true) + .build(); + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlSpecIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlSpecIT.java index d3a4d7a14a0f1..db737e3678752 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlSpecIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlSpecIT.java @@ -7,11 +7,25 @@ package org.elasticsearch.xpack.esql.qa.single_node; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase; import org.elasticsearch.xpack.ql.CsvSpecReader.CsvTestCase; +import org.junit.ClassRule; +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) public class EsqlSpecIT extends EsqlSpecTestCase { - public EsqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { - super(fileName, groupName, testName, lineNumber, testCase); + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public EsqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase, Mode mode) { + super(fileName, groupName, testName, lineNumber, testCase, mode); } } diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/GenerativeIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/GenerativeIT.java index e499b13bf1db8..9b98c29f5c3e3 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/GenerativeIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/GenerativeIT.java @@ -7,8 +7,22 @@ package org.elasticsearch.xpack.esql.qa.single_node; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import org.apache.lucene.tests.util.LuceneTestCase.AwaitsFix; +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.xpack.esql.qa.rest.generative.GenerativeRestTest; +import org.junit.ClassRule; @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102084") -public class GenerativeIT extends GenerativeRestTest {} +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class GenerativeIT extends GenerativeRestTest { + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(); + + @Override + protected String getTestRestCluster() { + 
return cluster.getHttpAddresses(); + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEnrichIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEnrichIT.java index b2222f4f2e78e..7c707a82aa82d 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEnrichIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEnrichIT.java @@ -7,6 +7,24 @@ package org.elasticsearch.xpack.esql.qa.single_node; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.xpack.esql.qa.rest.RestEnrichTestCase; +import org.junit.ClassRule; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class RestEnrichIT extends RestEnrichTestCase { + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } -public class RestEnrichIT extends RestEnrichTestCase {} + public RestEnrichIT(Mode mode) { + super(mode); + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java index 10e63a563efc7..67d3da5b4e694 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java @@ -6,24 +6,49 @@ */ package org.elasticsearch.xpack.esql.qa.single_node; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import org.apache.http.util.EntityUtils; import org.elasticsearch.Build; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase; import org.junit.Assert; +import org.junit.ClassRule; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.util.Arrays; import java.util.List; import java.util.Locale; import java.util.Map; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.core.Is.is; +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) public class RestEsqlIT extends RestEsqlTestCase { + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @ParametersFactory + public static List modes() { + return Arrays.stream(Mode.values()).map(m -> new Object[] { m }).toList(); + } + + public RestEsqlIT(Mode mode) { + super(mode); + } public void testBasicEsql() throws IOException { StringBuilder b = new StringBuilder(); @@ -44,7 +69,6 @@ public void testBasicEsql() throws IOException 
{ if (Build.current().isSnapshot()) { builder.pragmas(Settings.builder().put("data_partitioning", "shard").build()); } - builder.build(); Map result = runEsql(builder); assertEquals(2, result.size()); Map colA = Map.of("name", "avg(value)", "type", "double"); @@ -63,17 +87,17 @@ public void testInvalidPragma() throws IOException { } RequestObjectBuilder builder = new RequestObjectBuilder().query("from test-index | limit 1 | keep f"); builder.pragmas(Settings.builder().put("data_partitioning", "invalid-option").build()); - builder.build(); - ResponseException re = expectThrows(ResponseException.class, () -> runEsql(builder)); + ResponseException re = expectThrows(ResponseException.class, () -> runEsqlSync(builder)); assertThat(EntityUtils.toString(re.getResponse().getEntity()), containsString("No enum constant")); + + assertThat(deleteIndex("test-index").isAcknowledged(), is(true)); // clean up } public void testPragmaNotAllowed() throws IOException { assumeFalse("pragma only disabled on release builds", Build.current().isSnapshot()); RequestObjectBuilder builder = new RequestObjectBuilder().query("row a = 1, b = 2"); builder.pragmas(Settings.builder().put("data_partitioning", "shard").build()); - builder.build(); - ResponseException re = expectThrows(ResponseException.class, () -> runEsql(builder)); + ResponseException re = expectThrows(ResponseException.class, () -> runEsqlSync(builder)); assertThat(EntityUtils.toString(re.getResponse().getEntity()), containsString("[pragma] only allowed in snapshot builds")); } } diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlIT.java b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlIT.java index 38d58644926fe..9e93ae4376896 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlIT.java @@ -9,14 +9,30 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase; import org.junit.After; import org.junit.Before; +import org.junit.ClassRule; public class EsqlClientYamlIT extends ESClientYamlSuiteTestCase { + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .nodes(1) + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + public EsqlClientYamlIT(final ClientYamlTestCandidate testCandidate) { super(testCandidate); } diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java index 734f26fab547a..fd686ec48bb79 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java +++ 
b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java @@ -12,7 +12,6 @@ import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.geo.SpatialPoint; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; @@ -27,6 +26,7 @@ import java.io.IOException; import java.net.URL; +import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -39,7 +39,6 @@ import static org.elasticsearch.xpack.esql.CsvTestUtils.loadCsvSpecValues; import static org.elasticsearch.xpack.esql.CsvTestsDataLoader.CSV_DATASET_MAP; import static org.elasticsearch.xpack.esql.CsvTestsDataLoader.loadDataSetIntoEs; -import static org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase.runEsql; import static org.elasticsearch.xpack.ql.CsvSpecReader.specParser; import static org.elasticsearch.xpack.ql.TestUtils.classpathResources; @@ -50,21 +49,40 @@ public abstract class EsqlSpecTestCase extends ESRestTestCase { private final String groupName; private final String testName; private final Integer lineNumber; - private final CsvTestCase testCase; + protected final CsvTestCase testCase; + protected final Mode mode; + + public enum Mode { + SYNC, + ASYNC + } @ParametersFactory(argumentFormatting = "%2$s.%3$s") public static List readScriptSpec() throws Exception { List urls = classpathResources("/*.csv-spec"); assertTrue("Not enough specs found " + urls, urls.size() > 0); - return SpecReader.readScriptSpec(urls, specParser()); + List specs = SpecReader.readScriptSpec(urls, specParser()); + + int len = specs.get(0).length; + List testcases = new ArrayList<>(); + for (var spec : specs) { + for (Mode mode : Mode.values()) { + Object[] obj = new Object[len + 1]; + System.arraycopy(spec, 0, obj, 0, len); + obj[len] = mode; + testcases.add(obj); + } + } + return testcases; } - public EsqlSpecTestCase(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + protected EsqlSpecTestCase(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase, Mode mode) { this.fileName = fileName; this.groupName = groupName; this.testName = testName; this.lineNumber = lineNumber; this.testCase = testCase; + this.mode = mode; } @Before @@ -74,6 +92,10 @@ public void setup() throws IOException { } } + protected boolean supportsAsync() { + return Version.CURRENT.onOrAfter(Version.V_8_13_0); // the Async API was introduced in 8.13.0 + } + @AfterClass public static void wipeTestData() throws IOException { try { @@ -105,7 +127,7 @@ protected void shouldSkipTest(String testName) { protected final void doTest() throws Throwable { RequestObjectBuilder builder = new RequestObjectBuilder(randomFrom(XContentType.values())); - Map answer = runEsql(builder.query(testCase.query).build(), testCase.expectedWarnings(false)); + Map answer = runEsql(builder.query(testCase.query), testCase.expectedWarnings(false)); var expectedColumnsWithValues = loadCsvSpecValues(testCase.expectedResults); var metadata = answer.get("columns"); @@ -122,6 +144,15 @@ protected final void doTest() throws Throwable { assertResults(expectedColumnsWithValues, actualColumns, actualValues, testCase.ignoreOrder, logger); } + private Map runEsql(RequestObjectBuilder requestObject, List expectedWarnings) throws IOException { + if (mode == Mode.ASYNC) { + assert supportsAsync(); + 
return RestEsqlTestCase.runEsqlAsync(requestObject, expectedWarnings); + } else { + return RestEsqlTestCase.runEsqlSync(requestObject, expectedWarnings); + } + } + protected void assertResults( ExpectedResults expected, List<Map<String, Object>> actualColumns, @@ -130,32 +161,7 @@ protected void assertResults( Logger logger ) { assertMetadata(expected, actualColumns, logger); - assertData(expected, actualValues, testCase.ignoreOrder, logger, EsqlSpecTestCase::valueToString); - } - - /** - * Unfortunately the GeoPoint.toString method returns the old format, but cannot be changed due to BWC. - * So we need to custom format GeoPoint as well as wrap Lists to ensure this custom conversion applies to multi-value fields - */ - private static String valueToString(Object value) { - if (value == null) { - return "null"; - } else if (value instanceof List list) { - StringBuilder sb = new StringBuilder("["); - for (Object field : list) { - if (sb.length() > 1) { - sb.append(", "); - } - sb.append(valueToString(field)); - } - return sb.append("]").toString(); - } else if (value instanceof SpatialPoint point) { - // TODO: This knowledge should be in GeoPoint or at least that package - // Alternatively we could just change GeoPoint.toString() to use WKT, but that has other side-effects - return "POINT (" + point.getX() + " " + point.getY() + ")"; - } else { - return value.toString(); - } + assertData(expected, actualValues, testCase.ignoreOrder, logger, value -> value == null ? "null" : value.toString()); } private Throwable reworkException(Throwable th) { diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEnrichTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEnrichTestCase.java index f409fc6e69dee..c884b123f0f99 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEnrichTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEnrichTestCase.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.esql.qa.rest; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; @@ -15,20 +17,36 @@ import org.junit.Before; import java.io.IOException; +import java.util.Arrays; import java.util.List; import java.util.Map; import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; -import static org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase.runEsql; import static org.hamcrest.Matchers.containsString; -public class RestEnrichTestCase extends ESRestTestCase { +public abstract class RestEnrichTestCase extends ESRestTestCase { private static final String sourceIndexName = "countries"; private static final String testIndexName = "test"; private static final String policyName = "countries"; + public enum Mode { + SYNC, + ASYNC + } + + protected final Mode mode; + + @ParametersFactory + public static List modes() { + return Arrays.stream(Mode.values()).map(m -> new Object[] { m }).toList(); + } + + protected RestEnrichTestCase(Mode mode) { + this.mode = mode; + } + @Before @After public void assertRequestBreakerEmpty() throws Exception { @@ -126,7 +144,7 @@ public void wipeTestData() throws IOException { public void testNonExistentEnrichPolicy() throws IOException { ResponseException re = expectThrows( ResponseException.class, - () -> runEsql(new
RestEsqlTestCase.RequestObjectBuilder().query("from test | enrich countris").build()) + () -> RestEsqlTestCase.runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query("from test | enrich countris"), List.of()) ); assertThat( EntityUtils.toString(re.getResponse().getEntity()), @@ -137,7 +155,9 @@ public void testNonExistentEnrichPolicy() throws IOException { public void testNonExistentEnrichPolicy_KeepField() throws IOException { ResponseException re = expectThrows( ResponseException.class, - () -> runEsql(new RestEsqlTestCase.RequestObjectBuilder().query("from test | enrich countris | keep number").build()) + () -> RestEsqlTestCase.runEsqlSync( + new RestEsqlTestCase.RequestObjectBuilder().query("from test | enrich countris | keep number") + ) ); assertThat( EntityUtils.toString(re.getResponse().getEntity()), @@ -147,7 +167,7 @@ public void testNonExistentEnrichPolicy_KeepField() throws IOException { public void testMatchField_ImplicitFieldsList() throws IOException { Map result = runEsql( - new RestEsqlTestCase.RequestObjectBuilder().query("from test | enrich countries | keep number").build() + new RestEsqlTestCase.RequestObjectBuilder().query("from test | enrich countries | keep number") ); var columns = List.of(Map.of("name", "number", "type", "long")); var values = List.of(List.of(1000), List.of(1000), List.of(5000)); @@ -158,7 +178,7 @@ public void testMatchField_ImplicitFieldsList() throws IOException { public void testMatchField_ImplicitFieldsList_WithStats() throws IOException { Map result = runEsql( new RestEsqlTestCase.RequestObjectBuilder().query("from test | enrich countries | stats s = sum(number) by country_name") - .build() + ); var columns = List.of(Map.of("name", "s", "type", "long"), Map.of("name", "country_name", "type", "keyword")); var values = List.of(List.of(2000, "United States of America"), List.of(5000, "China")); @@ -166,6 +186,16 @@ public void testMatchField_ImplicitFieldsList_WithStats() throws IOException { assertMap(result, matchesMap().entry("columns", columns).entry("values", values)); } + private Map runEsql(RestEsqlTestCase.RequestObjectBuilder requestObject) throws IOException { + if (mode == Mode.ASYNC) { + return RestEsqlTestCase.runEsqlAsync(requestObject, NO_WARNINGS); + } else { + return RestEsqlTestCase.runEsqlSync(requestObject, NO_WARNINGS); + } + } + + private static final List NO_WARNINGS = List.of(); + @Override protected boolean preserveClusterUponCompletion() { return true; diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java index 3693f0b0c2bb9..dd55cfd27c9cb 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java @@ -21,6 +21,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -36,6 +39,9 @@ import java.nio.charset.StandardCharsets; import java.time.ZoneId; import java.util.ArrayList; +import java.util.Arrays; +import 
java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; @@ -45,20 +51,47 @@ import static org.elasticsearch.test.ListMatcher.matchesList; import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; +import static org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase.Mode.ASYNC; +import static org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase.Mode.SYNC; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; -public class RestEsqlTestCase extends ESRestTestCase { +public abstract class RestEsqlTestCase extends ESRestTestCase { // Test runner will run multiple suites in parallel, with some of them requiring preserving state between // tests (like EsqlSpecTestCase), so test data (like index name) needs not collide and cleanup must be done locally. private static final String TEST_INDEX_NAME = "rest-esql-test"; + private static final Logger LOGGER = LogManager.getLogger(RestEsqlTestCase.class); + + private static final List NO_WARNINGS = List.of(); + + public static boolean shouldLog() { + return false; + } + + public enum Mode { + SYNC, + ASYNC + } + + protected final Mode mode; + + protected RestEsqlTestCase(Mode mode) { + this.mode = mode; + } + public static class RequestObjectBuilder { private final XContentBuilder builder; private boolean isBuilt = false; + private Boolean keepOnCompletion = null; + public RequestObjectBuilder() throws IOException { this(randomFrom(XContentType.values())); } @@ -88,6 +121,26 @@ public RequestObjectBuilder timeZone(ZoneId zoneId) throws IOException { return this; } + public RequestObjectBuilder waitForCompletion(TimeValue timeout) throws IOException { + builder.field("wait_for_completion_timeout", timeout); + return this; + } + + public RequestObjectBuilder keepOnCompletion(boolean value) throws IOException { + keepOnCompletion = value; + builder.field("keep_on_completion", value); + return this; + } + + Boolean keepOnCompletion() { + return keepOnCompletion; + } + + public RequestObjectBuilder keepAlive(TimeValue timeout) throws IOException { + builder.field("keep_alive", timeout); + return this; + } + public RequestObjectBuilder pragmas(Settings pragmas) throws IOException { builder.startObject("pragma"); pragmas.toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -121,7 +174,7 @@ public static RequestObjectBuilder jsonBuilder() throws IOException { } public void testGetAnswer() throws IOException { - Map answer = runEsql(builder().query("row a = 1, b = 2").build()); + Map answer = runEsql(builder().query("row a = 1, b = 2")); assertEquals(2, answer.size()); Map colA = Map.of("name", "a", "type", "integer"); Map colB = Map.of("name", "b", "type", "integer"); @@ -130,7 +183,7 @@ public void testGetAnswer() throws IOException { } public void testUseUnknownIndex() throws IOException { - ResponseException e = expectThrows(ResponseException.class, () -> runEsql(builder().query("from doesNotExist").build())); + ResponseException e = expectThrows(ResponseException.class, () -> runEsql(builder().query("from doesNotExist"))); assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); assertThat(e.getMessage(), containsString("verification_exception")); 
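The waitForCompletion, keepOnCompletion and keepAlive methods added above write the async-only body fields (wait_for_completion_timeout, keep_on_completion, keep_alive). A plain-Java stand-in for the body they produce, using a Map in place of the XContentBuilder (values illustrative):

```java
import java.util.LinkedHashMap;
import java.util.Map;

class AsyncQueryBody {
    public static void main(String[] args) {
        Map<String, Object> body = new LinkedHashMap<>();
        body.put("query", "from rest-esql-test | stats min(value)");
        body.put("wait_for_completion_timeout", "50nanos"); // deliberately tiny, so the call often returns before results exist
        body.put("keep_on_completion", true);               // asks the server to store the result and return an id
        body.put("keep_alive", "5d");                       // how long a stored result is retained
        System.out.println(body);
    }
}
```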
assertThat(e.getMessage(), containsString("Unknown index [doesNotExist]")); @@ -160,14 +213,14 @@ public void testNullInAggs() throws IOException { assertThat(EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8), equalTo("{\"errors\":false}")); RequestObjectBuilder builder = new RequestObjectBuilder().query(fromIndex() + " | stats min(value)"); - Map result = runEsql(builder.build()); + Map result = runEsql(builder); assertMap( result, matchesMap().entry("values", List.of(List.of(1))).entry("columns", List.of(Map.of("name", "min(value)", "type", "long"))) ); builder = new RequestObjectBuilder().query(fromIndex() + " | stats min(value) by group"); - result = runEsql(builder.build()); + result = runEsql(builder); assertMap( result, matchesMap().entry("values", List.of(List.of(2, 0), List.of(1, 1))) @@ -184,7 +237,7 @@ public void testColumnarMode() throws IOException { if (columnar || randomBoolean()) { query.columnar(columnar); } - Map answer = runEsql(query.build()); + Map answer = runEsql(query); Map colKeyword = Map.of("name", "keyword", "type", "keyword"); Map colInteger = Map.of("name", "integer", "type", "integer"); @@ -210,29 +263,29 @@ public void testColumnarMode() throws IOException { public void testTextMode() throws IOException { int count = randomIntBetween(0, 100); bulkLoadTestData(count); - var builder = builder().query(fromIndex() + " | keep keyword, integer | limit 100").build(); + var builder = builder().query(fromIndex() + " | keep keyword, integer | limit 100"); assertEquals(expectedTextBody("txt", count, null), runEsqlAsTextWithFormat(builder, "txt", null)); } public void testCSVMode() throws IOException { int count = randomIntBetween(0, 100); bulkLoadTestData(count); - var builder = builder().query(fromIndex() + " | keep keyword, integer | limit 100").build(); + var builder = builder().query(fromIndex() + " | keep keyword, integer | limit 100"); assertEquals(expectedTextBody("csv", count, '|'), runEsqlAsTextWithFormat(builder, "csv", '|')); } public void testTSVMode() throws IOException { int count = randomIntBetween(0, 100); bulkLoadTestData(count); - var builder = builder().query(fromIndex() + " | keep keyword, integer | limit 100").build(); + var builder = builder().query(fromIndex() + " | keep keyword, integer | limit 100"); assertEquals(expectedTextBody("tsv", count, null), runEsqlAsTextWithFormat(builder, "tsv", null)); } public void testCSVNoHeaderMode() throws IOException { bulkLoadTestData(1); - var builder = builder().query(fromIndex() + " | keep keyword, integer | limit 100").build(); - Request request = prepareRequest(); - String mediaType = attachBody(builder, request); + var builder = builder().query(fromIndex() + " | keep keyword, integer | limit 100"); + Request request = prepareRequest(SYNC); + String mediaType = attachBody(builder.build(), request); RequestOptions.Builder options = request.getOptions().toBuilder(); options.addHeader("Content-Type", mediaType); options.addHeader("Accept", "text/csv; header=absent"); @@ -246,7 +299,7 @@ public void testWarningHeadersOnFailedConversions() throws IOException { int count = randomFrom(10, 40, 60); bulkLoadTestData(count); - Request request = prepareRequest(); + Request request = prepareRequest(SYNC); var query = fromIndex() + " | eval asInt = to_int(case(integer % 2 == 0, to_str(integer), keyword)) | limit 1000"; var mediaType = attachBody(new RequestObjectBuilder().query(query).build(), request); @@ -288,7 +341,7 @@ public void testMetadataFieldsOnMultipleIndices() throws IOException { 
assertEquals(201, client().performRequest(request).getStatusLine().getStatusCode()); var query = fromIndex() + "* [metadata _index, _version, _id] | sort _version"; - Map result = runEsql(new RequestObjectBuilder().query(query).build()); + Map result = runEsql(new RequestObjectBuilder().query(query)); var columns = List.of( Map.of("name", "a", "type", "long"), Map.of("name", "_index", "type", "keyword"), @@ -298,12 +351,15 @@ public void testMetadataFieldsOnMultipleIndices() throws IOException { var values = List.of(List.of(3, testIndexName() + "-2", 1, "id-2"), List.of(2, testIndexName() + "-1", 2, "id-1")); assertMap(result, matchesMap().entry("columns", columns).entry("values", values)); + + assertThat(deleteIndex(testIndexName() + "-1").isAcknowledged(), is(true)); // clean up + assertThat(deleteIndex(testIndexName() + "-2").isAcknowledged(), is(true)); // clean up } public void testErrorMessageForEmptyParams() throws IOException { ResponseException re = expectThrows( ResponseException.class, - () -> runEsql(new RequestObjectBuilder().query("row a = 1 | eval x = ?").params("[]").build()) + () -> runEsql(new RequestObjectBuilder().query("row a = 1 | eval x = ?").params("[]")) ); assertThat(EntityUtils.toString(re.getResponse().getEntity()), containsString("Not enough actual parameters 0")); } @@ -311,7 +367,7 @@ public void testErrorMessageForEmptyParams() throws IOException { public void testErrorMessageForInvalidParams() throws IOException { ResponseException re = expectThrows( ResponseException.class, - () -> runEsql(new RequestObjectBuilder().query("row a = 1").params("[{\"x\":\"y\"}]").build()) + () -> runEsql(new RequestObjectBuilder().query("row a = 1").params("[{\"x\":\"y\"}]")) ); assertThat(EntityUtils.toString(re.getResponse().getEntity()), containsString("Required [value, type]")); } @@ -319,7 +375,7 @@ public void testErrorMessageForInvalidParams() throws IOException { public void testErrorMessageForMissingTypeInParams() throws IOException { ResponseException re = expectThrows( ResponseException.class, - () -> runEsql(new RequestObjectBuilder().query("row a = 1").params("[\"x\", 123, true, {\"value\": \"y\"}]").build()) + () -> runEsql(new RequestObjectBuilder().query("row a = 1").params("[\"x\", 123, true, {\"value\": \"y\"}]")) ); assertThat(EntityUtils.toString(re.getResponse().getEntity()), containsString("Required [type]")); } @@ -327,7 +383,7 @@ public void testErrorMessageForMissingTypeInParams() throws IOException { public void testErrorMessageForMissingValueInParams() throws IOException { ResponseException re = expectThrows( ResponseException.class, - () -> runEsql(new RequestObjectBuilder().query("row a = 1").params("[\"x\", 123, true, {\"type\": \"y\"}]").build()) + () -> runEsql(new RequestObjectBuilder().query("row a = 1").params("[\"x\", 123, true, {\"type\": \"y\"}]")) ); assertThat(EntityUtils.toString(re.getResponse().getEntity()), containsString("Required [value]")); } @@ -335,7 +391,7 @@ public void testErrorMessageForMissingValueInParams() throws IOException { public void testErrorMessageForInvalidTypeInParams() throws IOException { ResponseException re = expectThrows( ResponseException.class, - () -> runEsql(new RequestObjectBuilder().query("row a = 1 | eval x = ?").params("[{\"type\": \"byte\", \"value\": 5}]").build()) + () -> runEsqlSync(new RequestObjectBuilder().query("row a = 1 | eval x = ?").params("[{\"type\": \"byte\", \"value\": 5}]")) ); assertThat( EntityUtils.toString(re.getResponse().getEntity()), @@ -369,10 +425,10 @@ public void 
testErrorMessageForLiteralDateMathOverflowOnNegation() throws IOExce assertExceptionForDateMath("-(-9223372036854775807 second - 1 second)", "Exceeds capacity of Duration"); } - private static void assertExceptionForDateMath(String dateMathString, String errorSubstring) throws IOException { + private void assertExceptionForDateMath(String dateMathString, String errorSubstring) throws IOException { ResponseException re = expectThrows( ResponseException.class, - () -> runEsql(new RequestObjectBuilder().query("row a = 1 | eval x = now() + (" + dateMathString + ")").build()) + () -> runEsql(new RequestObjectBuilder().query("row a = 1 | eval x = now() + (" + dateMathString + ")")) ); String responseMessage = EntityUtils.toString(re.getResponse().getEntity()); @@ -386,7 +442,7 @@ public void testErrorMessageForArrayValuesInParams() throws IOException { ResponseException re = expectThrows( ResponseException.class, () -> runEsql( - new RequestObjectBuilder().query("row a = 1 | eval x = ?").params("[{\"type\": \"integer\", \"value\": [5, 6, 7]}]").build() + new RequestObjectBuilder().query("row a = 1 | eval x = ?").params("[{\"type\": \"integer\", \"value\": [5, 6, 7]}]") ) ); assertThat( @@ -425,12 +481,25 @@ private static String expectedTextBody(String format, int count, @Nullable Chara return sb.toString(); } - public static Map runEsql(RequestObjectBuilder requestObject) throws IOException { - return runEsql(requestObject, List.of()); + public Map runEsql(RequestObjectBuilder requestObject) throws IOException { + return runEsql(requestObject, NO_WARNINGS, mode); } - public static Map runEsql(RequestObjectBuilder requestObject, List expectedWarnings) throws IOException { - Request request = prepareRequest(); + public static Map runEsqlSync(RequestObjectBuilder requestObject) throws IOException { + return runEsqlSync(requestObject, NO_WARNINGS); + } + + static Map runEsql(RequestObjectBuilder requestObject, List expectedWarnings, Mode mode) throws IOException { + if (mode == ASYNC) { + return runEsqlAsync(requestObject, expectedWarnings); + } else { + return runEsqlSync(requestObject, expectedWarnings); + } + } + + public static Map runEsqlSync(RequestObjectBuilder requestObject, List expectedWarnings) throws IOException { + requestObject.build(); + Request request = prepareRequest(SYNC); String mediaType = attachBody(requestObject, request); RequestOptions.Builder options = request.getOptions().toBuilder(); @@ -445,16 +514,132 @@ public static Map runEsql(RequestObjectBuilder requestObject, Li request.setOptions(options); HttpEntity entity = performRequest(request, expectedWarnings); + return entityToMap(entity, requestObject.contentType()); + } + + public static Map runEsqlAsync(RequestObjectBuilder requestObject, List expectedWarnings) throws IOException { + addAsyncParameters(requestObject); + requestObject.build(); + Request request = prepareRequest(ASYNC); + String mediaType = attachBody(requestObject, request); + + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.setWarningsHandler(WarningsHandler.PERMISSIVE); // We assert the warnings ourselves + options.addHeader("Content-Type", mediaType); + + if (randomBoolean()) { + options.addHeader("Accept", mediaType); + } else { + request.addParameter("format", requestObject.contentType().queryParameter()); + } + request.setOptions(options); + + if (shouldLog()) { + LOGGER.info("REQUEST={}", request); + } + + Response response = performRequest(request); + HttpEntity entity = response.getEntity(); + + Object 
initialColumns = null; + Object initialValues = null; + var json = entityToMap(entity, requestObject.contentType()); + checkKeepOnCompletion(requestObject, json); + String id = (String) json.get("id"); + + if (id == null) { + // no id returned from an async call, must have completed immediately and without keep_on_completion + assertThat(requestObject.keepOnCompletion(), either(nullValue()).or(is(false))); + assertThat((boolean) json.get("is_running"), is(false)); + assertWarnings(response, expectedWarnings); + json.remove("is_running"); // remove this to not mess up later map assertions + return Collections.unmodifiableMap(json); + } else { + // async may not return results immediately, so may need an async get + assertThat(id, is(not(emptyOrNullString()))); + if ((boolean) json.get("is_running") == false) { + // must have completed immediately so keep_on_completion must be true + assertThat(requestObject.keepOnCompletion(), is(true)); + assertWarnings(response, expectedWarnings); + // we already have the results, but let's remember them so that we can compare to async get + initialColumns = json.get("columns"); + initialValues = json.get("values"); + } else { + // did not return results immediately, so we will need an async get + assertThat(json.get("columns"), is(equalTo(List.<Map<String, Object>>of()))); // no partial results + assertThat(json.get("pages"), nullValue()); + } + // issue a second request to "async get" the results + Request getRequest = prepareAsyncGetRequest(id); + getRequest.setOptions(options); + response = performRequest(getRequest); + entity = response.getEntity(); + } + + var result = entityToMap(entity, requestObject.contentType()); + + // assert initial contents, if any, are the same as async get contents + if (initialColumns != null) { + assertEquals(initialColumns, result.get("columns")); + assertEquals(initialValues, result.get("values")); + } + + assertWarnings(response, expectedWarnings); + assertDeletable(id); + return removeAsyncProperties(result); + } + + // Removes async properties, otherwise consuming assertions would need to handle sync and async differences + static Map removeAsyncProperties(Map map) { + Map copy = new HashMap<>(map); + assertFalse((boolean) copy.remove("is_running")); + copy.remove("id"); // id is optional, do not assert its removal + return Collections.unmodifiableMap(copy); + } + + static Map entityToMap(HttpEntity entity, XContentType expectedContentType) throws IOException { try (InputStream content = entity.getContent()) { XContentType xContentType = XContentType.fromMediaType(entity.getContentType().getValue()); - assertEquals(requestObject.contentType(), xContentType); - return XContentHelper.convertToMap(xContentType.xContent(), content, false); + assertEquals(expectedContentType, xContentType); + var map = XContentHelper.convertToMap(xContentType.xContent(), content, false); + if (shouldLog()) { + LOGGER.info("entity={}", map); + } + return map; + } + } + + static void addAsyncParameters(RequestObjectBuilder requestObject) throws IOException { + // deliberately short in order to frequently trigger return without results + requestObject.waitForCompletion(TimeValue.timeValueNanos(randomIntBetween(1, 100))); + requestObject.keepOnCompletion(randomBoolean()); + requestObject.keepAlive(TimeValue.timeValueDays(randomIntBetween(1, 10))); + } + + // If keep_on_completion is set then an id must always be present, regardless of the value of any other property. 
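That invariant is one row of a small matrix of submit outcomes that runEsqlAsync distinguishes; the sketch below spells the matrix out (semantics inferred from the assertions above, not from a server contract):

```java
import java.util.List;

class SubmitOutcomes {
    // Whether an id comes back, and whether results arrive inline, as a function of
    // keep_on_completion and of the query finishing within the wait (inferred behavior).
    record Outcome(boolean keepOnCompletion, boolean finishedInWait, boolean idReturned, boolean resultsInline) {}

    public static void main(String[] args) {
        List<Outcome> outcomes = List.of(
            new Outcome(false, true,  false, true),  // finished, not kept: inline results, no id
            new Outcome(true,  true,  true,  true),  // finished, kept: inline results plus an id
            new Outcome(false, false, true,  false), // still running: an id, results via async get
            new Outcome(true,  false, true,  false)  // still running, kept: same, and the id outlives completion
        );
        outcomes.forEach(System.out::println);
    }
}
```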
+ static void checkKeepOnCompletion(RequestObjectBuilder requestObject, Map json) { + if (requestObject.keepOnCompletion()) { + assertThat((String) json.get("id"), not(emptyOrNullString())); } } + static void assertDeletable(String id) throws IOException { + var request = prepareAsyncDeleteRequest(id); + performRequest(request); + + // the stored response should no longer be retrievable + ResponseException re = expectThrows(ResponseException.class, () -> deleteNonExistent(request)); + assertThat(EntityUtils.toString(re.getResponse().getEntity()), containsString(id)); + } + + static void deleteNonExistent(Request request) throws IOException { + Response response = client().performRequest(request); + assertEquals(404, response.getStatusLine().getStatusCode()); + } + static String runEsqlAsTextWithFormat(RequestObjectBuilder builder, String format, @Nullable Character delimiter) throws IOException { - Request request = prepareRequest(); - String mediaType = attachBody(builder, request); + Request request = prepareRequest(SYNC); + String mediaType = attachBody(builder.build(), request); RequestOptions.Builder options = request.getOptions().toBuilder(); options.addHeader("Content-Type", mediaType); @@ -477,8 +662,22 @@ static String runEsqlAsTextWithFormat(RequestObjectBuilder builder, String forma return Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)); } - private static Request prepareRequest() { - Request request = new Request("POST", "/_query"); + private static Request prepareRequest(Mode mode) { + Request request = new Request("POST", "/_query" + (mode == ASYNC ? "/async" : "")); + request.addParameter("error_trace", "true"); // Helps with debugging in case something crazy happens on the server. + request.addParameter("pretty", "true"); // Improves error reporting readability + return request; + } + + private static Request prepareAsyncGetRequest(String id) { + Request request = new Request("GET", "/_query/async/" + id + "?wait_for_completion_timeout=60s"); + request.addParameter("error_trace", "true"); // Helps with debugging in case something crazy happens on the server. + request.addParameter("pretty", "true"); // Improves error reporting readability + return request; + } + + private static Request prepareAsyncDeleteRequest(String id) { + Request request = new Request("DELETE", "/_query/async/" + id); request.addParameter("error_trace", "true"); // Helps with debugging in case something crazy happens on the server. 
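Together, these three request builders cover the whole lifecycle of a stored async response: submit to /_query/async, poll GET /_query/async/{id}, then DELETE it, after which the stored result is gone. A java.net.http sketch of the same round trip against a hypothetical local node (naive id extraction, no error handling):

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

class AsyncLifecycle {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        String base = "http://localhost:9200"; // hypothetical local node

        // Submit with a tiny wait and keep_on_completion=true, so an id always comes back.
        HttpRequest submit = HttpRequest.newBuilder(URI.create(base + "/_query/async?error_trace=true&pretty=true"))
            .header("Content-Type", "application/json")
            .POST(HttpRequest.BodyPublishers.ofString("""
                {"query": "row a = 1", "wait_for_completion_timeout": "1nanos", "keep_on_completion": true}"""))
            .build();
        String id = extractId(client.send(submit, HttpResponse.BodyHandlers.ofString()).body());

        // Async get; the generous server-side wait keeps this to a single call in practice.
        HttpRequest get = HttpRequest.newBuilder(
            URI.create(base + "/_query/async/" + id + "?wait_for_completion_timeout=60s")).build();
        System.out.println(client.send(get, HttpResponse.BodyHandlers.ofString()).body());

        // Delete the stored result; a repeat of the same delete is expected to 404.
        HttpRequest delete = HttpRequest.newBuilder(URI.create(base + "/_query/async/" + id)).DELETE().build();
        client.send(delete, HttpResponse.BodyHandlers.ofString());
    }

    private static String extractId(String json) {
        // Naive extraction for the sketch; a real client would parse the JSON.
        int i = json.indexOf("\"id\"");
        int start = json.indexOf('"', json.indexOf(':', i)) + 1;
        return json.substring(start, json.indexOf('"', start));
    }
}
```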
request.addParameter("pretty", "true"); // Improves error reporting readability return request; @@ -493,10 +692,25 @@ private static String attachBody(RequestObjectBuilder requestObject, Request req } private static HttpEntity performRequest(Request request, List allowedWarnings) throws IOException { + return assertWarnings(performRequest(request), allowedWarnings); + } + + private static Response performRequest(Request request) throws IOException { Response response = client().performRequest(request); + if (shouldLog()) { + LOGGER.info("RESPONSE={}", response); + LOGGER.info("RESPONSE headers={}", Arrays.toString(response.getHeaders())); + } assertEquals(200, response.getStatusLine().getStatusCode()); + return response; + } + + private static HttpEntity assertWarnings(Response response, List allowedWarnings) { List warnings = new ArrayList<>(response.getWarnings()); warnings.removeAll(mutedWarnings()); + if (shouldLog()) { + LOGGER.info("RESPONSE warnings (after muted)={}", warnings); + } assertMap(warnings, matchesList(allowedWarnings)); return response.getEntity(); } diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/generative/GenerativeRestTest.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/generative/GenerativeRestTest.java index c341ad26cb7a6..cdec7752aef59 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/generative/GenerativeRestTest.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/generative/GenerativeRestTest.java @@ -7,9 +7,12 @@ package org.elasticsearch.xpack.esql.qa.rest.generative; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.esql.CsvTestsDataLoader; import org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase; +import org.junit.AfterClass; import org.junit.Before; import java.io.IOException; @@ -46,6 +49,18 @@ public void setup() throws IOException { } } + @AfterClass + public static void wipeTestData() throws IOException { + try { + adminClient().performRequest(new Request("DELETE", "/*")); + } catch (ResponseException e) { + // 404 here just means we had no indexes + if (e.getResponse().getStatusLine().getStatusCode() != 404) { + throw e; + } + } + } + public void test() { List indices = availableIndices(); List policies = availableEnrichPolicies(); @@ -81,7 +96,7 @@ private void checkException(EsqlQueryGenerator.QueryExecuted query) { private EsqlQueryGenerator.QueryExecuted execute(String command, int depth) { try { - Map a = RestEsqlTestCase.runEsql(new RestEsqlTestCase.RequestObjectBuilder().query(command).build()); + Map a = RestEsqlTestCase.runEsqlSync(new RestEsqlTestCase.RequestObjectBuilder().query(command).build()); List outputSchema = outputSchema(a); return new EsqlQueryGenerator.QueryExecuted(command, depth, outputSchema, null); } catch (Exception e) { diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java index 80a88981cf5cc..49dc585c01753 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java @@ -119,7 +119,7 @@ private static void assertMetadata( for (int pageIndex = 0; 
pageIndex < pages.size(); pageIndex++) { var page = pages.get(pageIndex); var block = page.getBlock(column); - var blockType = Type.asType(block.elementType()); + var blockType = Type.asType(block.elementType(), actualType); if (blockType == Type.LONG && (expectedType == Type.DATETIME @@ -155,7 +155,7 @@ private static void assertMetadata( } static void assertData(ExpectedResults expected, ActualResults actual, boolean ignoreOrder, Logger logger) { - assertData(expected, actual.values(), ignoreOrder, logger, Function.identity()); + assertData(expected, actual.values(), ignoreOrder, logger, v -> v); } public static void assertData( @@ -202,9 +202,9 @@ public static void assertData( if (expectedType == Type.DATETIME) { expectedValue = rebuildExpected(expectedValue, Long.class, x -> UTC_DATE_TIME_FORMATTER.formatMillis((long) x)); } else if (expectedType == Type.GEO_POINT) { - expectedValue = rebuildExpected(expectedValue, Long.class, x -> GEO.longAsPoint((long) x)); + expectedValue = rebuildExpected(expectedValue, BytesRef.class, x -> GEO.wkbAsString((BytesRef) x)); } else if (expectedType == Type.CARTESIAN_POINT) { - expectedValue = rebuildExpected(expectedValue, Long.class, x -> CARTESIAN.longAsPoint((long) x)); + expectedValue = rebuildExpected(expectedValue, BytesRef.class, x -> CARTESIAN.wkbAsString((BytesRef) x)); } else if (expectedType == Type.IP) { // convert BytesRef-packed IP to String, allowing subsequent comparison with what's expected expectedValue = rebuildExpected(expectedValue, BytesRef.class, x -> DocValueFormat.IP.format((BytesRef) x)); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java index ebe27225becb1..919ef66456230 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java @@ -11,7 +11,9 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockUtils; @@ -24,7 +26,7 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.logging.Logger; import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; +import org.elasticsearch.xpack.esql.action.ResponseValueUtils; import org.elasticsearch.xpack.ql.util.StringUtils; import org.supercsv.io.CsvListReader; import org.supercsv.prefs.CsvPreference; @@ -139,6 +141,7 @@ public void close() { CsvColumn[] columns = null; + var blockFactory = BlockFactory.getInstance(new NoopCircuitBreaker("test-noop"), BigArrays.NON_RECYCLING_INSTANCE); try (BufferedReader reader = org.elasticsearch.xpack.ql.TestUtils.reader(source)) { String line; int lineNumber = 1; @@ -178,7 +181,7 @@ public void close() { columns[i] = new CsvColumn( name, type, - BlockUtils.wrapperFor(BlockFactory.getNonBreakingInstance(), ElementType.fromJava(type.clazz()), 8) + BlockUtils.wrapperFor(blockFactory, ElementType.fromJava(type.clazz()), 8) ); } } @@ -388,8 +391,8 @@ public enum Type { Long.class ), BOOLEAN(Booleans::parseBoolean, Boolean.class), - 
GEO_POINT(x -> x == null ? null : GEO.pointAsLong(GEO.stringAsPoint(x)), Long.class), - CARTESIAN_POINT(x -> x == null ? null : CARTESIAN.pointAsLong(CARTESIAN.stringAsPoint(x)), Long.class); + GEO_POINT(x -> x == null ? null : GEO.stringAsWKB(x), BytesRef.class), + CARTESIAN_POINT(x -> x == null ? null : CARTESIAN.stringAsWKB(x), BytesRef.class); private static final Map LOOKUP = new HashMap<>(); @@ -440,19 +443,27 @@ public static Type asType(String name) { return LOOKUP.get(name.toUpperCase(Locale.ROOT)); } - public static Type asType(ElementType elementType) { + public static Type asType(ElementType elementType, Type actualType) { return switch (elementType) { case INT -> INTEGER; case LONG -> LONG; case DOUBLE -> DOUBLE; case NULL -> NULL; - case BYTES_REF -> KEYWORD; + case BYTES_REF -> bytesRefBlockType(actualType); case BOOLEAN -> BOOLEAN; case DOC -> throw new IllegalArgumentException("can't assert on doc blocks"); case UNKNOWN -> throw new IllegalArgumentException("Unknown block types cannot be handled"); }; } + private static Type bytesRefBlockType(Type actualType) { + if (actualType == GEO_POINT || actualType == CARTESIAN_POINT) { + return actualType; + } else { + return KEYWORD; + } + } + Object convert(String value) { if (value == null) { return null; @@ -477,7 +488,7 @@ record ActualResults( Map<String, List<String>> responseHeaders ) { Iterator<Iterator<Object>> values() { - return EsqlQueryResponse.pagesToValues(dataTypes(), pages); + return ResponseValueUtils.pagesToValues(dataTypes(), pages); } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java index eca8beb06576b..8edcdd9edb124 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java @@ -86,6 +86,11 @@ public byte[] max(String field, DataType dataType) { public boolean isSingleValue(String field) { return false; } + + @Override + public boolean isIndexed(String field) { + return exists(field); + } } public static final TestSearchStats TEST_SEARCH_STATS = new TestSearchStats(); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec index ea53ac5679aa9..177e169387642 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec @@ -93,3 +93,37 @@ M |10 M |10 M |10 ; + +docsCaseSuccessRate +// tag::docsCaseSuccessRate[] +FROM sample_data +| EVAL successful = CASE( + STARTS_WITH(message, "Connected to"), 1, + message == "Connection error", 0 + ) +| STATS success_rate = AVG(successful) +// end::docsCaseSuccessRate[] +; + +// tag::docsCaseSuccessRate-result[] +success_rate:double +0.5 +// end::docsCaseSuccessRate-result[] +; + +docsCaseHourlyErrorRate +// tag::docsCaseHourlyErrorRate[] +FROM sample_data +| EVAL error = CASE(message LIKE "*error*", 1, 0) +| EVAL hour = DATE_TRUNC(1 hour, @timestamp) +| STATS error_rate = AVG(error) by hour +| SORT hour +// end::docsCaseHourlyErrorRate[] +; + +// tag::docsCaseHourlyErrorRate-result[] +error_rate:double | hour:date +0.0 |2023-10-23T12:00:00.000Z +0.6 |2023-10-23T13:00:00.000Z +// end::docsCaseHourlyErrorRate-result[] +; diff --git 
a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec index 591d395661afa..8dd9704fd2d4b 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec @@ -337,6 +337,67 @@ AVG(salary):double | bucket:date // end::auto_bucket_in_agg-result[] ; +evalDateDiffInNanoAndMicroAndMilliSeconds#[skip:-8.12.99, reason:date_diff added in 8.13] +ROW date1=to_datetime("2023-12-02T11:00:00.000Z"), date2=to_datetime("2023-12-02T11:00:00.001Z") +| EVAL dd_ns1=date_diff("nanoseconds", date1, date2), dd_ns2=date_diff("ns", date1, date2) +| EVAL dd_mcs1=date_diff("microseconds", date1, date2), dd_mcs2=date_diff("mcs", date1, date2) +| EVAL dd_ms1=date_diff("milliseconds", date1, date2), dd_ms2=date_diff("ms", date1, date2) +| keep dd_ns1, dd_ns2, dd_mcs1, dd_mcs2, dd_ms1, dd_ms2 +; + +dd_ns1:integer | dd_ns2:integer | dd_mcs1:integer | dd_mcs2:integer | dd_ms1:integer | dd_ms2:integer +1000000 | 1000000 | 1000 | 1000 | 1 | 1 +; + +evalDateDiffInSecondsAndMinutesAndHours#[skip:-8.12.99, reason:date_diff added in 8.13] +ROW date1=to_datetime("2023-12-02T11:00:00.000Z"), date2=to_datetime("2023-12-02T12:00:00.000Z") +| EVAL dd_s1=date_diff("seconds", date1, date2), dd_s2=date_diff("ss", date1, date2), dd_s3=date_diff("s", date1, date2) +| EVAL dd_m1=date_diff("minutes", date1, date2), dd_m2=date_diff("mi", date1, date2), dd_m3=date_diff("n", date1, date2) +| EVAL dd_h1=date_diff("hours", date1, date2), dd_h2=date_diff("hh", date1, date2) +| keep dd_s1, dd_s2, dd_s3, dd_m1, dd_m2, dd_m3, dd_h1, dd_h2 +; + +dd_s1:integer | dd_s2:integer | dd_s3:integer | dd_m1:integer | dd_m2:integer | dd_m3:integer | dd_h1:integer | dd_h2:integer +3600 | 3600 | 3600 | 60 | 60 | 60 | 1 | 1 +; + +evalDateDiffInDaysAndWeeks#[skip:-8.12.99, reason:date_diff added in 8.13] +ROW date1=to_datetime("2023-12-02T11:00:00.000Z"), date2=to_datetime("2023-12-24T11:00:00.000Z") +| EVAL dd_wd1=date_diff("weekdays", date1, date2), dd_wd2=date_diff("dw", date1, date2) +| EVAL dd_w1=date_diff("weeks", date1, date2), dd_w2=date_diff("wk", date1, date2), dd_w3=date_diff("ww", date1, date2) +| EVAL dd_d1=date_diff("dy", date1, date2), dd_d2=date_diff("y", date1, date2) +| EVAL dd_dy1=date_diff("days", date1, date2), dd_dy2=date_diff("dd", date1, date2), dd_dy3=date_diff("d", date1, date2) +| keep dd_wd1, dd_wd2, dd_w1, dd_w2, dd_w3, dd_d1, dd_d2, dd_dy1, dd_dy2, dd_dy3 +; + +dd_wd1:integer | dd_wd2:integer | dd_w1:integer | dd_w2:integer | dd_w3:integer | dd_d1:integer | dd_d2:integer | dd_dy1:integer | dd_dy2:integer | dd_dy3:integer +22 | 22 | 3 | 3 | 3 | 22 | 22 | 22 | 22 | 22 +; + +evalDateDiffInMonthsAndQuartersAndYears#[skip:-8.12.99, reason:date_diff added in 8.13] +ROW date1=to_datetime("2023-12-02T11:00:00.000Z"), date2=to_datetime("2024-12-24T11:00:00.000Z") +| EVAL dd_m1=date_diff("months", date1, date2), dd_m2=date_diff("mm", date1, date2), dd_m3=date_diff("m", date1, date2) +| EVAL dd_q1=date_diff("quarters", date1, date2), dd_q2=date_diff("qq", date1, date2), dd_q3=date_diff("q", date1, date2) +| EVAL dd_y1=date_diff("years", date1, date2), dd_y2=date_diff("yyyy", date1, date2), dd_y3=date_diff("yy", date1, date2) +| keep dd_m1, dd_m2, dd_m3, dd_q1, dd_q2, dd_q3, dd_y1, dd_y2, dd_y3 +; + +dd_m1:integer | dd_m2:integer | dd_m3:integer | dd_q1:integer | dd_q2:integer | dd_q3:integer | dd_y1:integer | dd_y2:integer | dd_y3:integer +12 | 12 | 12 | 4 | 4 | 4 | 1 | 1 
| 1 +; + +evalDateDiffErrorOutOfIntegerRange#[skip:-8.12.99, reason:date_diff added in 8.13] +ROW date1=to_datetime("2023-12-02T11:00:00.000Z"), date2=to_datetime("2023-12-23T11:00:00.000Z") +| EVAL dd_oo=date_diff("nanoseconds", date1, date2) +| keep dd_oo +; +warning: Line 2:14: evaluation of [date_diff(\"nanoseconds\", date1, date2)] failed, treating result as null. Only first 20 failures recorded. +warning: Line 2:14: org.elasticsearch.xpack.ql.InvalidArgumentException: [1814400000000000] out of [integer] range + +dd_oo:integer +null +; + evalDateParseWithSimpleDate row a = "2023-02-01" | eval b = date_parse("yyyy-MM-dd", a) | keep b; @@ -567,6 +628,30 @@ dt:datetime |plus:datetime 2100-01-01T01:01:01.001Z |2100-01-01T00:00:00.000Z ; +datePlusNull#[skip:-8.12.99, reason:date math with null enabled in 8.13] +row dt = to_dt("2100-01-01T01:01:01.001Z") +| eval plus_post = dt + null, plus_pre = null + dt; + +dt:datetime |plus_post:datetime |plus_pre:datetime +2100-01-01T01:01:01.001Z |null |null +; + +datePlusNullAndDuration#[skip:-8.12.99, reason:date math with null enabled in 8.13] +row dt = to_dt("2100-01-01T01:01:01.001Z") +| eval plus_post = dt + null + 1 hour, plus_pre = 1 second + null + dt; + +dt:datetime |plus_post:datetime |plus_pre:datetime +2100-01-01T01:01:01.001Z |null |null +; + +datePlusNullAndPeriod#[skip:-8.12.99, reason:date math with null enabled in 8.13] +row dt = to_dt("2100-01-01T01:01:01.001Z") +| eval plus_post = dt + null + 2 years, plus_pre = 3 weeks + null + dt; + +dt:datetime |plus_post:datetime |plus_pre:datetime +2100-01-01T01:01:01.001Z |null |null +; + dateMinusDuration row dt = to_dt("2100-01-01T01:01:01.001Z") | eval minus = dt - 1 hour - 1 minute - 1 second - 1 milliseconds; @@ -600,6 +685,33 @@ then:datetime 1953-04-04T00:00:00.000Z ; +dateMinusNull#[skip:-8.12.99, reason:date math with null enabled in 8.13] +row dt = to_dt("1953-04-04T04:03:02.001Z") +| eval minus = dt - null +; + +dt:datetime |minus:datetime +1953-04-04T04:03:02.001Z |null +; + +dateMinusNullAndPeriod#[skip:-8.12.99, reason:date math with null enabled in 8.13] +row dt = to_dt("1953-04-04T04:03:02.001Z") +| eval minus = dt - null - 4 minutes +; + +dt:datetime |minus:datetime +1953-04-04T04:03:02.001Z |null +; + +dateMinusNullAndDuration#[skip:-8.12.99, reason:date math with null enabled in 8.13] +row dt = to_dt("1953-04-04T04:03:02.001Z") +| eval minus = dt - 6 days - null +; + +dt:datetime |minus:datetime +1953-04-04T04:03:02.001Z |null +; + datePlusPeriodAndDuration row dt = to_dt("2100-01-01T00:00:00.000Z") | eval plus = dt + 4 years + 3 months + 2 weeks + 1 day + 1 hour + 1 minute + 1 second + 1 milliseconds; @@ -726,6 +838,86 @@ birth_date:datetime 1953-04-21T00:00:00.000Z ; +docsAutoBucketMonth +//tag::docsAutoBucketMonth[] +FROM employees +| WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" +| EVAL month = AUTO_BUCKET(hire_date, 20, "1985-01-01T00:00:00Z", "1986-01-01T00:00:00Z") +| KEEP hire_date, month +| SORT hire_date +//end::docsAutoBucketMonth[] +; + +//tag::docsAutoBucketMonth-result[] + hire_date:date | month:date +1985-02-18T00:00:00.000Z|1985-02-01T00:00:00.000Z +1985-02-24T00:00:00.000Z|1985-02-01T00:00:00.000Z +1985-05-13T00:00:00.000Z|1985-05-01T00:00:00.000Z +1985-07-09T00:00:00.000Z|1985-07-01T00:00:00.000Z +1985-09-17T00:00:00.000Z|1985-09-01T00:00:00.000Z +1985-10-14T00:00:00.000Z|1985-10-01T00:00:00.000Z +1985-10-20T00:00:00.000Z|1985-10-01T00:00:00.000Z +1985-11-19T00:00:00.000Z|1985-11-01T00:00:00.000Z 
+1985-11-20T00:00:00.000Z|1985-11-01T00:00:00.000Z +1985-11-20T00:00:00.000Z|1985-11-01T00:00:00.000Z +1985-11-21T00:00:00.000Z|1985-11-01T00:00:00.000Z +//end::docsAutoBucketMonth-result[] +; + +docsAutoBucketMonthlyHistogram +//tag::docsAutoBucketMonthlyHistogram[] +FROM employees +| WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" +| EVAL month = AUTO_BUCKET(hire_date, 20, "1985-01-01T00:00:00Z", "1986-01-01T00:00:00Z") +| STATS hires_per_month = COUNT(*) BY month +| SORT month +//end::docsAutoBucketMonthlyHistogram[] +; + +//tag::docsAutoBucketMonthlyHistogram-result[] + hires_per_month:long | month:date +2 |1985-02-01T00:00:00.000Z +1 |1985-05-01T00:00:00.000Z +1 |1985-07-01T00:00:00.000Z +1 |1985-09-01T00:00:00.000Z +2 |1985-10-01T00:00:00.000Z +4 |1985-11-01T00:00:00.000Z +//end::docsAutoBucketMonthlyHistogram-result[] +; + +docsAutoBucketWeeklyHistogram +//tag::docsAutoBucketWeeklyHistogram[] +FROM employees +| WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" +| EVAL week = AUTO_BUCKET(hire_date, 100, "1985-01-01T00:00:00Z", "1986-01-01T00:00:00Z") +| STATS hires_per_week = COUNT(*) BY week +| SORT week +//end::docsAutoBucketWeeklyHistogram[] +; + +//tag::docsAutoBucketWeeklyHistogram-result[] + hires_per_week:long | week:date +2 |1985-02-18T00:00:00.000Z +1 |1985-05-13T00:00:00.000Z +1 |1985-07-08T00:00:00.000Z +1 |1985-09-16T00:00:00.000Z +2 |1985-10-14T00:00:00.000Z +4 |1985-11-18T00:00:00.000Z +//end::docsAutoBucketWeeklyHistogram-result[] +; + +docsAutoBucketLast24hr +//tag::docsAutoBucketLast24hr[] +FROM sample_data +| WHERE @timestamp >= NOW() - 1 day and @timestamp < NOW() +| EVAL bucket = AUTO_BUCKET(@timestamp, 25, DATE_FORMAT(NOW() - 1 day), DATE_FORMAT(NOW())) +| STATS COUNT(*) BY bucket +//end::docsAutoBucketLast24hr[] +; + + COUNT(*):long | bucket:date +; + docsGettingStartedAutoBucket // tag::gs-auto_bucket[] FROM sample_data @@ -767,3 +959,92 @@ median_duration:double | bucket:date 3107561.0 |2023-10-23T12:00:00.000Z 1756467.0 |2023-10-23T13:00:00.000Z ; + +dateExtract +// tag::dateExtract[] +ROW date = DATE_PARSE("yyyy-MM-dd", "2022-05-06") +| EVAL year = DATE_EXTRACT("year", date) +// end::dateExtract[] +; + +// tag::dateExtract-result[] +date:date | year:long +2022-05-06T00:00:00.000Z | 2022 +// end::dateExtract-result[] +; + +docsDateExtractBusinessHours +// tag::docsDateExtractBusinessHours[] +FROM sample_data +| WHERE DATE_EXTRACT("hour_of_day", @timestamp) < 9 AND DATE_EXTRACT("hour_of_day", @timestamp) >= 17 +// end::docsDateExtractBusinessHours[] +; + +// tag::docsDateExtractBusinessHours-result[] +@timestamp:date | client_ip:ip |event_duration:long | message:keyword +// end::docsDateExtractBusinessHours-result[] +; + +docsDateFormat +// tag::docsDateFormat[] +FROM employees +| KEEP first_name, last_name, hire_date +| EVAL hired = DATE_FORMAT("YYYY-MM-dd", hire_date) +// end::docsDateFormat[] +| SORT first_name +| LIMIT 3 +; + +// tag::docsDateFormat-result[] +first_name:keyword | last_name:keyword | hire_date:date | hired:keyword +Alejandro |McAlpine |1991-06-26T00:00:00.000Z|1991-06-26 +Amabile |Gomatam |1992-11-18T00:00:00.000Z|1992-11-18 +Anneke |Preusig |1989-06-02T00:00:00.000Z|1989-06-02 +// end::docsDateFormat-result[] +; + +docsDateTrunc +// tag::docsDateTrunc[] +FROM employees +| KEEP first_name, last_name, hire_date +| EVAL year_hired = DATE_TRUNC(1 year, hire_date) +// end::docsDateTrunc[] +| SORT first_name +| LIMIT 3 +; + +// tag::docsDateTrunc-result[] +first_name:keyword | 
last_name:keyword | hire_date:date | year_hired:date +Alejandro |McAlpine |1991-06-26T00:00:00.000Z|1991-01-01T00:00:00.000Z +Amabile |Gomatam |1992-11-18T00:00:00.000Z|1992-01-01T00:00:00.000Z +Anneke |Preusig |1989-06-02T00:00:00.000Z|1989-01-01T00:00:00.000Z +// end::docsDateTrunc-result[] +; + +docsDateTruncHistogram +// tag::docsDateTruncHistogram[] +FROM employees +| EVAL year = DATE_TRUNC(1 year, hire_date) +| STATS hires = COUNT(emp_no) BY year +| SORT year +// end::docsDateTruncHistogram[] +; + +// tag::docsDateTruncHistogram-result[] +hires:long | year:date +11 |1985-01-01T00:00:00.000Z +11 |1986-01-01T00:00:00.000Z +15 |1987-01-01T00:00:00.000Z +9 |1988-01-01T00:00:00.000Z +13 |1989-01-01T00:00:00.000Z +12 |1990-01-01T00:00:00.000Z +6 |1991-01-01T00:00:00.000Z +8 |1992-01-01T00:00:00.000Z +3 |1993-01-01T00:00:00.000Z +4 |1994-01-01T00:00:00.000Z +5 |1995-01-01T00:00:00.000Z +1 |1996-01-01T00:00:00.000Z +1 |1997-01-01T00:00:00.000Z +1 |1999-01-01T00:00:00.000Z +// end::docsDateTruncHistogram-result[] +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec index 95da19e38a05d..3fe19942bdfaa 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec @@ -18,38 +18,6 @@ FROM employees avg_worked_seconds:long | birth_date:date | emp_no:integer | first_name:keyword | gender:keyword | hire_date:date | is_rehired:boolean | job_positions:keyword | languages:integer | languages.byte:integer | languages.long:long | languages.short:integer | last_name:keyword | salary:integer | salary_change:double | salary_change.int:integer |salary_change.keyword:keyword |salary_change.long:long | still_hired:boolean ; -docsEval -// tag::eval[] -FROM employees -| SORT emp_no -| KEEP first_name, last_name, height -| EVAL height_feet = height * 3.281, height_cm = height * 100 -// end::eval[] -| WHERE first_name == "Georgi" -| LIMIT 1; - -// tag::eval-result[] -first_name:keyword | last_name:keyword | height:double | height_feet:double | height_cm:double -Georgi |Facello | 2.03 | 6.66043 | 202.99999999999997 -// end::eval-result[] -; - -docsEvalReplace -// tag::evalReplace[] -FROM employees -| SORT emp_no -| KEEP first_name, last_name, height -| EVAL height = height * 3.281 -// end::evalReplace[] -| WHERE first_name == "Georgi" -| LIMIT 1; - -// tag::evalReplace-result[] -first_name:keyword | last_name:keyword | height:double -Georgi | Facello | 6.66043 -// end::evalReplace-result[] -; - docsLimit // tag::limit[] FROM employees @@ -187,67 +155,6 @@ null |Lortz |1.53 null |Brender |1.55 ; -docsStats -// tag::stats[] -FROM employees -| STATS count = COUNT(emp_no) BY languages -| SORT languages -// end::stats[] -; - -// tag::stats-result[] - count:long | languages:integer -15 |1 -19 |2 -17 |3 -18 |4 -21 |5 -10 |null -// end::stats-result[] -; - -docsStatsWithoutBy -// tag::statsWithoutBy[] -FROM employees -| STATS avg_lang = AVG(languages) -// end::statsWithoutBy[] -; - -// tag::statsWithoutBy-result[] -avg_lang:double -3.1222222222222222 -// end::statsWithoutBy-result[] -; - -docsStatsMultiple -// tag::statsCalcMultipleValues[] -FROM employees -| STATS avg_lang = AVG(languages), max_lang = MAX(languages) -// end::statsCalcMultipleValues[] -; - -avg_lang:double | max_lang:integer -3.1222222222222222|5 -; - -docsStatsGroupByMultipleValues -// tag::statsGroupByMultipleValues[] -FROM employees -| EVAL hired = 
DATE_FORMAT("YYYY", hire_date) -| STATS avg_salary = AVG(salary) BY hired, languages.long -| EVAL avg_salary = ROUND(avg_salary) -| SORT hired, languages.long -// end::statsGroupByMultipleValues[] -| LIMIT 4 -; - -hired:keyword |languages.long:long | avg_salary:double -1985 |1 |54668.0 -1985 |3 |47723.0 -1985 |4 |44817.0 -1985 |5 |47720.0 -; - docsWhere // tag::where[] FROM employees @@ -290,20 +197,6 @@ Udi |Jansch |1.93 Uri |Lenart |1.75 ; - -dateExtract -// tag::dateExtract[] -ROW date = DATE_PARSE("yyyy-MM-dd", "2022-05-06") -| EVAL year = DATE_EXTRACT("year", date) -// end::dateExtract[] -; - -// tag::dateExtract-result[] -date:date | year:long -2022-05-06T00:00:00.000Z | 2022 -// end::dateExtract-result[] -; - docsSubstring // tag::substring[] FROM employees diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec index e6486960c7e04..e4f1726b3e1ff 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec @@ -222,10 +222,28 @@ emp_no:integer | foldable:keyword | folded_mv:keyword 10002 | "foo,bar" | [foo, bar] ; +docsConcat +// tag::docsConcat[] +FROM employees +| KEEP first_name, last_name +| EVAL fullname = CONCAT(first_name, " ", last_name) +// end::docsConcat[] +| SORT first_name +| LIMIT 3 +; + +// tag::docsConcat-result[] +first_name:keyword | last_name:keyword | fullname:keyword +Alejandro |McAlpine |Alejandro McAlpine +Amabile |Gomatam |Amabile Gomatam +Anneke |Preusig |Anneke Preusig +// end::docsConcat-result[] +; + docsGettingStartedEval // tag::gs-eval[] FROM sample_data -| EVAL duration_ms = event_duration / 1000000.0 +| EVAL duration_ms = event_duration/1000000.0 // end::gs-eval[] | LIMIT 0 ; @@ -236,10 +254,98 @@ FROM sample_data docsGettingStartedRound // tag::gs-round[] FROM sample_data -| EVAL duration_ms = ROUND(event_duration / 1000000.0, 1) +| EVAL duration_ms = ROUND(event_duration/1000000.0, 1) // end::gs-round[] | LIMIT 0 ; @timestamp:date | client_ip:ip | event_duration:long | message:keyword | duration_ms:double ; + +docsGettingStartedEvalNoColumnName +// tag::gs-eval-no-column-name[] +FROM sample_data +| EVAL event_duration/1000000.0 +// end::gs-eval-no-column-name[] +| LIMIT 0 +; + +@timestamp:date | client_ip:ip | event_duration:long | message:keyword | event_duration/1000000.0:double +; + +docsGettingStartedEvalStatsBackticks +// tag::gs-eval-stats-backticks[] +FROM sample_data +| EVAL event_duration/1000000.0 +| STATS MEDIAN(`event_duration/1000000.0`) +// end::gs-eval-stats-backticks[] +; + +MEDIAN(`event_duration/1000000.0`):double +2.764889 +; + +docsEval +// tag::eval[] +FROM employees +| SORT emp_no +| KEEP first_name, last_name, height +| EVAL height_feet = height * 3.281, height_cm = height * 100 +// end::eval[] +| LIMIT 3; + +// tag::eval-result[] +first_name:keyword | last_name:keyword | height:double | height_feet:double | height_cm:double +Georgi |Facello |2.03 |6.66043 |202.99999999999997 +Bezalel |Simmel |2.08 |6.82448 |208.0 +Parto |Bamford |1.83 |6.004230000000001 |183.0 +// end::eval-result[] +; + +docsEvalReplace +// tag::evalReplace[] +FROM employees +| SORT emp_no +| KEEP first_name, last_name, height +| EVAL height = height * 3.281 +// end::evalReplace[] +| LIMIT 3; + +// tag::evalReplace-result[] +first_name:keyword | last_name:keyword | height:double +Georgi |Facello |6.66043 +Bezalel |Simmel |6.82448 +Parto |Bamford |6.004230000000001 +// 
end::evalReplace-result[] +; + +docsEvalUnnamedColumn +// tag::evalUnnamedColumn[] +FROM employees +| SORT emp_no +| KEEP first_name, last_name, height +| EVAL height * 3.281 +// end::evalUnnamedColumn[] +| LIMIT 3; + +// tag::evalUnnamedColumn-result[] +first_name:keyword | last_name:keyword | height:double | height*3.281:double +Georgi |Facello |2.03 |6.66043 +Bezalel |Simmel |2.08 |6.82448 +Parto |Bamford |1.83 |6.004230000000001 +// end::evalUnnamedColumn-result[] +; + +docsEvalUnnamedColumnStats +// tag::evalUnnamedColumnStats[] +FROM employees +| EVAL height * 3.281 +| STATS avg_height_feet = AVG(`height*3.281`) +// end::evalUnnamedColumnStats[] +; + +// tag::evalUnnamedColumnStats-result[] +avg_height_feet:double +5.801464200000001 +// end::evalUnnamedColumnStats-result[] +; \ No newline at end of file diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec index 887d931f4cd5c..baf6da2cd0bde 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec @@ -410,6 +410,30 @@ hire_date:date | salary:integer | bs:double // end::auto_bucket-result[] ; +docsAutoBucketNumeric +//tag::docsAutoBucketNumeric[] +FROM employees +| EVAL bs = AUTO_BUCKET(salary, 20, 25324, 74999) +| STATS COUNT(*) by bs +| SORT bs +//end::docsAutoBucketNumeric[] +; + +//tag::docsAutoBucketNumeric-result[] + COUNT(*):long | bs:double +9 |25000.0 +9 |30000.0 +18 |35000.0 +11 |40000.0 +11 |45000.0 +10 |50000.0 +7 |55000.0 +9 |60000.0 +8 |65000.0 +8 |70000.0 +//end::docsAutoBucketNumeric-result[] +; + cos ROW a=2 | EVAL cos=COS(a); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec index 02e9db6ededf1..0b2ce54d5fd22 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec @@ -178,14 +178,21 @@ eth2 |epsilon |[fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece ; cdirMatchMultipleArgs#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] -from hosts | where cidr_match(ip1, "127.0.0.2/32", "127.0.0.3/32") | keep card, host, ip0, ip1; +//tag::cdirMatchMultipleArgs[] +FROM hosts +| WHERE CIDR_MATCH(ip1, "127.0.0.2/32", "127.0.0.3/32") +| KEEP card, host, ip0, ip1 +//end::cdirMatchMultipleArgs[] +; ignoreOrder:true -warning:Line 1:20: evaluation of [cidr_match(ip1, \"127.0.0.2/32\", \"127.0.0.3/32\")] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:20: java.lang.IllegalArgumentException: single-value function encountered multi-value +warning:Line 2:9: evaluation of [CIDR_MATCH(ip1, \"127.0.0.2/32\", \"127.0.0.3/32\")] failed, treating result as null. Only first 20 failures recorded. 
+warning:Line 2:9: java.lang.IllegalArgumentException: single-value function encountered multi-value +//tag::cdirMatchMultipleArgs-result[] card:keyword |host:keyword |ip0:ip |ip1:ip eth1 |beta |127.0.0.1 |127.0.0.2 eth0 |gamma |fe80::cae2:65ff:fece:feb9|127.0.0.3 +//end::cdirMatchMultipleArgs-result[] ; cidrMatchFunctionArg#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec index daf153051bb89..31b9d6101d2c5 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec @@ -1118,3 +1118,34 @@ emp_no:integer | min_plus_max:integer | are_equal:boolean 10004 | 7 | false 10005 | 3 | false ; + +docsAbs +//tag::docsAbs[] +ROW number = -1.0 +| EVAL abs_number = ABS(number) +//end::docsAbs[] +; + +//tag::docsAbs-result[] +number:double | abs_number:double +-1.0 |1.0 +//end::docsAbs-result[] +; + +docsAbsEmployees +//tag::docsAbsEmployees[] +FROM employees +| KEEP first_name, last_name, height +| EVAL abs_height = ABS(0.0 - height) +//end::docsAbsEmployees[] +| SORT first_name +| LIMIT 3 +; + +//tag::docsAbsEmployees-result[] +first_name:keyword | last_name:keyword | height:double | abs_height:double +Alejandro |McAlpine |1.48 |1.48 +Amabile |Gomatam |2.09 |2.09 +Anneke |Preusig |1.56 |1.56 +//end::docsAbsEmployees-result[] +; \ No newline at end of file diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec index 083bd1eaf8417..21d9c21191f77 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec @@ -26,6 +26,7 @@ cos |"double cos(n:integer|long|double|unsigned_long)" cosh |"double cosh(n:integer|long|double|unsigned_long)" |n |"integer|long|double|unsigned_long" | "The number who's hyperbolic cosine is to be returned" |double | "Returns the hyperbolic cosine of a number" | false | false count |? count(arg1:?) |arg1 |? | "" |? | "" | false | false count_distinct |? count_distinct(arg1:?, arg2:?) |[arg1, arg2] |[?, ?] |["", ""] |? | "" | [false, false] | false +date_diff |"integer date_diff(unit:keyword|text, startTimestamp:date, endTimestamp:date)"|[unit, startTimestamp, endTimestamp] |["keyword|text", "date", "date"] |["A valid date unit", "A string representing a start timestamp", "A string representing an end timestamp"] |integer | "Subtract 2 dates and return their difference in multiples of a unit specified in the 1st argument" | [false, false, false] | false date_extract |? date_extract(arg1:?, arg2:?) |[arg1, arg2] |[?, ?] |["", ""] |? | "" | [false, false] | false date_format |? date_format(arg1:?, arg2:?) |[arg1, arg2] |[?, ?] |["", ""] |? | "" | [false, false] | false date_parse |"date date_parse(?datePattern:keyword, dateString:keyword|text)"|[datePattern, dateString]|["keyword", "keyword|text"]|[A valid date pattern, A string representing a date]|date |Parses a string into a date value | [true, false] | false @@ -34,9 +35,9 @@ e |? e() ends_with |? ends_with(arg1:?, arg2:?) |[arg1, arg2] |[?, ?] |["", ""] |? | "" | [false, false] | false floor |"? floor(n:integer|long|double|unsigned_long)" |n |"integer|long|double|unsigned_long" | "" |? | "" | false | false greatest |"? 
greatest(first:integer|long|double|boolean|keyword|text|ip|version, rest...:integer|long|double|boolean|keyword|text|ip|version)" |[first, rest] |["integer|long|double|boolean|keyword|text|ip|version", "integer|long|double|boolean|keyword|text|ip|version"] |["", ""] |? | "" | [false, false] | true -is_finite |? is_finite(arg1:?) |arg1 |? | "" |? | "" | false | false -is_infinite |? is_infinite(arg1:?) |arg1 |? | "" |? | "" | false | false -is_nan |? is_nan(arg1:?) |arg1 |? | "" |? | "" | false | false +is_finite |boolean is_finite(n:double) |n |double | "A floating-point value" |boolean | "Returns true if the argument is a finite floating-point value." | false | false +is_infinite |boolean is_infinite(n:double) |n |double | "A floating-point value" |boolean | "Returns true if the specified floating-point value is infinitely large in magnitude." | false | false +is_nan |boolean is_nan(n:double) |n |double | "A floating-point value" |boolean | "Returns true if the argument is a Not-a-Number (NaN) value." | false | false least |"? least(first:integer|long|double|boolean|keyword|text|ip|version, rest...:integer|long|double|boolean|keyword|text|ip|version)" |[first, rest] |["integer|long|double|boolean|keyword|text|ip|version", "integer|long|double|boolean|keyword|text|ip|version"] |["", ""] |? | "" | [false, false] | true left |"? left(string:keyword, length:integer)" |[string, length] |["keyword", "integer"] |["", ""] |? | "" | [false, false] | false length |? length(arg1:?) |arg1 |? | "" |? | "" | false | false @@ -50,6 +51,8 @@ mv_avg |? mv_avg(arg1:?) mv_concat |"keyword mv_concat(v:text|keyword, delim:text|keyword)" |[v, delim] |["text|keyword", "text|keyword"] |["values to join", "delimiter"] |keyword | "Reduce a multivalued string field to a single valued field by concatenating all values." | [false, false] | false mv_count |"integer mv_count(v:unsigned_long|date|boolean|double|ip|text|integer|keyword|version|long|geo_point|cartesian_point)" |v | "unsigned_long|date|boolean|double|ip|text|integer|keyword|version|long|geo_point|cartesian_point" | "" | integer | "Reduce a multivalued field to a single valued field containing the count of values." | false | false mv_dedupe |"? mv_dedupe(v:boolean|date|double|ip|text|integer|keyword|version|long)" |v | "boolean|date|double|ip|text|integer|keyword|version|long" | "" |? | "Remove duplicate values from a multivalued field." | false | false +mv_first |"? mv_first(v:unsigned_long|date|boolean|double|ip|text|integer|keyword|version|long|geo_point|cartesian_point)" |v | "unsigned_long|date|boolean|double|ip|text|integer|keyword|version|long|geo_point|cartesian_point" | "" |? | "Reduce a multivalued field to a single valued field containing the first value." | false | false +mv_last |"? mv_last(v:unsigned_long|date|boolean|double|ip|text|integer|keyword|version|long|geo_point|cartesian_point)" |v | "unsigned_long|date|boolean|double|ip|text|integer|keyword|version|long|geo_point|cartesian_point" | "" |? | "Reduce a multivalued field to a single valued field containing the last value." | false | false mv_max |"? mv_max(v:unsigned_long|date|boolean|double|ip|text|integer|keyword|version|long)" |v | "unsigned_long|date|boolean|double|ip|text|integer|keyword|version|long" | "" |? | "Reduce a multivalued field to a single valued field containing the maximum value." | false | false mv_median |? mv_median(arg1:?) |arg1 |? | "" |? | "" | false | false mv_min |"? 
mv_min(v:unsigned_long|date|boolean|double|ip|text|integer|keyword|version|long)" |v | "unsigned_long|date|boolean|double|ip|text|integer|keyword|version|long" | "" |? | "Reduce a multivalued field to a single valued field containing the minimum value." | false | false @@ -74,13 +77,13 @@ tanh |"double tanh(n:integer|long|double|unsigned_long)" tau |? tau() | null | null | null |? | "" | null | false to_bool |"boolean to_bool(v:boolean|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|keyword|text|double|long|unsigned_long|integer" | |boolean | |false |false to_boolean |"boolean to_boolean(v:boolean|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|keyword|text|double|long|unsigned_long|integer" | |boolean | |false |false -to_cartesianpoint |? to_cartesianpoint(arg1:?) |arg1 |? | "" |? | "" | false | false +to_cartesianpoint |"cartesian_point to_cartesianpoint(v:cartesian_point|long|keyword|text)" |v |"cartesian_point|long|keyword|text" | |cartesian_point | |false |false to_datetime |"date to_datetime(v:date|keyword|text|double|long|unsigned_long|integer)" |v |"date|keyword|text|double|long|unsigned_long|integer" | |date | |false |false to_dbl |"double to_dbl(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |double | |false |false to_degrees |"double to_degrees(v:double|long|unsigned_long|integer)" |v |"double|long|unsigned_long|integer" | |double | |false |false to_double |"double to_double(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |double | |false |false to_dt |"date to_dt(v:date|keyword|text|double|long|unsigned_long|integer)" |v |"date|keyword|text|double|long|unsigned_long|integer" | |date | |false |false -to_geopoint |? to_geopoint(arg1:?) |arg1 |? | "" |? | "" | false | false +to_geopoint |"geo_point to_geopoint(v:geo_point|long|keyword|text)" |v |"geo_point|long|keyword|text" | |geo_point | |false |false to_int |"integer to_int(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |integer | |false |false to_integer |"integer to_integer(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |integer | |false |false to_ip |"ip to_ip(v:ip|keyword|text)" |v |"ip|keyword|text" | |ip | |false |false @@ -97,7 +100,7 @@ trim |"keyword|text trim(str:keyword|text)" ; -showFunctionsSynopsis#[skip:-8.11.99] +showFunctionsSynopsis#[skip:-8.12.99] show functions | keep synopsis; synopsis:keyword @@ -117,6 +120,7 @@ synopsis:keyword "double cosh(n:integer|long|double|unsigned_long)" ? count(arg1:?) ? count_distinct(arg1:?, arg2:?) +"integer date_diff(unit:keyword|text, startTimestamp:date, endTimestamp:date)" ? date_extract(arg1:?, arg2:?) ? date_format(arg1:?, arg2:?) "date date_parse(?datePattern:keyword, dateString:keyword|text)" @@ -125,9 +129,9 @@ synopsis:keyword ? ends_with(arg1:?, arg2:?) "? floor(n:integer|long|double|unsigned_long)" "? greatest(first:integer|long|double|boolean|keyword|text|ip|version, rest...:integer|long|double|boolean|keyword|text|ip|version)" -? is_finite(arg1:?) -? is_infinite(arg1:?) -? is_nan(arg1:?) +boolean is_finite(n:double) +boolean is_infinite(n:double) +boolean is_nan(n:double) "? least(first:integer|long|double|boolean|keyword|text|ip|version, rest...:integer|long|double|boolean|keyword|text|ip|version)" ? 
left(string:keyword, length:integer) ? length(arg1:?) @@ -141,6 +145,8 @@ synopsis:keyword "keyword mv_concat(v:text|keyword, delim:text|keyword)" "integer mv_count(v:unsigned_long|date|boolean|double|ip|text|integer|keyword|version|long|geo_point|cartesian_point)" "? mv_dedupe(v:boolean|date|double|ip|text|integer|keyword|version|long)" +"? mv_first(v:unsigned_long|date|boolean|double|ip|text|integer|keyword|version|long|geo_point|cartesian_point)" +"? mv_last(v:unsigned_long|date|boolean|double|ip|text|integer|keyword|version|long|geo_point|cartesian_point)" "? mv_max(v:unsigned_long|date|boolean|double|ip|text|integer|keyword|version|long)" ? mv_median(arg1:?) "? mv_min(v:unsigned_long|date|boolean|double|ip|text|integer|keyword|version|long)" @@ -165,13 +171,13 @@ synopsis:keyword ? tau() "boolean to_bool(v:boolean|keyword|text|double|long|unsigned_long|integer)" "boolean to_boolean(v:boolean|keyword|text|double|long|unsigned_long|integer)" -? to_cartesianpoint(arg1:?) +"cartesian_point to_cartesianpoint(v:cartesian_point|long|keyword|text)" "date to_datetime(v:date|keyword|text|double|long|unsigned_long|integer)" "double to_dbl(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "double to_degrees(v:double|long|unsigned_long|integer)" "double to_double(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "date to_dt(v:date|keyword|text|double|long|unsigned_long|integer)" -? to_geopoint(arg1:?) +"geo_point to_geopoint(v:geo_point|long|keyword|text)" "integer to_int(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "integer to_integer(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "ip to_ip(v:ip|keyword|text)" @@ -188,7 +194,7 @@ synopsis:keyword ; -showFunctionsFiltered +showFunctionsFiltered#[skip:-8.12.99] // tag::showFunctionsFiltered[] SHOW functions | WHERE STARTS_WITH(name, "is_") @@ -197,17 +203,17 @@ SHOW functions // tag::showFunctionsFiltered-result[] name:keyword | synopsis:keyword | argNames:keyword | argTypes:keyword | argDescriptions:keyword | returnType:keyword | description:keyword | optionalArgs:boolean | variadic:boolean -is_finite |? is_finite(arg1:?) |arg1 |? | "" |? | "" | false | false -is_infinite |? is_infinite(arg1:?) |arg1 |? | "" |? | "" | false | false -is_nan |? is_nan(arg1:?) |arg1 |? | "" |? | "" | false | false +is_finite |boolean is_finite(n:double) |n |double | "A floating-point value" |boolean | "Returns true if the argument is a finite floating-point value." | false | false +is_infinite |boolean is_infinite(n:double) |n |double | "A floating-point value" |boolean | "Returns true if the specified floating-point value is infinitely large in magnitude." | false | false +is_nan |boolean is_nan(n:double) |n |double | "A floating-point value" |boolean | "Returns true if the argument is a Not-a-Number (NaN) value." 
| false | false // end::showFunctionsFiltered-result[] ; // see https://github.com/elastic/elasticsearch/issues/102120 -countFunctions#[skip:-8.11.99] +countFunctions#[skip:-8.12.99] show functions | stats a = count(*), b = count(*), c = count(*) | mv_expand c; a:long | b:long | c:long -84 | 84 | 84 +87 | 87 | 87 ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec index e5039377b4a40..f285c49808c77 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec @@ -1,12 +1,27 @@ -convertFromLong#[skip:-8.11.99, reason:spatial type geo_point only added in 8.12] +############################################### +# Tests for GEO_POINT type +# + +# This first test is only here to verify type support in older versions, all other tests are for newer versions +convertToAndFromLong#[skip:-8.11.99, reason:spatial type geo_point only added in 8.12] +row long = 1512146573982606908 +| eval pt = to_geopoint(long) +| eval l = to_long(pt) +| keep long, l; + +long:long |l:long +1512146573982606908 |1512146573982606908 +; + +convertFromLong#[skip:-8.12.99, reason:spatial type geo_point improved precision in 8.13] row long = 1512146573982606908 | eval pt = to_geopoint(long); long:long |pt:geo_point -1512146573982606908 |POINT(42.97109630194 14.7552534413725) +1512146573982606908 |POINT(42.97109629958868 14.7552534006536) ; -convertFromString#[skip:-8.11.99, reason:spatial type geo_point only added in 8.12] +convertFromString#[skip:-8.12.99, reason:spatial type geo_point improved precision in 8.13] // tag::to_geopoint-str[] row wkt = "POINT(42.97109630194 14.7552534413725)" | eval pt = to_geopoint(wkt) @@ -19,15 +34,15 @@ wkt:keyword |pt:geo_point // end::to_geopoint-str-result[] ; -convertFromLongArray#[skip:-8.11.99, reason:spatial type geo_point only added in 8.12] +convertFromLongArray#[skip:-8.12.99, reason:spatial type geo_point improved precision in 8.13] row long = [1512146573982606908, 2329183180959557901] | eval pt = to_geopoint(long); long:long |pt:geo_point -[1512146573982606908, 2329183180959557901] |[POINT(42.97109630194 14.7552534413725), POINT(75.8092915005895 22.727749187571)] +[1512146573982606908, 2329183180959557901] |[POINT(42.97109629958868 14.7552534006536), POINT(75.80929149873555 22.72774917539209)] ; -convertFromStringArray#[skip:-8.11.99, reason:spatial type geo_point only added in 8.12] +convertFromStringArray#[skip:-8.12.99, reason:spatial type geo_point improved precision in 8.13] row wkt = ["POINT(42.97109630194 14.7552534413725)", "POINT(75.8092915005895 22.727749187571)"] | eval pt = to_geopoint(wkt); @@ -35,33 +50,100 @@ wkt:keyword ["POINT(42.97109630194 14.7552534413725)", "POINT(75.8092915005895 22.727749187571)"] |[POINT(42.97109630194 14.7552534413725), POINT(75.8092915005895 22.727749187571)] ; -simpleLoad#[skip:-8.11.99, reason:spatial type geo_point only added in 8.12] +simpleLoad#[skip:-8.12.99, reason:spatial type geo_point improved precision in 8.13] FROM airports | WHERE scalerank == 9 | SORT abbrev | WHERE length(name) > 12; -abbrev:keyword | location:geo_point | name:text | scalerank:i | type:k -CJJ | POINT(127.49591611325741 36.72202274668962) | Cheongju Int'l | 9 | major -HOD | POINT(42.97109629958868 14.7552534006536) | Hodeidah Int'l | 9 | mid -IDR | POINT(75.80929149873555 22.72774917539209) | Devi Ahilyabai Holkar Int'l | 9 | mid -IXC | 
POINT(76.80172610096633 30.6707248929888) | Chandigarh Int'l | 9 | [major, military] -LYP | POINT(72.98781909048557 31.362743536010385) | Faisalabad Int'l | 9 | [mid, military] -MLG | POINT(112.71141858771443 -7.9299800377339125)| Abdul Rachman Saleh | 9 | [mid, military] -OMS | POINT(73.3163595199585 54.95764828752726) | Omsk Tsentralny | 9 | mid -OVB | POINT(82.6671524439007 55.00958469696343) | Novosibirsk Tolmachev | 9 | mid -OZH | POINT(35.301872827112675 47.87326351739466) | Zaporozhye Int'l | 9 | [mid, military] -TRZ | POINT(78.7089578434825 10.760357128456235) | Tiruchirappalli | 9 | mid -WIIT | POINT(105.17606039531529 -5.242566782981157) | Radin Inten II | 9 | mid -ZAH | POINT(60.9007085300982 29.47529417462647) | Zahedan Int'l | 9 | mid -; - -convertCartesianFromLong#[skip:-8.11.99, reason:spatial type cartesian_point only added in 8.12] +abbrev:keyword | location:geo_point | name:text | scalerank:i | type:k +CJJ | POINT(127.495916124681 36.7220227766673) | Cheongju Int'l | 9 | major +HOD | POINT(42.97109630194 14.7552534413725) | Hodeidah Int'l | 9 | mid +IDR | POINT(75.8092915005895 22.727749187571) | Devi Ahilyabai Holkar Int'l | 9 | mid +IXC | POINT(76.8017261105242 30.6707248949667) | Chandigarh Int'l | 9 | [major, military] +LYP | POINT(72.9878190922305 31.3627435480862) | Faisalabad Int'l | 9 | [mid, military] +MLG | POINT(112.711418617258 -7.92998002840567) | Abdul Rachman Saleh | 9 | [mid, military] +OMS | POINT(73.3163595376585 54.9576482934059) | Omsk Tsentralny | 9 | mid +OVB | POINT(82.6671524525865 55.0095847136264) | Novosibirsk Tolmachev | 9 | mid +OZH | POINT(35.3018728575279 47.8732635579023) | Zaporozhye Int'l | 9 | [mid, military] +TRZ | POINT(78.7089578747476 10.7603571306554) | Tiruchirappalli | 9 | mid +WIIT | POINT(105.176060419161 -5.242566777132) | Radin Inten II | 9 | mid +ZAH | POINT(60.900708564915 29.4752941956573) | Zahedan Int'l | 9 | mid +; + +geoPointEquals#[skip:-8.12.99, reason:spatial type geo_point improved in 8.13] +// tag::to_geopoint-equals[] +ROW wkt = ["POINT(42.97109630194 14.7552534413725)", "POINT(75.8092915005895 22.727749187571)"] +| MV_EXPAND wkt +| EVAL pt = to_geopoint(wkt) +| WHERE pt == to_geopoint("POINT(42.97109630194 14.7552534413725)") +// end::to_geopoint-equals[] +; + +// tag::to_geopoint-equals-result[] +wkt:keyword |pt:geo_point +"POINT(42.97109630194 14.7552534413725)" |POINT(42.97109630194 14.7552534413725) +// end::to_geopoint-equals-result[] +; + +geoPointNotEquals#[skip:-8.12.99, reason:spatial type geo_point improved in 8.13] +// tag::to_geopoint-not-equals[] +ROW wkt = ["POINT(42.97109630194 14.7552534413725)", "POINT(75.8092915005895 22.727749187571)"] +| MV_EXPAND wkt +| EVAL pt = to_geopoint(wkt) +| WHERE pt != to_geopoint("POINT(42.97109630194 14.7552534413725)") +// end::to_geopoint-not-equals[] +; + +// tag::to_geopoint-not-equals-result[] +wkt:keyword |pt:geo_point +"POINT(75.8092915005895 22.727749187571)" |POINT(75.8092915005895 22.727749187571) +// end::to_geopoint-not-equals-result[] +; + +convertFromStringParseError#[skip:-8.12.99, reason:spatial type geo_point improved in 8.13] +// tag::to_geopoint-str-parse-error[] +row wkt = ["POINTX(42.97109630194 14.7552534413725)", "POINT(75.8092915005895 22.727749187571)", "POINT(111)"] +| mv_expand wkt +| eval pt = to_geopoint(wkt) +// end::to_geopoint-str-parse-error[] +; + +// tag::to_geopoint-str-parse-error-warning[] +warning:Line 3:13: evaluation of [to_geopoint(wkt)] failed, treating result as null. Only first 20 failures recorded. 
+warning:Line 3:13: java.lang.IllegalArgumentException: Failed to parse WKT: Unknown geometry type: pointx +warning:Line 3:13: java.lang.IllegalArgumentException: Failed to parse WKT: expected number but found: ')' +// end::to_geopoint-str-parse-error-warning[] + +// tag::to_geopoint-str-parse-error-result[] +wkt:keyword |pt:geo_point +"POINTX(42.97109630194 14.7552534413725)" |null +"POINT(75.8092915005895 22.727749187571)" |POINT(75.8092915005895 22.727749187571) +"POINT(111)" |null +// end::to_geopoint-str-parse-error-result[] +; + +############################################### +# Tests for CARTESIAN_POINT type +# + +# This first test is only here to verify type support in older versions, all other tests are for newer versions +convertCartesianToAndFromLong#[skip:-8.11.99, reason:spatial type cartesian_point only added in 8.12] +row long = 5009771769843126025 +| eval pt = to_cartesianpoint(long) +| eval l = to_long(pt) +| keep long, l; + +long:long |l:long +5009771769843126025 |5009771769843126025 +; + +convertCartesianFromLong#[skip:-8.12.99, reason:spatial type cartesian_point improved precision in 8.13] row long = 5009771769843126025 | eval pt = to_cartesianpoint(long); long:long |pt:cartesian_point -5009771769843126025 |POINT(4297.11 -1475.53) +5009771769843126025 |POINT(4297.10986328125 -1475.530029296875) ; -convertCartesianFromString#[skip:-8.11.99, reason:spatial type cartesian_point only added in 8.12] +convertCartesianFromString#[skip:-8.12.99, reason:spatial type cartesian_point improved precision in 8.13] // tag::to_cartesianpoint-str[] row wkt = ["POINT(4297.11 -1475.53)", "POINT(7580.93 2272.77)"] | mv_expand wkt @@ -76,15 +158,15 @@ wkt:keyword |pt:cartesian_point // end::to_cartesianpoint-str-result[] ; -convertCartesianFromLongArray#[skip:-8.11.99, reason:spatial type cartesian_point only added in 8.12] +convertCartesianFromLongArray#[skip:-8.12.99, reason:spatial type cartesian_point improved precision in 8.13] row long = [5009771769843126025, 5038656556796611666] | eval pt = to_cartesianpoint(long); long:long |pt:cartesian_point -[5009771769843126025, 5038656556796611666] |[POINT(4297.11 -1475.53), POINT(7580.93 2272.77)] +[5009771769843126025, 5038656556796611666] |[POINT(4297.10986328125 -1475.530029296875), POINT(7580.93017578125 2272.77001953125)] ; -convertCartesianFromStringArray#[skip:-8.11.99, reason:spatial type cartesian_point only added in 8.12] +convertCartesianFromStringArray#[skip:-8.12.99, reason:spatial type cartesian_point improved precision in 8.13] row wkt = ["POINT(4297.11 -1475.53)", "POINT(7580.93 2272.77)"] | eval pt = to_cartesianpoint(wkt); @@ -92,16 +174,68 @@ wkt:keyword ["POINT(4297.11 -1475.53)", "POINT(7580.93 2272.77)"] |[POINT(4297.11 -1475.53), POINT(7580.93 2272.77)] ; -simpleCartesianLoad#[skip:-8.11.99, reason:spatial type cartesian_point only added in 8.12] +simpleCartesianLoad#[skip:-8.12.99, reason:spatial type cartesian_point improved precision in 8.13] FROM airports_web | WHERE scalerank == 9 | SORT abbrev | WHERE length(name) > 12; -abbrev:keyword | location:cartesian_point | name:text | scalerank:i | type:k -CJJ | POINT (14192780.0 4400431.0) | Cheongju Int'l | 9 | major -HOD | POINT (4783520.5 1661010.0) | Hodeidah Int'l | 9 | mid -IDR | POINT (8439052.0 2599127.5) | Devi Ahilyabai Holkar Int'l | 9 | mid -OMS | POINT (8161540.0 7353651.0) | Omsk Tsentralny | 9 | mid -OVB | POINT (9202465.0 7363726.5) | Novosibirsk Tolmachev | 9 | mid -TRZ | POINT (8761841.0 1204941.5) | Tiruchirappalli | 9 | mid -WIIT | POINT (11708145.0 
-584415.9375) | Radin Inten II | 9 | mid -ZAH | POINT (6779436.0 3436280.5) | Zahedan Int'l | 9 | mid +abbrev:keyword | location:cartesian_point | name:text | scalerank:i | type:k +CJJ | POINT(14192780.461221408 4400430.851323913) | Cheongju Int'l | 9 | major +HOD | POINT (4783520.559160681 1661010.0197476079) | Hodeidah Int'l | 9 | mid +IDR | POINT (8439051.727244465 2599127.5424638605) | Devi Ahilyabai Holkar Int'l | 9 | mid +OMS | POINT (8161539.810548711 7353650.845101996) | Omsk Tsentralny | 9 | mid +OVB | POINT (9202465.316351846 7363726.532780712) | Novosibirsk Tolmachev | 9 | mid +TRZ | POINT (8761841.111486122 1204941.537981898) | Tiruchirappalli | 9 | mid +WIIT | POINT (11708145.489503577 -584415.9142832769) | Radin Inten II | 9 | mid +ZAH | POINT (6779435.866395892 3436280.545331025) | Zahedan Int'l | 9 | mid +; + +cartesianPointEquals#[skip:-8.12.99, reason:spatial type cartesian_point improved in 8.13] +// tag::to_cartesianpoint-equals[] +ROW wkt = ["POINT(4297.11 -1475.53)", "POINT(7580.93 2272.77)"] +| MV_EXPAND wkt +| EVAL pt = to_cartesianpoint(wkt) +| WHERE pt == to_cartesianpoint("POINT(4297.11 -1475.53)") +// end::to_cartesianpoint-equals[] +; + +// tag::to_cartesianpoint-equals-result[] +wkt:keyword |pt:cartesian_point +"POINT(4297.11 -1475.53)" |POINT(4297.11 -1475.53) +// end::to_cartesianpoint-equals-result[] +; + +cartesianPointNotEquals#[skip:-8.12.99, reason:spatial type cartesian_point improved in 8.13] +// tag::to_cartesianpoint-not-equals[] +ROW wkt = ["POINT(4297.11 -1475.53)", "POINT(7580.93 2272.77)"] +| MV_EXPAND wkt +| EVAL pt = to_cartesianpoint(wkt) +| WHERE pt != to_cartesianpoint("POINT(4297.11 -1475.53)") +// end::to_cartesianpoint-not-equals[] +; + +// tag::to_cartesianpoint-not-equals-result[] +wkt:keyword |pt:cartesian_point +"POINT(7580.93 2272.77)" |POINT(7580.93 2272.77) +// end::to_cartesianpoint-not-equals-result[] +; + +convertCartesianFromStringParseError#[skip:-8.12.99, reason:spatial type cartesian_point improved in 8.13] +// tag::to_cartesianpoint-str-parse-error[] +row wkt = ["POINTX(4297.11 -1475.53)", "POINT(7580.93 2272.77)", "POINT(111)"] +| mv_expand wkt +| eval pt = to_cartesianpoint(wkt) +// end::to_cartesianpoint-str-parse-error[] +; + +// tag::to_cartesianpoint-str-parse-error-warning[] +warning:Line 3:13: evaluation of [to_cartesianpoint(wkt)] failed, treating result as null. Only first 20 failures recorded. 
+warning:Line 3:13: java.lang.IllegalArgumentException: Failed to parse WKT: Unknown geometry type: pointx +warning:Line 3:13: java.lang.IllegalArgumentException: Failed to parse WKT: expected number but found: ')' +// end::to_cartesianpoint-str-parse-error-warning[] + +// tag::to_cartesianpoint-str-parse-error-result[] +wkt:keyword |pt:cartesian_point +"POINTX(4297.11 -1475.53)" |null +"POINT(7580.93 2272.77)" |POINT(7580.93 2272.77) +"POINT(111)" |null +// end::to_cartesianpoint-str-parse-error-result[] ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 0ad759feeeea0..ded080023f5c4 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -435,7 +435,7 @@ g:keyword | l:integer null | 5 ; -repetitiveAggregation +repetitiveAggregation#[skip:-8.11.99,reason:ReplaceDuplicateAggWithEval breaks bwc] from employees | stats m1 = max(salary), m2 = min(salary), m3 = min(salary), m4 = max(salary); m1:i | m2:i | m3:i | m4:i @@ -791,3 +791,91 @@ FROM sample_data count(`event_duration`):l 7 ; + +docsStats +// tag::stats[] +FROM employees +| STATS count = COUNT(emp_no) BY languages +| SORT languages +// end::stats[] +; + +// tag::stats-result[] + count:long | languages:integer +15 |1 +19 |2 +17 |3 +18 |4 +21 |5 +10 |null +// end::stats-result[] +; + +docsStatsWithoutBy +// tag::statsWithoutBy[] +FROM employees +| STATS avg_lang = AVG(languages) +// end::statsWithoutBy[] +; + +// tag::statsWithoutBy-result[] +avg_lang:double +3.1222222222222222 +// end::statsWithoutBy-result[] +; + +docsStatsMultiple +// tag::statsCalcMultipleValues[] +FROM employees +| STATS avg_lang = AVG(languages), max_lang = MAX(languages) +// end::statsCalcMultipleValues[] +; + +avg_lang:double | max_lang:integer +3.1222222222222222|5 +; + +docsStatsGroupByMultipleValues +// tag::statsGroupByMultipleValues[] +FROM employees +| EVAL hired = DATE_FORMAT("YYYY", hire_date) +| STATS avg_salary = AVG(salary) BY hired, languages.long +| EVAL avg_salary = ROUND(avg_salary) +| SORT hired, languages.long +// end::statsGroupByMultipleValues[] +| LIMIT 4 +; + +hired:keyword |languages.long:long | avg_salary:double +1985 |1 |54668.0 +1985 |3 |47723.0 +1985 |4 |44817.0 +1985 |5 |47720.0 +; + +docsStatsUnnamedColumn +// tag::statsUnnamedColumn[] +FROM employees +| STATS AVG(salary) +// end::statsUnnamedColumn[] +; + +// tag::statsUnnamedColumn-result[] +AVG(salary):double +48248.55 +// end::statsUnnamedColumn-result[] +; + +docsStatsUnnamedColumnEval +// tag::statsUnnamedColumnEval[] +FROM employees +| STATS AVG(salary) +| EVAL avg_salary_rounded = ROUND(`AVG(salary)`) +// end::statsUnnamedColumnEval[] +; + +// tag::statsUnnamedColumnEval-result[] +AVG(salary):double | avg_salary_rounded:double +48248.55 | 48249.0 +// end::statsUnnamedColumnEval-result[] +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec index 6ab061b33dfb0..f1849107d606d 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec @@ -70,14 +70,14 @@ NULL ; -medianOfLong +medianOfLong#[skip:-8.11.99,reason:ReplaceDuplicateAggWithEval breaks bwc gh-103765] from employees | stats m = median(salary_change.long), p50 = 
percentile(salary_change.long, 50); m:double | p50:double 0 | 0 ; -medianOfInteger +medianOfInteger#[skip:-8.11.99,reason:ReplaceDuplicateAggWithEval breaks bwc gh-103765] // tag::median[] FROM employees | STATS MEDIAN(salary), PERCENTILE(salary, 50) @@ -90,7 +90,7 @@ MEDIAN(salary):double | PERCENTILE(salary,50):double // end::median-result[] ; -medianOfDouble +medianOfDouble#[skip:-8.11.99,reason:ReplaceDuplicateAggWithEval breaks bwc gh-103765] from employees | stats m = median(salary_change), p50 = percentile(salary_change, 50); m:double | p50:double @@ -98,7 +98,7 @@ m:double | p50:double ; -medianOfLongByKeyword +medianOfLongByKeyword#[skip:-8.11.99,reason:ReplaceDuplicateAggWithEval breaks bwc gh-103765] from employees | stats m = median(salary_change.long), p50 = percentile(salary_change.long, 50) by job_positions | sort m desc | limit 4; m:double | p50:double | job_positions:keyword @@ -109,7 +109,7 @@ m:double | p50:double | job_positions:keyword ; -medianOfIntegerByKeyword +medianOfIntegerByKeyword#[skip:-8.11.99,reason:ReplaceDuplicateAggWithEval breaks bwc gh-103765] from employees | stats m = median(salary), p50 = percentile(salary, 50) by job_positions | sort m | limit 4; m:double | p50:double | job_positions:keyword @@ -120,7 +120,7 @@ m:double | p50:double | job_positions:keyword ; -medianOfDoubleByKeyword +medianOfDoubleByKeyword#[skip:-8.11.99,reason:ReplaceDuplicateAggWithEval breaks bwc gh-103765] from employees | stats m = median(salary_change), p50 = percentile(salary_change, 50)by job_positions | sort m desc | limit 4; m:double | p50:double | job_positions:keyword diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index 1f78a63c8c4d8..b8b80df389f9c 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -415,6 +415,32 @@ ROW a=["foo", "zoo", "bar"] // end::mv_concat-result[] ; +mvFirst#[skip:-8.12.99, reason:Added in 8.13.0] +// tag::mv_first[] +ROW a="foo;bar;baz" +| EVAL first_a = MV_FIRST(SPLIT(a, ";")) +// end::mv_first[] +; + +// tag::mv_first-result[] + a:keyword | first_a:keyword +foo;bar;baz | "foo" +// end::mv_first-result[] +; + +mvLast#[skip:-8.12.99, reason:Added in 8.13.0] +// tag::mv_last[] +ROW a="foo;bar;baz" +| EVAL last_a = MV_LAST(SPLIT(a, ";")) +// end::mv_last[] +; + +// tag::mv_last-result[] + a:keyword | last_a:keyword +foo;bar;baz | "baz" +// end::mv_last-result[] +; + mvMax // tag::mv_max[] ROW a=["foo", "zoo", "bar"] diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/where-like.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/where-like.csv-spec index 37a1978524e7f..160fc46dafcf2 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/where-like.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/where-like.csv-spec @@ -298,3 +298,24 @@ FROM sample_data @timestamp:date | client_ip:ip | event_duration:long | message:keyword ; + +multiValueLike#[skip:-8.12.99] +from employees | where job_positions like "Account*" | keep emp_no, job_positions; + +warning:Line 1:24: evaluation of [job_positions like \"Account*\"] failed, treating result as null. Only first 20 failures recorded. 
+warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value + +emp_no:integer | job_positions:keyword +10025 | Accountant +; + + +multiValueRLike#[skip:-8.12.99] +from employees | where job_positions rlike "Account.*" | keep emp_no, job_positions; + +warning:Line 1:24: evaluation of [job_positions rlike \"Account.*\"] failed, treating result as null. Only first 20 failures recorded. +warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value + +emp_no:integer | job_positions:keyword +10025 | Accountant +; diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java index 9b5012e56a3ff..0590caf2019b4 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java @@ -9,6 +9,7 @@ import org.elasticsearch.Build; import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -17,6 +18,7 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.health.node.selection.HealthNode; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; @@ -53,11 +55,25 @@ public void ensureBlocksReleased() { CircuitBreaker reqBreaker = breakerService.getBreaker(CircuitBreaker.REQUEST); try { assertBusy(() -> { - logger.info("running tasks: {}", client().admin().cluster().prepareListTasks().get()); + logger.info( + "running tasks: {}", + client().admin() + .cluster() + .prepareListTasks() + .get() + .getTasks() + .stream() + .filter( + // Skip the tasks that'd get in the way while debugging + t -> false == t.action().contains(TransportListTasksAction.TYPE.name()) + && false == t.action().contains(HealthNode.TASK_NAME) + ) + .toList() + ); assertThat("Request breaker not reset to 0 on node: " + node, reqBreaker.getUsed(), equalTo(0L)); }); } catch (Exception e) { - assertThat("Request breaker not reset to 0 on node: " + node, reqBreaker.getUsed(), equalTo(0L)); + throw new RuntimeException("failed waiting for breakers to clear", e); + } } } @@ -80,6 +96,11 @@ public List<Setting<?>> getSettings() { BlockFactory.LOCAL_BREAKER_OVER_RESERVED_MAX_SIZE_SETTING, ByteSizeValue.ofBytes(randomIntBetween(0, 16 * 1024)), Setting.Property.NodeScope + ), + Setting.byteSizeSetting( + BlockFactory.MAX_BLOCK_PRIMITIVE_ARRAY_SIZE_SETTING, + ByteSizeValue.ofBytes(randomLongBetween(1, BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE.getBytes())), + Setting.Property.NodeScope ) ); } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPausableIntegTestCase.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPausableIntegTestCase.java new file mode 100644 index 0000000000000..7a5072120e5af --- /dev/null
+++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPausableIntegTestCase.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.index.engine.SegmentsStats; +import org.elasticsearch.index.mapper.OnScriptError; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.script.LongFieldScript; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.search.lookup.SearchLookup; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collection; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; + +/** A pausable test case. Subclasses extend it to simulate slow-running queries. + * + * Uses the evaluation of a runtime field "pause_me" of type long in the mapping, along + * with a custom script language "pause" and the semaphore "scriptPermits", to block execution.
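+ *
+ * Each evaluation of the "pause_me" script blocks until a permit can be acquired from
+ * "scriptPermits" (waiting up to a minute), so subclasses control how far a query gets
+ * by releasing permits, e.g. scriptPermits.release(numberOfDocs()) lets it run to completion.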
+ */ +public abstract class AbstractPausableIntegTestCase extends AbstractEsqlIntegTestCase { + + private static final Logger LOGGER = LogManager.getLogger(AbstractPausableIntegTestCase.class); + + protected static final Semaphore scriptPermits = new Semaphore(0); + + protected int pageSize = -1; + + protected int numberOfDocs = -1; + + @Override + protected Collection> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), PausableFieldPlugin.class); + } + + protected int pageSize() { + if (pageSize == -1) { + pageSize = between(10, 100); + } + return pageSize; + } + + protected int numberOfDocs() { + if (numberOfDocs == -1) { + numberOfDocs = between(4 * pageSize(), 5 * pageSize()); + } + return numberOfDocs; + } + + @Before + public void setupIndex() throws IOException { + assumeTrue("requires query pragmas", canUseQueryPragmas()); + + XContentBuilder mapping = JsonXContent.contentBuilder().startObject(); + mapping.startObject("runtime"); + { + mapping.startObject("pause_me"); + { + mapping.field("type", "long"); + mapping.startObject("script").field("source", "").field("lang", "pause").endObject(); + } + mapping.endObject(); + } + mapping.endObject(); + client().admin() + .indices() + .prepareCreate("test") + .setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0)) + .setMapping(mapping.endObject()) + .get(); + + BulkRequestBuilder bulk = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int i = 0; i < numberOfDocs(); i++) { + bulk.add(prepareIndex("test").setId(Integer.toString(i)).setSource("foo", i)); + } + bulk.get(); + /* + * forceMerge so we can be sure that we don't bump into tiny + * segments that finish super quickly and cause us to report strange + * statuses when we expect "starting". + */ + client().admin().indices().prepareForceMerge("test").setMaxNumSegments(1).get(); + /* + * Double super extra paranoid check that force merge worked. It's + * failed to reduce the index to a single segment and caused this test + * to fail in very difficult to debug ways. If it fails again, it'll + * trip here. Or maybe it won't! And we'll learn something. Maybe + * it's ghosts. 
+ */ + SegmentsStats stats = client().admin().indices().prepareStats("test").get().getPrimaries().getSegments(); + if (stats.getCount() != 1L) { + fail(Strings.toString(stats)); + } + } + + public static class PausableFieldPlugin extends Plugin implements ScriptPlugin { + + @Override + public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { + return new ScriptEngine() { + @Override + public String getType() { + return "pause"; + } + + @Override + @SuppressWarnings("unchecked") + public FactoryType compile( + String name, + String code, + ScriptContext context, + Map params + ) { + return (FactoryType) new LongFieldScript.Factory() { + @Override + public LongFieldScript.LeafFactory newFactory( + String fieldName, + Map params, + SearchLookup searchLookup, + OnScriptError onScriptError + ) { + return ctx -> new LongFieldScript(fieldName, params, searchLookup, onScriptError, ctx) { + @Override + public void execute() { + try { + assertTrue(scriptPermits.tryAcquire(1, TimeUnit.MINUTES)); + } catch (Exception e) { + throw new AssertionError(e); + } + LOGGER.debug("--> emitting value"); + emit(1); + } + }; + } + }; + } + + @Override + public Set> getSupportedContexts() { + return Set.of(LongFieldScript.CONTEXT); + } + }; + } + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AsyncEsqlQueryActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AsyncEsqlQueryActionIT.java new file mode 100644 index 0000000000000..6e3873f654778 --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AsyncEsqlQueryActionIT.java @@ -0,0 +1,257 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.compute.operator.exchange.ExchangeService; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.tasks.TaskInfo; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.async.DeleteAsyncResultRequest; +import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; +import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction; +import org.elasticsearch.xpack.esql.plugin.QueryPragmas; +import org.hamcrest.core.IsEqual; + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.core.TimeValue.timeValueMillis; +import static org.elasticsearch.core.TimeValue.timeValueMinutes; +import static org.elasticsearch.core.TimeValue.timeValueSeconds; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresent; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +/** + * Individual tests for specific aspects of the async query API. + */ +public class AsyncEsqlQueryActionIT extends AbstractPausableIntegTestCase { + + @Override + protected Collection> nodePlugins() { + ArrayList> actions = new ArrayList<>(super.nodePlugins()); + actions.add(EsqlAsyncActionIT.LocalStateEsqlAsync.class); + actions.add(InternalExchangePlugin.class); + return Collections.unmodifiableList(actions); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder() + .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(500, 2000))) + .build(); + } + + public void testBasicAsyncExecution() throws Exception { + try (var initialResponse = sendAsyncQuery()) { + assertThat(initialResponse.asyncExecutionId(), isPresent()); + assertThat(initialResponse.isRunning(), is(true)); + String id = initialResponse.asyncExecutionId().get(); + + if (randomBoolean()) { + // let's timeout first + var getResultsRequest = new GetAsyncResultRequest(id); + getResultsRequest.setWaitForCompletionTimeout(timeValueMillis(10)); + getResultsRequest.setKeepAlive(randomKeepAlive()); + var future = client().execute(EsqlAsyncGetResultAction.INSTANCE, getResultsRequest); + try (var responseWithTimeout = future.get()) { + assertThat(initialResponse.asyncExecutionId(), isPresent()); + assertThat(responseWithTimeout.asyncExecutionId().get(), equalTo(id)); + assertThat(responseWithTimeout.isRunning(), is(true)); + } + } + + // Now we wait + var getResultsRequest = new GetAsyncResultRequest(id); + getResultsRequest.setWaitForCompletionTimeout(timeValueSeconds(60)); + getResultsRequest.setKeepAlive(randomKeepAlive()); + var future = client().execute(EsqlAsyncGetResultAction.INSTANCE, getResultsRequest); + + // release the permits to allow the query to proceed + scriptPermits.release(numberOfDocs()); + + try (var finalResponse = future.get()) { + assertThat(finalResponse, notNullValue()); + 
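// with all permits released above, the query should have run to completion
+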
assertThat(finalResponse.isRunning(), is(false)); + assertThat(finalResponse.columns(), equalTo(List.of(new ColumnInfo("sum(pause_me)", "long")))); + assertThat(getValuesList(finalResponse).size(), equalTo(1)); + } + + // Get the stored result (again) + var again = client().execute(EsqlAsyncGetResultAction.INSTANCE, getResultsRequest); + try (var finalResponse = again.get()) { + assertThat(finalResponse, notNullValue()); + assertThat(finalResponse.isRunning(), is(false)); + assertThat(finalResponse.columns(), equalTo(List.of(new ColumnInfo("sum(pause_me)", "long")))); + assertThat(getValuesList(finalResponse).size(), equalTo(1)); + } + + AcknowledgedResponse deleteResponse = deleteAsyncId(id); + assertThat(deleteResponse.isAcknowledged(), equalTo(true)); + // the stored response should no longer be retrievable + var e = expectThrows(ResourceNotFoundException.class, () -> deleteAsyncId(id)); + assertThat(e.getMessage(), IsEqual.equalTo(id)); + } finally { + scriptPermits.drainPermits(); + } + } + + public void testAsyncCancellation() throws Exception { + try (var initialResponse = sendAsyncQuery()) { + assertThat(initialResponse.asyncExecutionId(), isPresent()); + assertThat(initialResponse.isRunning(), is(true)); + String id = initialResponse.asyncExecutionId().get(); + + DeleteAsyncResultRequest request = new DeleteAsyncResultRequest(id); + var future = client().execute(TransportDeleteAsyncResultAction.TYPE, request); + + // there should be just one task + List tasks = getEsqlQueryTasks(); + assertThat(tasks.size(), is(1)); + + // release the permits to allow the query to proceed + scriptPermits.release(numberOfDocs()); + + var deleteResponse = future.actionGet(timeValueSeconds(60)); + assertThat(deleteResponse.isAcknowledged(), equalTo(true)); + + // there should be no tasks after delete + tasks = getEsqlQueryTasks(); + assertThat(tasks.size(), is(0)); + + // the stored response should no longer be retrievable + var getResultsRequest = new GetAsyncResultRequest(id); + getResultsRequest.setKeepAlive(timeValueMinutes(10)); + getResultsRequest.setWaitForCompletionTimeout(timeValueSeconds(60)); + var e = expectThrows( + ResourceNotFoundException.class, + () -> client().execute(EsqlAsyncGetResultAction.INSTANCE, getResultsRequest).actionGet() + ); + assertThat(e.getMessage(), equalTo(id)); + } finally { + scriptPermits.drainPermits(); + } + } + + public void testFinishingBeforeTimeoutKeep() { + testFinishingBeforeTimeout(true); + } + + public void testFinishingBeforeTimeoutDoNotKeep() { + testFinishingBeforeTimeout(false); + } + + private void testFinishingBeforeTimeout(boolean keepOnCompletion) { + // don't block the query execution at all + scriptPermits.drainPermits(); + assert scriptPermits.availablePermits() == 0; + + scriptPermits.release(numberOfDocs()); + + var request = EsqlQueryRequestBuilder.newAsyncEsqlQueryRequestBuilder(client()) + .query("from test | stats sum(pause_me)") + .pragmas(queryPragmas()) + .waitForCompletionTimeout(TimeValue.timeValueSeconds(60)) + .keepOnCompletion(keepOnCompletion) + .keepAlive(randomKeepAlive()); + + try (var response = request.execute().actionGet(60, TimeUnit.SECONDS)) { + assertThat(response.isRunning(), is(false)); + assertThat(response.columns(), equalTo(List.of(new ColumnInfo("sum(pause_me)", "long")))); + assertThat(getValuesList(response).size(), equalTo(1)); + + if (keepOnCompletion) { + assertThat(response.asyncExecutionId(), isPresent()); + // we should be able to retrieve the response by id, since it has been kept + String id = 
response.asyncExecutionId().get(); + var getResultsRequest = new GetAsyncResultRequest(id); + getResultsRequest.setWaitForCompletionTimeout(timeValueSeconds(60)); + var future = client().execute(EsqlAsyncGetResultAction.INSTANCE, getResultsRequest); + try (var resp = future.actionGet(60, TimeUnit.SECONDS)) { + assertThat(resp.asyncExecutionId().get(), equalTo(id)); + assertThat(resp.isRunning(), is(false)); + assertThat(resp.columns(), equalTo(List.of(new ColumnInfo("sum(pause_me)", "long")))); + assertThat(getValuesList(resp).size(), equalTo(1)); + } + } else { + assertThat(response.asyncExecutionId(), isEmpty()); + } + } finally { + scriptPermits.drainPermits(); + } + } + + private List getEsqlQueryTasks() throws Exception { + List foundTasks = new ArrayList<>(); + assertBusy(() -> { + List tasks = client().admin() + .cluster() + .prepareListTasks() + .setActions(EsqlQueryAction.NAME + "[a]") + .setDetailed(true) + .get() + .getTasks(); + foundTasks.addAll(tasks); + }); + return foundTasks; + } + + private EsqlQueryResponse sendAsyncQuery() { + scriptPermits.drainPermits(); + assert scriptPermits.availablePermits() == 0; + + scriptPermits.release(between(1, 5)); + var pragmas = queryPragmas(); + return EsqlQueryRequestBuilder.newAsyncEsqlQueryRequestBuilder(client()) + .query("from test | stats sum(pause_me)") + .pragmas(pragmas) + // deliberately small timeout, to frequently trigger incomplete response + .waitForCompletionTimeout(TimeValue.timeValueNanos(randomIntBetween(1, 20))) + .keepOnCompletion(randomBoolean()) + .keepAlive(randomKeepAlive()) + .execute() + .actionGet(60, TimeUnit.SECONDS); + } + + private QueryPragmas queryPragmas() { + return new QueryPragmas( + Settings.builder() + // Force shard partitioning because that's all the tests know how to match. It is easier to reason about too. + .put("data_partitioning", "shard") + // Limit the page size to something small so we do more than one page worth of work, so we get more status updates. + .put("page_size", pageSize()) + .build() + ); + } + + private AcknowledgedResponse deleteAsyncId(String id) { + DeleteAsyncResultRequest request = new DeleteAsyncResultRequest(id); + return client().execute(TransportDeleteAsyncResultAction.TYPE, request).actionGet(timeValueSeconds(60)); + } + + TimeValue randomKeepAlive() { + return TimeValue.parseTimeValue(randomTimeValue(1, 5, "d"), "test"); + } + + public static class LocalStateEsqlAsync extends LocalStateCompositeXPackPlugin { + public LocalStateEsqlAsync(final Settings settings, final Path configPath) { + super(settings, configPath); + } + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java new file mode 100644 index 0000000000000..8d7cbc5cd41be --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java @@ -0,0 +1,192 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.compute.operator.DriverTaskRunner; +import org.elasticsearch.compute.operator.exchange.ExchangeService; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.mapper.OnScriptError; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.script.LongFieldScript; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.search.lookup.SearchLookup; +import org.elasticsearch.tasks.TaskInfo; +import org.elasticsearch.test.AbstractMultiClustersTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.esql.action.AbstractEsqlIntegTestCase.randomPragmas; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; + +public class CrossClustersCancellationIT extends AbstractMultiClustersTestCase { + private static final String REMOTE_CLUSTER = "cluster-a"; + + @Override + protected Collection remoteClusterAlias() { + return List.of(REMOTE_CLUSTER); + } + + @Override + protected Collection> nodePlugins(String clusterAlias) { + List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); + plugins.add(EsqlPlugin.class); + plugins.add(InternalExchangePlugin.class); + plugins.add(PauseFieldPlugin.class); + return plugins; + } + + public static class InternalExchangePlugin extends Plugin { + @Override + public List> getSettings() { + return List.of( + Setting.timeSetting( + ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, + TimeValue.timeValueMillis(between(1000, 3000)), + Setting.Property.NodeScope + ) + ); + } + } + + @Before + public void resetPlugin() { + PauseFieldPlugin.allowEmitting = new CountDownLatch(1); + PauseFieldPlugin.startEmitting = new CountDownLatch(1); + } + + public static class PauseFieldPlugin extends Plugin implements ScriptPlugin { + public static CountDownLatch startEmitting = new CountDownLatch(1); + public static CountDownLatch allowEmitting = new CountDownLatch(1); + + @Override + public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { + return new ScriptEngine() { + @Override + public String getType() { + return "pause"; + } + + @Override + @SuppressWarnings("unchecked") + public FactoryType compile( + String name, + String code, + ScriptContext context, + Map params + ) { + if (context == LongFieldScript.CONTEXT) { + return (FactoryType) new LongFieldScript.Factory() { + @Override + public LongFieldScript.LeafFactory newFactory( + String fieldName, + Map 
params, + SearchLookup searchLookup, + OnScriptError onScriptError + ) { + return ctx -> new LongFieldScript(fieldName, params, searchLookup, onScriptError, ctx) { + @Override + public void execute() { + startEmitting.countDown(); + try { + assertTrue(allowEmitting.await(30, TimeUnit.SECONDS)); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + emit(1); + } + }; + } + }; + } + throw new IllegalStateException("unsupported type " + context); + } + + @Override + public Set> getSupportedContexts() { + return Set.of(LongFieldScript.CONTEXT); + } + }; + } + } + + private void createRemoteIndex(int numDocs) throws Exception { + XContentBuilder mapping = JsonXContent.contentBuilder().startObject(); + mapping.startObject("runtime"); + { + mapping.startObject("const"); + { + mapping.field("type", "long"); + mapping.startObject("script").field("source", "").field("lang", "pause").endObject(); + } + mapping.endObject(); + } + mapping.endObject(); + mapping.endObject(); + client(REMOTE_CLUSTER).admin().indices().prepareCreate("test").setMapping(mapping).get(); + BulkRequestBuilder bulk = client(REMOTE_CLUSTER).prepareBulk("test").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int i = 0; i < numDocs; i++) { + bulk.add(new IndexRequest().source("foo", i)); + } + bulk.get(); + } + + public void testCancel() throws Exception { + createRemoteIndex(between(10, 100)); + EsqlQueryRequest request = new EsqlQueryRequest(); + request.query("FROM *:test | STATS total=sum(const) | LIMIT 1"); + request.pragmas(randomPragmas()); + PlainActionFuture requestFuture = new PlainActionFuture<>(); + client().execute(EsqlQueryAction.INSTANCE, request, requestFuture); + assertTrue(PauseFieldPlugin.startEmitting.await(30, TimeUnit.SECONDS)); + List rootTasks = new ArrayList<>(); + assertBusy(() -> { + List tasks = client().admin().cluster().prepareListTasks().setActions(EsqlQueryAction.NAME).get().getTasks(); + assertThat(tasks, hasSize(1)); + rootTasks.addAll(tasks); + }); + var cancelRequest = new CancelTasksRequest().setTargetTaskId(rootTasks.get(0).taskId()).setReason("proxy timeout"); + client().execute(CancelTasksAction.INSTANCE, cancelRequest); + assertBusy(() -> { + List drivers = client(REMOTE_CLUSTER).admin() + .cluster() + .prepareListTasks() + .setActions(DriverTaskRunner.ACTION_NAME) + .get() + .getTasks(); + assertThat(drivers.size(), greaterThanOrEqualTo(1)); + for (TaskInfo driver : drivers) { + assertTrue(driver.cancellable()); + } + }); + PauseFieldPlugin.allowEmitting.countDown(); + Exception error = expectThrows(Exception.class, requestFuture::actionGet); + assertThat(error.getMessage(), containsString("proxy timeout")); + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java new file mode 100644 index 0000000000000..e3a01bd6f4dd9 --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java @@ -0,0 +1,176 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.compute.operator.exchange.ExchangeService; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.ingest.common.IngestCommonPlugin; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.protocol.xpack.XPackInfoRequest; +import org.elasticsearch.protocol.xpack.XPackInfoResponse; +import org.elasticsearch.reindex.ReindexPlugin; +import org.elasticsearch.test.AbstractMultiClustersTestCase; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.action.TransportXPackInfoAction; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; +import org.elasticsearch.xpack.core.enrich.EnrichPolicy; +import org.elasticsearch.xpack.core.enrich.action.DeleteEnrichPolicyAction; +import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyAction; +import org.elasticsearch.xpack.core.enrich.action.PutEnrichPolicyAction; +import org.elasticsearch.xpack.enrich.EnrichPlugin; +import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; +import org.junit.After; + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; + +public class CrossClustersEnrichIT extends AbstractMultiClustersTestCase { + private static final String REMOTE_CLUSTER = "cluster_a"; + + @Override + protected Collection remoteClusterAlias() { + return List.of(REMOTE_CLUSTER); + } + + @Override + protected Collection> nodePlugins(String clusterAlias) { + List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); + plugins.add(EsqlPlugin.class); + plugins.add(InternalExchangePlugin.class); + plugins.add(LocalStateEnrich.class); + plugins.add(IngestCommonPlugin.class); + plugins.add(ReindexPlugin.class); + return plugins; + } + + @Override + protected Settings nodeSettings() { + return Settings.builder().put(super.nodeSettings()).put(XPackSettings.SECURITY_ENABLED.getKey(), false).build(); + } + + public static class InternalExchangePlugin extends Plugin { + @Override + public List> getSettings() { + return List.of( + Setting.timeSetting( + ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, + TimeValue.timeValueSeconds(30), + Setting.Property.NodeScope + ) + ); + } + } + + public void testUnsupportedEnrich() { + Client localClient = client(LOCAL_CLUSTER); + localClient.admin().indices().prepareCreate("hosts").setMapping("ip", "type=ip", "os", "type=keyword").get(); + record Host(String ip, String os) { + + } + var hosts = List.of(new Host("192.168.1.3", "Windows")); + for (var h : hosts) { + localClient.prepareIndex("hosts").setSource("ip", h.ip, "os", h.os).get(); + } + 
localClient.admin().indices().prepareRefresh("hosts").get(); + EnrichPolicy policy = new EnrichPolicy("match", null, List.of("hosts"), "ip", List.of("ip", "os")); + localClient.execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request("hosts", policy)).actionGet(); + localClient.execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request("hosts")).actionGet(); + assertAcked(client(LOCAL_CLUSTER).admin().indices().prepareDelete("hosts")); + + record Event(String ip, String message) { + + } + for (String cluster : List.of(LOCAL_CLUSTER, REMOTE_CLUSTER)) { + var events = List.of(new Event("192.168.1.4", "access denied"), new Event("192.168.1.3", "restart")); + assertAcked(client(cluster).admin().indices().prepareCreate("events").setMapping("ip", "type=ip", "message", "type=text")); + for (Event e : events) { + client(cluster).prepareIndex("events").setSource("ip", e.ip, "message", e.message).get(); + } + client(cluster).admin().indices().prepareRefresh("events").get(); + } + List queries = List.of( + "FROM *:events | EVAL ip_str = TO_STR(ip) | ENRICH hosts on ip_str | LIMIT 1", + "FROM events*,*:events | EVAL ip_str = TO_STR(ip) | ENRICH hosts on ip_str | LIMIT 1", + "FROM *:events | EVAL ip_str = TO_STR(ip) | ENRICH hosts on ip_str | STATS COUNT(*) BY ip | LIMIT 1", + "FROM events*,*:events | EVAL ip_str = TO_STR(ip) | ENRICH hosts on ip_str | STATS COUNT(*) BY ip | LIMIT 1" + ); + for (String q : queries) { + Exception error = expectThrows(IllegalArgumentException.class, () -> runQuery(q).close()); + assertThat(error.getMessage(), containsString("cross clusters query doesn't support enrich yet")); + } + } + + @After + public void cleanClusters() { + cluster(LOCAL_CLUSTER).wipe(Set.of()); + client(LOCAL_CLUSTER).execute(DeleteEnrichPolicyAction.INSTANCE, new DeleteEnrichPolicyAction.Request("hosts")); + cluster(REMOTE_CLUSTER).wipe(Set.of()); + } + + protected EsqlQueryResponse runQuery(String query) { + EsqlQueryRequest request = new EsqlQueryRequest(); + request.query(query); + request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); + return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS); + } + + public static class LocalStateEnrich extends LocalStateCompositeXPackPlugin { + + public LocalStateEnrich(final Settings settings, final Path configPath) throws Exception { + super(settings, configPath); + + plugins.add(new EnrichPlugin(settings) { + @Override + protected XPackLicenseState getLicenseState() { + return this.getLicenseState(); + } + }); + } + + public static class EnrichTransportXPackInfoAction extends TransportXPackInfoAction { + @Inject + public EnrichTransportXPackInfoAction( + TransportService transportService, + ActionFilters actionFilters, + LicenseService licenseService, + NodeClient client + ) { + super(transportService, actionFilters, licenseService, client); + } + + @Override + protected List infoActions() { + return Collections.singletonList(XPackInfoFeatureAction.ENRICH); + } + } + + @Override + protected Class> getInfoAction() { + return EnrichTransportXPackInfoAction.class; + } + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java index a24b643a299c2..35e019e3a140b 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java +++ 
b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java @@ -7,24 +7,35 @@ package org.elasticsearch.xpack.esql.action; +import org.elasticsearch.Build; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.compute.lucene.DataPartitioning; +import org.elasticsearch.compute.operator.DriverProfile; import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.AbstractMultiClustersTestCase; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; +import org.elasticsearch.xpack.esql.plugin.QueryPragmas; +import org.junit.Before; import java.util.ArrayList; import java.util.Collection; -import java.util.Iterator; import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; public class CrossClustersQueryIT extends AbstractMultiClustersTestCase { @@ -37,11 +48,10 @@ protected Collection remoteClusterAlias() { @Override protected Collection> nodePlugins(String clusterAlias) { - List> plugins = new ArrayList<>(); - plugins.addAll(super.nodePlugins(clusterAlias)); + List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); plugins.add(EsqlPlugin.class); plugins.add(InternalExchangePlugin.class); - return CollectionUtils.appendToCopy(super.nodePlugins(clusterAlias), EsqlPlugin.class); + return plugins; } public static class InternalExchangePlugin extends Plugin { @@ -57,61 +67,146 @@ public List> getSettings() { } } - public void testUnsupported() { - int numDocs = between(1, 10); - for (String cluster : List.of(LOCAL_CLUSTER, REMOTE_CLUSTER)) { - Client client = client(cluster); - assertAcked( - client.admin() - .indices() - .prepareCreate("events") - .setSettings(Settings.builder().put("index.number_of_shards", randomIntBetween(1, 5))) - .setMapping("tag", "type=keyword", "v", "type=long") - ); - for (int i = 0; i < numDocs; i++) { - client.prepareIndex("events").setSource("tag", cluster, "v", i).get(); - } - client.admin().indices().prepareRefresh("events").get(); + @Before + public void populateLocalIndices() { + Client localClient = client(LOCAL_CLUSTER); + assertAcked( + localClient.admin() + .indices() + .prepareCreate("logs-1") + .setSettings(Settings.builder().put("index.number_of_shards", randomIntBetween(1, 5))) + .setMapping("id", "type=keyword", "tag", "type=keyword", "v", "type=long") + ); + for (int i = 0; i < 10; i++) { + localClient.prepareIndex("logs-1").setSource("id", "local-" + i, "tag", "local", "v", i).get(); } - var emptyQueries = List.of( - "from *:* | LIMIT 0", - "from *,*:* | LIMIT 0", - "from *:events* | LIMIT 0", - "from events,*:events* | LIMIT 0" + localClient.admin().indices().prepareRefresh("logs-1").get(); + } + + @Before + public void 
populateRemoteIndices() { + Client remoteClient = client(REMOTE_CLUSTER); + assertAcked( + remoteClient.admin() + .indices() + .prepareCreate("logs-2") + .setSettings(Settings.builder().put("index.number_of_shards", randomIntBetween(1, 5))) + .setMapping("id", "type=keyword", "tag", "type=keyword", "v", "type=long") ); - for (String q : emptyQueries) { - try (EsqlQueryResponse resp = runQuery(q)) { - assertThat(resp.columns(), hasSize(2)); - assertFalse(resp.values().hasNext()); + for (int i = 0; i < 10; i++) { + remoteClient.prepareIndex("logs-2").setSource("id", "remote-" + i, "tag", "remote", "v", i * i).get(); + } + remoteClient.admin().indices().prepareRefresh("logs-2").get(); + } + + public void testSimple() { + try (EsqlQueryResponse resp = runQuery("from logs-*,*:logs-* | stats sum (v)")) { + List> values = getValuesList(resp); + assertThat(values, hasSize(1)); + assertThat(values.get(0), equalTo(List.of(330L))); + } + try (EsqlQueryResponse resp = runQuery("from logs-*,*:logs-* | stats count(*) by tag | sort tag | keep tag")) { + List> values = getValuesList(resp); + assertThat(values, hasSize(2)); + assertThat(values.get(0), equalTo(List.of("local"))); + assertThat(values.get(1), equalTo(List.of("remote"))); + } + } + + public void testMetadataIndex() { + try (EsqlQueryResponse resp = runQuery("FROM logs*,*:logs* [METADATA _index] | stats sum(v) by _index | sort _index")) { + List> values = getValuesList(resp); + assertThat(values.get(0), equalTo(List.of(285L, "cluster-a:logs-2"))); + assertThat(values.get(1), equalTo(List.of(45L, "logs-1"))); + } + } + + public void testProfile() { + assumeTrue("pragmas only enabled on snapshot builds", Build.current().isSnapshot()); + final int localOnlyProfiles; + // uses shard partitioning as segments can be merged during these queries + var pragmas = new QueryPragmas(Settings.builder().put(QueryPragmas.DATA_PARTITIONING.getKey(), DataPartitioning.SHARD).build()); + { + EsqlQueryRequest request = new EsqlQueryRequest(); + request.query("FROM logs* | stats sum(v)"); + request.pragmas(pragmas); + request.profile(true); + try (EsqlQueryResponse resp = runQuery(request)) { + List> values = getValuesList(resp); + assertThat(values.get(0), equalTo(List.of(45L))); + assertNotNull(resp.profile()); + List drivers = resp.profile().drivers(); + assertThat(drivers.size(), greaterThanOrEqualTo(2)); // one coordinator and at least one data + localOnlyProfiles = drivers.size(); } } - var remotePatterns = List.of("*:*", "*, *:*", "*:events*", "events, *:events*"); - for (String pattern : remotePatterns) { - var query = "FROM " + pattern + " | LIMIT " + between(1, 100); - IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> runQuery(query).close()); - assertThat(error.getMessage(), equalTo("ES|QL does not yet support querying remote indices [" + pattern + "]")); + final int remoteOnlyProfiles; + { + EsqlQueryRequest request = new EsqlQueryRequest(); + request.query("FROM *:logs* | stats sum(v)"); + request.pragmas(pragmas); + request.profile(true); + try (EsqlQueryResponse resp = runQuery(request)) { + List> values = getValuesList(resp); + assertThat(values.get(0), equalTo(List.of(285L))); + assertNotNull(resp.profile()); + List drivers = resp.profile().drivers(); + assertThat(drivers.size(), greaterThanOrEqualTo(3)); // two coordinators and at least one data + remoteOnlyProfiles = drivers.size(); + } } - int limit = between(1, numDocs); - var localQueries = List.of("from events* | LIMIT " + limit, "from * | LIMIT " + limit); - 
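The `testProfile` assertions that continue below encode a double-counting argument: the local-only run produces one coordinator driver plus the local data drivers, the remote-only run produces two coordinator drivers (local and remote) plus the remote data drivers, and the combined run shares a single local coordinator, hence the `- 1`. A sketch with hypothetical driver counts:

```java
// Hypothetical counts; the real numbers depend on shards and segments,
// which is why the test only pins the arithmetic relationship.
public class DriverProfileArithmetic {
    public static void main(String[] args) {
        int localData = 2;  // data drivers for the local-only query
        int remoteData = 1; // data drivers for the remote-only query

        int localOnly = 1 + localData;        // 1 local coordinator
        int remoteOnly = 2 + remoteData;      // local + remote coordinators
        int all = 2 + localData + remoteData; // both coordinators, all data drivers

        // the local coordinator is counted in both single-cluster runs,
        // so it must be subtracted exactly once
        System.out.println(all == localOnly + remoteOnly - 1); // true
    }
}
```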
for (String q : localQueries) { - try (EsqlQueryResponse resp = runQuery(q)) { - assertThat(resp.columns(), hasSize(2)); - int rows = 0; - Iterator> values = resp.values(); - while (values.hasNext()) { - values.next(); - ++rows; - } - assertThat(rows, equalTo(limit)); + final int allProfiles; + { + EsqlQueryRequest request = new EsqlQueryRequest(); + request.query("FROM logs*,*:logs* | stats total = sum(v)"); + request.pragmas(pragmas); + request.profile(true); + try (EsqlQueryResponse resp = runQuery(request)) { + List> values = getValuesList(resp); + assertThat(values.get(0), equalTo(List.of(330L))); + assertNotNull(resp.profile()); + List drivers = resp.profile().drivers(); + assertThat(drivers.size(), greaterThanOrEqualTo(4)); // two coordinators and at least two data + allProfiles = drivers.size(); } } + assertThat(allProfiles, equalTo(localOnlyProfiles + remoteOnlyProfiles - 1)); + } + + public void testWarnings() throws Exception { + EsqlQueryRequest request = new EsqlQueryRequest(); + request.query("FROM logs*,*:logs* | EVAL ip = to_ip(id) | STATS total = sum(v) by ip | LIMIT 10"); + PlainActionFuture future = new PlainActionFuture<>(); + InternalTestCluster cluster = cluster(LOCAL_CLUSTER); + String node = randomFrom(cluster.getNodeNames()); + CountDownLatch latch = new CountDownLatch(1); + cluster.client(node).execute(EsqlQueryAction.INSTANCE, request, ActionListener.wrap(resp -> { + TransportService ts = cluster.getInstance(TransportService.class, node); + Map> responseHeaders = ts.getThreadPool().getThreadContext().getResponseHeaders(); + List warnings = responseHeaders.getOrDefault("Warning", List.of()) + .stream() + .filter(w -> w.contains("is not an IP string literal")) + .toList(); + assertThat(warnings.size(), greaterThanOrEqualTo(20)); + List> values = getValuesList(resp); + assertThat(values.get(0).get(0), equalTo(330L)); + assertNull(values.get(0).get(1)); + latch.countDown(); + }, e -> { + latch.countDown(); + throw new AssertionError(e); + })); + assertTrue(latch.await(30, TimeUnit.SECONDS)); } protected EsqlQueryResponse runQuery(String query) { - logger.info("--> query [{}]", query); EsqlQueryRequest request = new EsqlQueryRequest(); request.query(query); request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); + return runQuery(request); + } + + protected EsqlQueryResponse runQuery(EsqlQueryRequest request) { return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS); } } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java index 63fbb7176558c..80da888eb4dfb 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.esql.analysis.VerificationException; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; +import org.elasticsearch.xpack.esql.parser.ParsingException; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.junit.Before; @@ -66,6 +67,7 @@ import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; import static 
org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -84,6 +86,15 @@ public void setupIndex() { createAndPopulateIndex("test"); } + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + // TODO: Allow relocation once we have retry in ESQL (see #103081) + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + .put("cluster.routing.rebalance.enable", "none") + .build(); + } + public void testProjectConstant() { try (EsqlQueryResponse results = run("from test | eval x = 1 | keep x")) { assertThat(results.columns(), equalTo(List.of(new ColumnInfo("x", "integer")))); @@ -790,7 +801,11 @@ public void testESFilter() throws Exception { long to = randomBoolean() ? Long.MAX_VALUE : randomLongBetween(from, from + 1000); QueryBuilder filter = new RangeQueryBuilder("val").from(from, true).to(to, true); try ( - EsqlQueryResponse results = new EsqlQueryRequestBuilder(client()).query(command).filter(filter).pragmas(randomPragmas()).get() + EsqlQueryResponse results = EsqlQueryRequestBuilder.newSyncEsqlQueryRequestBuilder(client()) + .query(command) + .filter(filter) + .pragmas(randomPragmas()) + .get() ) { logger.info(results); OptionalDouble avg = docs.values().stream().filter(v -> from <= v && v <= to).mapToLong(n -> n).average(); @@ -976,7 +991,27 @@ public void testOverlappingIndexPatterns() throws Exception { .add(new IndexRequest("test_overlapping_index_patterns_2").id("1").source("field", "foo")) .get(); - expectThrows(VerificationException.class, () -> run("from test_overlapping_index_patterns_* | sort field")); + assertVerificationException("from test_overlapping_index_patterns_* | sort field"); + } + + public void testErrorMessageForUnknownColumn() { + var e = assertVerificationException("row a = 1 | eval x = b"); + assertThat(e.getMessage(), containsString("Unknown column [b]")); + } + + // Straightforward verification. Subclasses can override. + protected Exception assertVerificationException(String esqlCommand) { + return expectThrows(VerificationException.class, () -> run(esqlCommand)); + } + + public void testErrorMessageForEmptyParams() { + var e = assertParsingException("row a = 1 | eval x = ?"); + assertThat(e.getMessage(), containsString("Not enough actual parameters 0")); + } + + // Straightforward verification. Subclasses can override. 
+ protected Exception assertParsingException(String esqlCommand) { + return expectThrows(ParsingException.class, () -> run(esqlCommand)); } public void testEmptyIndex() { diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java index e499d3b783bb8..276539a5bbeac 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java @@ -12,45 +12,26 @@ import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; -import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.compute.lucene.LuceneSourceOperator; import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; import org.elasticsearch.compute.operator.DriverStatus; import org.elasticsearch.compute.operator.DriverTaskRunner; import org.elasticsearch.compute.operator.exchange.ExchangeSinkOperator; import org.elasticsearch.compute.operator.exchange.ExchangeSourceOperator; -import org.elasticsearch.index.engine.SegmentsStats; -import org.elasticsearch.index.mapper.OnScriptError; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.ScriptPlugin; -import org.elasticsearch.script.LongFieldScript; -import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.ScriptEngine; -import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import org.junit.Before; -import java.io.IOException; import java.util.ArrayList; -import java.util.Collection; import java.util.List; -import java.util.Map; import java.util.Set; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; @@ -71,83 +52,34 @@ value = "org.elasticsearch.xpack.esql:TRACE,org.elasticsearch.compute:TRACE", reason = "These tests were failing frequently, let's learn as much as we can" ) -public class EsqlActionTaskIT extends AbstractEsqlIntegTestCase { - private static int PAGE_SIZE; - private static int NUM_DOCS; +public class EsqlActionTaskIT extends AbstractPausableIntegTestCase { - private static String READ_DESCRIPTION; - private static String MERGE_DESCRIPTION; private static final Logger LOGGER = LogManager.getLogger(EsqlActionTaskIT.class); - @Override - protected Collection> nodePlugins() { - return CollectionUtils.appendToCopy(super.nodePlugins(), PausableFieldPlugin.class); - } + private String READ_DESCRIPTION; + 
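Stepping back to the EsqlActionIT hunk above: the `assertVerificationException`/`assertParsingException` hooks exist so the async subclass later in this diff can widen the accepted exception types, because a failure that round-trips through a stored async result may surface as a `NotSerializableExceptionWrapper`. A self-contained sketch of the widening helper, mirroring the shape of the `expectThrowsAnyOf` test utility used below (names here are illustrative):

```java
import java.util.List;

// Sync tests pin the concrete exception type; async tests pass a wider list.
public class ExceptionHookSketch {
    static Exception expectThrowsAnyOf(List<Class<? extends Exception>> types, Runnable r) {
        try {
            r.run();
        } catch (Exception e) {
            for (Class<? extends Exception> t : types) {
                if (t.isInstance(e)) {
                    return e; // caller can still inspect the message or unwrap a cause
                }
            }
            throw new AssertionError("unexpected exception type: " + e, e);
        }
        throw new AssertionError("expected an exception but none was thrown");
    }

    public static void main(String[] args) {
        Exception e = expectThrowsAnyOf(
            List.of(IllegalStateException.class, IllegalArgumentException.class),
            () -> { throw new IllegalArgumentException("parsing_exception"); }
        );
        System.out.println("caught: " + e.getMessage());
    }
}
```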
private String MERGE_DESCRIPTION; @Before - public void setupIndex() throws IOException { + public void setup() { assumeTrue("requires query pragmas", canUseQueryPragmas()); - PAGE_SIZE = between(10, 100); - NUM_DOCS = between(4 * PAGE_SIZE, 5 * PAGE_SIZE); READ_DESCRIPTION = """ - \\_LuceneSourceOperator[dataPartitioning = SHARD, maxPageSize = PAGE_SIZE, limit = 2147483647] + \\_LuceneSourceOperator[dataPartitioning = SHARD, maxPageSize = pageSize(), limit = 2147483647] \\_ValuesSourceReaderOperator[fields = [pause_me]] \\_AggregationOperator[mode = INITIAL, aggs = sum of longs] - \\_ExchangeSinkOperator""".replace("PAGE_SIZE", Integer.toString(PAGE_SIZE)); + \\_ExchangeSinkOperator""".replace("pageSize()", Integer.toString(pageSize())); MERGE_DESCRIPTION = """ \\_ExchangeSourceOperator[] \\_AggregationOperator[mode = FINAL, aggs = sum of longs] \\_ProjectOperator[projection = [0]] \\_LimitOperator[limit = 500] \\_OutputOperator[columns = [sum(pause_me)]]"""; - - XContentBuilder mapping = JsonXContent.contentBuilder().startObject(); - mapping.startObject("runtime"); - { - mapping.startObject("pause_me"); - { - mapping.field("type", "long"); - mapping.startObject("script").field("source", "").field("lang", "pause").endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - client().admin() - .indices() - .prepareCreate("test") - .setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0)) - .setMapping(mapping.endObject()) - .get(); - - BulkRequestBuilder bulk = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - for (int i = 0; i < NUM_DOCS; i++) { - bulk.add(prepareIndex("test").setId(Integer.toString(i)).setSource("foo", i)); - } - bulk.get(); - /* - * forceMerge so we can be sure that we don't bump into tiny - * segments that finish super quickly and cause us to report strange - * statuses when we expect "starting". - */ - client().admin().indices().prepareForceMerge("test").setMaxNumSegments(1).get(); - /* - * Double super extra paranoid check that force merge worked. It's - * failed to reduce the index to a single segment and caused this test - * to fail in very difficult to debug ways. If it fails again, it'll - * trip here. Or maybe it won't! And we'll learn something. Maybe - * it's ghosts. 
- */ - SegmentsStats stats = client().admin().indices().prepareStats("test").get().getPrimaries().getSegments(); - if (stats.getCount() != 1L) { - fail(Strings.toString(stats)); - } } public void testTaskContents() throws Exception { ActionFuture response = startEsql(); try { getTasksStarting(); - scriptPermits.release(PAGE_SIZE); + scriptPermits.release(pageSize()); List foundTasks = getTasksRunning(); int luceneSources = 0; int valuesSourceReaders = 0; @@ -158,9 +90,11 @@ public void testTaskContents() throws Exception { assertThat(status.sessionId(), not(emptyOrNullString())); for (DriverStatus.OperatorStatus o : status.activeOperators()) { logger.info("status {}", o); - if (o.operator().startsWith("LuceneSourceOperator[maxPageSize=" + PAGE_SIZE)) { + if (o.operator().startsWith("LuceneSourceOperator[maxPageSize=" + pageSize())) { LuceneSourceOperator.Status oStatus = (LuceneSourceOperator.Status) o.status(); assertThat(oStatus.processedSlices(), lessThanOrEqualTo(oStatus.totalSlices())); + assertThat(oStatus.processedQueries(), equalTo(Set.of("*:*"))); + assertThat(oStatus.processedShards(), equalTo(Set.of("test:0"))); assertThat(oStatus.sliceIndex(), lessThanOrEqualTo(oStatus.totalSlices())); assertThat(oStatus.sliceMin(), greaterThanOrEqualTo(0)); assertThat(oStatus.sliceMax(), greaterThanOrEqualTo(oStatus.sliceMin())); @@ -204,9 +138,9 @@ public void testTaskContents() throws Exception { assertThat(exchangeSinks, greaterThanOrEqualTo(1)); assertThat(exchangeSources, equalTo(1)); } finally { - scriptPermits.release(NUM_DOCS); + scriptPermits.release(numberOfDocs()); try (EsqlQueryResponse esqlResponse = response.get()) { - assertThat(Iterators.flatMap(esqlResponse.values(), i -> i).next(), equalTo((long) NUM_DOCS)); + assertThat(Iterators.flatMap(esqlResponse.values(), i -> i).next(), equalTo((long) numberOfDocs())); } } } @@ -219,7 +153,7 @@ public void testCancelRead() throws Exception { cancelTask(running.taskId()); assertCancelled(response); } finally { - scriptPermits.release(NUM_DOCS); + scriptPermits.release(numberOfDocs()); } } @@ -231,7 +165,7 @@ public void testCancelMerge() throws Exception { cancelTask(running.taskId()); assertCancelled(response); } finally { - scriptPermits.release(NUM_DOCS); + scriptPermits.release(numberOfDocs()); } } @@ -249,7 +183,7 @@ public void testCancelEsqlTask() throws Exception { cancelTask(tasks.get(0).taskId()); assertCancelled(response); } finally { - scriptPermits.release(NUM_DOCS); + scriptPermits.release(numberOfDocs()); } } @@ -261,12 +195,15 @@ private ActionFuture startEsql() { // Force shard partitioning because that's all the tests know how to match. It is easier to reason about too. .put("data_partitioning", "shard") // Limit the page size to something small so we do more than one page worth of work, so we get more status updates. 
- .put("page_size", PAGE_SIZE) + .put("page_size", pageSize()) // Report the status after every action .put("status_interval", "0ms") .build() ); - return new EsqlQueryRequestBuilder(client()).query("from test | stats sum(pause_me)").pragmas(pragmas).execute(); + return EsqlQueryRequestBuilder.newSyncEsqlQueryRequestBuilder(client()) + .query("from test | stats sum(pause_me)") + .pragmas(pragmas) + .execute(); } private void cancelTask(TaskId taskId) { @@ -274,7 +211,7 @@ private void cancelTask(TaskId taskId) { request.setWaitForCompletion(false); LOGGER.debug("--> cancelling task [{}] without waiting for completion", taskId); client().admin().cluster().execute(CancelTasksAction.INSTANCE, request).actionGet(); - scriptPermits.release(NUM_DOCS); + scriptPermits.release(numberOfDocs()); request = new CancelTasksRequest().setTargetTaskId(taskId).setReason("test cancel"); request.setWaitForCompletion(true); LOGGER.debug("--> cancelling task [{}] with waiting for completion", taskId); @@ -345,7 +282,7 @@ private List getTasksRunning() throws Exception { } private void assertCancelled(ActionFuture response) throws Exception { - Exception e = expectThrows(Exception.class, response::actionGet); + Exception e = expectThrows(Exception.class, response); Throwable cancelException = ExceptionsHelper.unwrap(e, TaskCancelledException.class); assertNotNull(cancelException); /* @@ -367,56 +304,4 @@ private void assertCancelled(ActionFuture response) throws Ex ) ); } - - private static final Semaphore scriptPermits = new Semaphore(0); - - public static class PausableFieldPlugin extends Plugin implements ScriptPlugin { - @Override - public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { - return new ScriptEngine() { - @Override - public String getType() { - return "pause"; - } - - @Override - @SuppressWarnings("unchecked") - public FactoryType compile( - String name, - String code, - ScriptContext context, - Map params - ) { - return (FactoryType) new LongFieldScript.Factory() { - @Override - public LongFieldScript.LeafFactory newFactory( - String fieldName, - Map params, - SearchLookup searchLookup, - OnScriptError onScriptError - ) { - return ctx -> new LongFieldScript(fieldName, params, searchLookup, onScriptError, ctx) { - @Override - public void execute() { - try { - assertTrue(scriptPermits.tryAcquire(1, TimeUnit.MINUTES)); - } catch (Exception e) { - throw new AssertionError(e); - } - LOGGER.debug("--> emitting value"); - emit(1); - } - }; - } - }; - } - - @Override - public Set> getSupportedContexts() { - return Set.of(LongFieldScript.CONTEXT); - } - }; - } - } - } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlAsyncActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlAsyncActionIT.java new file mode 100644 index 0000000000000..f13321f03f0fe --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlAsyncActionIT.java @@ -0,0 +1,166 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.async.DeleteAsyncResultRequest; +import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; +import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction; +import org.elasticsearch.xpack.esql.TestBlockFactory; +import org.elasticsearch.xpack.esql.analysis.VerificationException; +import org.elasticsearch.xpack.esql.parser.ParsingException; +import org.elasticsearch.xpack.esql.plugin.QueryPragmas; + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.core.TimeValue.timeValueSeconds; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.core.IsEqual.equalTo; +import static org.hamcrest.core.IsNot.not; + +/** + * Runs test scenarios from EsqlActionIT, with an extra level of indirection + * through the async query and async get APIs. 
+ */ +public class EsqlAsyncActionIT extends EsqlActionIT { + + @Override + protected Collection> nodePlugins() { + ArrayList> actions = new ArrayList<>(super.nodePlugins()); + actions.add(LocalStateEsqlAsync.class); + return Collections.unmodifiableList(actions); + } + + @Override + protected EsqlQueryResponse run(String esqlCommands, QueryPragmas pragmas, QueryBuilder filter) { + EsqlQueryRequest request = EsqlQueryRequest.asyncEsqlQueryRequest(); + request.query(esqlCommands); + request.pragmas(pragmas); + // deliberately small timeout, to frequently trigger incomplete response + request.waitForCompletionTimeout(TimeValue.timeValueNanos(1)); + request.keepOnCompletion(randomBoolean()); + if (filter != null) { + request.filter(filter); + } + + var response = run(request); + if (response.asyncExecutionId().isPresent()) { + List initialColumns = null; + List initialPages = null; + String id = response.asyncExecutionId().get(); + if (response.isRunning() == false) { + assertThat(request.keepOnCompletion(), is(true)); + assertThat(response.columns(), is(not(empty()))); + assertThat(response.pages(), is(not(empty()))); + initialColumns = List.copyOf(response.columns()); + initialPages = deepCopyOf(response.pages(), TestBlockFactory.getNonBreakingInstance()); + } else { + assertThat(response.columns(), is(empty())); // no partial results + assertThat(response.pages(), is(empty())); + } + response.close(); + var getResponse = getAsyncResponse(id); + + // assert initial contents, if any, are the same as async get contents + if (initialColumns != null) { + assertEquals(initialColumns, getResponse.columns()); + assertEquals(initialPages, getResponse.pages()); + } + + assertDeletable(id); + return getResponse; + } else { + return response; + } + } + + void assertDeletable(String id) { + var resp = deleteAsyncId(id); + assertTrue(resp.isAcknowledged()); + // the stored response should no longer be retrievable + var e = expectThrows(ResourceNotFoundException.class, () -> getAsyncResponse(id)); + assertThat(e.getMessage(), equalTo(id)); + } + + EsqlQueryResponse getAsyncResponse(String id) { + try { + var getResultsRequest = new GetAsyncResultRequest(id).setWaitForCompletionTimeout(timeValueSeconds(60)); + return client().execute(EsqlAsyncGetResultAction.INSTANCE, getResultsRequest).actionGet(30, TimeUnit.SECONDS); + } catch (ElasticsearchTimeoutException e) { + throw new AssertionError("timeout", e); + } + } + + AcknowledgedResponse deleteAsyncId(String id) { + try { + DeleteAsyncResultRequest request = new DeleteAsyncResultRequest(id); + return client().execute(TransportDeleteAsyncResultAction.TYPE, request).actionGet(30, TimeUnit.SECONDS); + } catch (ElasticsearchTimeoutException e) { + throw new AssertionError("timeout", e); + } + } + + // Overridden to allow for not-serializable wrapper. + @Override + protected Exception assertVerificationException(String esqlCommand) { + var e = expectThrowsAnyOf(List.of(NotSerializableExceptionWrapper.class, VerificationException.class), () -> run(esqlCommand)); + if (e instanceof NotSerializableExceptionWrapper wrapper) { + assertThat(wrapper.unwrapCause().getMessage(), containsString("verification_exception")); + } + return e; + } + + // Overridden to allow for not-serializable wrapper. 
+ @Override + protected Exception assertParsingException(String esqlCommand) { + var e = expectThrowsAnyOf(List.of(NotSerializableExceptionWrapper.class, ParsingException.class), () -> run(esqlCommand)); + if (e instanceof NotSerializableExceptionWrapper wrapper) { + assertThat(wrapper.unwrapCause().getMessage(), containsString("parsing_exception")); + } + return e; + } + + public static class LocalStateEsqlAsync extends LocalStateCompositeXPackPlugin { + public LocalStateEsqlAsync(final Settings settings, final Path configPath) { + super(settings, configPath); + } + } + + // -- TODO: eventually remove and use common compute test infra + + public static List deepCopyOf(List pages, BlockFactory blockFactory) { + return pages.stream().map(page -> deepCopyOf(page, blockFactory)).toList(); + } + + public static Page deepCopyOf(Page page, BlockFactory blockFactory) { + Block[] blockCopies = new Block[page.getBlockCount()]; + for (int i = 0; i < blockCopies.length; i++) { + blockCopies[i] = BlockUtils.deepCopyOf(page.getBlock(i), blockFactory); + } + return new Page(blockCopies); + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TimeBasedIndicesIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TimeBasedIndicesIT.java new file mode 100644 index 0000000000000..a1fbee17ef8ec --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TimeBasedIndicesIT.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
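Put together, the `run()` override above exercises the full async lifecycle: submit with a deliberately tiny wait-for-completion timeout, poll the stored result, then delete it and verify it is gone. Condensed to its happy path, using the request and action types from this file (a sketch that reuses EsqlAsyncActionIT's imports and assumes a test `Client` in scope):

```java
// Happy-path sketch of the async round-trip exercised by run() above.
static void asyncRoundTrip(Client client) {
    EsqlQueryRequest request = EsqlQueryRequest.asyncEsqlQueryRequest();
    request.query("from test | stats count(*)");
    request.waitForCompletionTimeout(TimeValue.timeValueNanos(1)); // almost always returns "running"
    request.keepOnCompletion(true);                                // keep the stored result either way

    String id;
    try (EsqlQueryResponse submitted = client.execute(EsqlQueryAction.INSTANCE, request).actionGet()) {
        id = submitted.asyncExecutionId().orElseThrow(); // present whenever the query went async
    }
    var get = new GetAsyncResultRequest(id).setWaitForCompletionTimeout(TimeValue.timeValueSeconds(60));
    try (EsqlQueryResponse done = client.execute(EsqlAsyncGetResultAction.INSTANCE, get).actionGet()) {
        // assert on done.columns() / done.pages() here
    }
    // clean up the stored result; fetching the same id again should now fail
    client.execute(TransportDeleteAsyncResultAction.TYPE, new DeleteAsyncResultRequest(id)).actionGet();
}
```

The `keepOnCompletion(randomBoolean())` in the real test additionally covers the race where the query finishes within the one-nanosecond window, in which case the initial response already carries columns and pages that must match the later `get`.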
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.query.RangeQueryBuilder; + +import java.util.List; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; +import static org.hamcrest.Matchers.hasSize; + +public class TimeBasedIndicesIT extends AbstractEsqlIntegTestCase { + + public void testFilter() { + long epoch = System.currentTimeMillis(); + assertAcked(client().admin().indices().prepareCreate("test").setMapping("@timestamp", "type=date", "value", "type=long")); + BulkRequestBuilder bulk = client().prepareBulk("test").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + int oldDocs = between(10, 100); + for (int i = 0; i < oldDocs; i++) { + long timestamp = epoch - TimeValue.timeValueHours(between(1, 2)).millis(); + bulk.add(new IndexRequest().source("@timestamp", timestamp, "value", -i)); + } + int newDocs = between(10, 100); + for (int i = 0; i < newDocs; i++) { + long timestamp = epoch + TimeValue.timeValueHours(between(1, 2)).millis(); + bulk.add(new IndexRequest().source("@timestamp", timestamp, "value", i)); + } + bulk.get(); + { + EsqlQueryRequest request = new EsqlQueryRequest(); + request.query("FROM test | limit 1000"); + request.filter(new RangeQueryBuilder("@timestamp").from(epoch - TimeValue.timeValueHours(3).millis()).to("now")); + try (var resp = run(request)) { + List> values = getValuesList(resp); + assertThat(values, hasSize(oldDocs)); + } + } + { + EsqlQueryRequest request = new EsqlQueryRequest(); + request.query("FROM test | limit 1000"); + request.filter(new RangeQueryBuilder("@timestamp").from("now").to(epoch + TimeValue.timeValueHours(3).millis())); + try (var resp = run(request)) { + List> values = getValuesList(resp); + assertThat(values, hasSize(newDocs)); + } + } + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/WarningsIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/WarningsIT.java index 12897979a47e0..fb6d23695f837 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/WarningsIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/WarningsIT.java @@ -8,23 +8,21 @@ package org.elasticsearch.xpack.esql.action; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.transport.TransportService; import java.util.List; import java.util.Map; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -@TestLogging(value = "org.elasticsearch.xpack.esql:TRACE", reason = "debug") public class WarningsIT extends AbstractEsqlIntegTestCase { - public void testCollectWarnings() { + public void testCollectWarnings() throws Exception { final String node1, node2; if (randomBoolean()) { internalCluster().ensureAtLeastNumDataNodes(2); @@ -64,19 
+62,23 @@ public void testCollectWarnings() { EsqlQueryRequest request = new EsqlQueryRequest(); request.query("FROM index-* | EVAL ip = to_ip(host) | STATS s = COUNT(*) by ip | KEEP ip | LIMIT 100"); request.pragmas(randomPragmas()); - PlainActionFuture future = new PlainActionFuture<>(); - client(coordinator.getName()).execute(EsqlQueryAction.INSTANCE, request, ActionListener.runBefore(future, () -> { - var threadpool = internalCluster().getInstance(TransportService.class, coordinator.getName()).getThreadPool(); - Map> responseHeaders = threadpool.getThreadContext().getResponseHeaders(); - List warnings = responseHeaders.getOrDefault("Warning", List.of()) - .stream() - .filter(w -> w.contains("is not an IP string literal")) - .toList(); - int expectedWarnings = Math.min(20, numDocs1 + numDocs2); - // we cap the number of warnings per node - assertThat(warnings.size(), greaterThanOrEqualTo(expectedWarnings)); + CountDownLatch latch = new CountDownLatch(1); + client(coordinator.getName()).execute(EsqlQueryAction.INSTANCE, request, ActionListener.running(() -> { + try { + var threadpool = internalCluster().getInstance(TransportService.class, coordinator.getName()).getThreadPool(); + Map> responseHeaders = threadpool.getThreadContext().getResponseHeaders(); + List warnings = responseHeaders.getOrDefault("Warning", List.of()) + .stream() + .filter(w -> w.contains("is not an IP string literal")) + .toList(); + int expectedWarnings = Math.min(20, numDocs1 + numDocs2); + // we cap the number of warnings per node + assertThat(warnings.size(), greaterThanOrEqualTo(expectedWarnings)); + } finally { + latch.countDown(); + } })); - future.actionGet(30, TimeUnit.SECONDS).close(); + latch.await(30, TimeUnit.SECONDS); } private DiscoveryNode randomDataNode() { diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/CanMatchIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/CanMatchIT.java index 3969190630fd3..0a2e42db4078d 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/CanMatchIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/CanMatchIT.java @@ -205,6 +205,7 @@ public void testAliasFilters() { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103749") public void testFailOnUnavailableShards() throws Exception { internalCluster().ensureAtLeastNumDataNodes(2); String logsOnlyNode = internalCluster().startDataOnlyNode(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsGeometriesEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsGeometriesEvaluator.java new file mode 100644 index 0000000000000..025cca53ceab0 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsGeometriesEvaluator.java @@ -0,0 +1,139 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
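The reworked `testCollectWarnings` above replaces the `PlainActionFuture` with an explicit latch because the `Warning` response headers are only visible in the coordinator's thread context while the listener runs; the `try`/`finally` guarantees a failing assertion still counts down instead of hanging the 30-second await. The same idiom in miniature (plain Java; note the sketch also checks `await`'s boolean result, which the test above leaves implicit):

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

// Run assertions on the response thread, where the context headers still
// exist, and always count down so a failure cannot hang the await below.
public class LatchAssertionDemo {
    interface Listener { void onResponse(String header); }

    public static void main(String[] args) throws Exception {
        CountDownLatch latch = new CountDownLatch(1);
        Listener listener = header -> {
            try {
                if (header.contains("is not an IP string literal") == false) {
                    throw new AssertionError("missing expected warning header");
                }
            } finally {
                latch.countDown(); // runs even when the assertion above fails
            }
        };
        // stand-in for the transport layer invoking the listener asynchronously
        new Thread(() -> listener.onResponse("299 - \"... is not an IP string literal ...\"")).start();
        if (latch.await(30, TimeUnit.SECONDS) == false) {
            throw new AssertionError("listener never completed");
        }
        System.out.println("warning header observed");
    }
}
```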
+package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link Equals}. + * This class is generated. Do not edit it. + */ +public final class EqualsGeometriesEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator lhs; + + private final EvalOperator.ExpressionEvaluator rhs; + + private final DriverContext driverContext; + + public EqualsGeometriesEvaluator(Source source, EvalOperator.ExpressionEvaluator lhs, + EvalOperator.ExpressionEvaluator rhs, DriverContext driverContext) { + this.warnings = new Warnings(source); + this.lhs = lhs; + this.rhs = rhs; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock lhsBlock = (BytesRefBlock) lhs.eval(page)) { + try (BytesRefBlock rhsBlock = (BytesRefBlock) rhs.eval(page)) { + BytesRefVector lhsVector = lhsBlock.asVector(); + if (lhsVector == null) { + return eval(page.getPositionCount(), lhsBlock, rhsBlock); + } + BytesRefVector rhsVector = rhsBlock.asVector(); + if (rhsVector == null) { + return eval(page.getPositionCount(), lhsBlock, rhsBlock); + } + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); + } + } + } + + public BooleanBlock eval(int positionCount, BytesRefBlock lhsBlock, BytesRefBlock rhsBlock) { + try(BooleanBlock.Builder result = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { + BytesRef lhsScratch = new BytesRef(); + BytesRef rhsScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (lhsBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (lhsBlock.getValueCount(p) != 1) { + if (lhsBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (rhsBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (rhsBlock.getValueCount(p) != 1) { + if (rhsBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + result.appendBoolean(Equals.processGeometries(lhsBlock.getBytesRef(lhsBlock.getFirstValueIndex(p), lhsScratch), rhsBlock.getBytesRef(rhsBlock.getFirstValueIndex(p), rhsScratch))); + } + return result.build(); + } + } + + public BooleanVector eval(int positionCount, BytesRefVector lhsVector, BytesRefVector rhsVector) { + try(BooleanVector.Builder result = driverContext.blockFactory().newBooleanVectorBuilder(positionCount)) { + BytesRef lhsScratch = new BytesRef(); + BytesRef rhsScratch = new BytesRef(); + position: for (int p = 
0; p < positionCount; p++) { + result.appendBoolean(Equals.processGeometries(lhsVector.getBytesRef(p, lhsScratch), rhsVector.getBytesRef(p, rhsScratch))); + } + return result.build(); + } + } + + @Override + public String toString() { + return "EqualsGeometriesEvaluator[" + "lhs=" + lhs + ", rhs=" + rhs + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(lhs, rhs); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory lhs; + + private final EvalOperator.ExpressionEvaluator.Factory rhs; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory lhs, + EvalOperator.ExpressionEvaluator.Factory rhs) { + this.source = source; + this.lhs = lhs; + this.rhs = rhs; + } + + @Override + public EqualsGeometriesEvaluator get(DriverContext context) { + return new EqualsGeometriesEvaluator(source, lhs.get(context), rhs.get(context), context); + } + + @Override + public String toString() { + return "EqualsGeometriesEvaluator[" + "lhs=" + lhs + ", rhs=" + rhs + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsGeometriesEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsGeometriesEvaluator.java new file mode 100644 index 0000000000000..d0dd58e86babe --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsGeometriesEvaluator.java @@ -0,0 +1,139 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link NotEquals}. + * This class is generated. Do not edit it. 
+ */ +public final class NotEqualsGeometriesEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator lhs; + + private final EvalOperator.ExpressionEvaluator rhs; + + private final DriverContext driverContext; + + public NotEqualsGeometriesEvaluator(Source source, EvalOperator.ExpressionEvaluator lhs, + EvalOperator.ExpressionEvaluator rhs, DriverContext driverContext) { + this.warnings = new Warnings(source); + this.lhs = lhs; + this.rhs = rhs; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock lhsBlock = (BytesRefBlock) lhs.eval(page)) { + try (BytesRefBlock rhsBlock = (BytesRefBlock) rhs.eval(page)) { + BytesRefVector lhsVector = lhsBlock.asVector(); + if (lhsVector == null) { + return eval(page.getPositionCount(), lhsBlock, rhsBlock); + } + BytesRefVector rhsVector = rhsBlock.asVector(); + if (rhsVector == null) { + return eval(page.getPositionCount(), lhsBlock, rhsBlock); + } + return eval(page.getPositionCount(), lhsVector, rhsVector).asBlock(); + } + } + } + + public BooleanBlock eval(int positionCount, BytesRefBlock lhsBlock, BytesRefBlock rhsBlock) { + try(BooleanBlock.Builder result = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { + BytesRef lhsScratch = new BytesRef(); + BytesRef rhsScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (lhsBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (lhsBlock.getValueCount(p) != 1) { + if (lhsBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (rhsBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (rhsBlock.getValueCount(p) != 1) { + if (rhsBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + result.appendBoolean(NotEquals.processGeometries(lhsBlock.getBytesRef(lhsBlock.getFirstValueIndex(p), lhsScratch), rhsBlock.getBytesRef(rhsBlock.getFirstValueIndex(p), rhsScratch))); + } + return result.build(); + } + } + + public BooleanVector eval(int positionCount, BytesRefVector lhsVector, BytesRefVector rhsVector) { + try(BooleanVector.Builder result = driverContext.blockFactory().newBooleanVectorBuilder(positionCount)) { + BytesRef lhsScratch = new BytesRef(); + BytesRef rhsScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + result.appendBoolean(NotEquals.processGeometries(lhsVector.getBytesRef(p, lhsScratch), rhsVector.getBytesRef(p, rhsScratch))); + } + return result.build(); + } + } + + @Override + public String toString() { + return "NotEqualsGeometriesEvaluator[" + "lhs=" + lhs + ", rhs=" + rhs + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(lhs, rhs); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory lhs; + + private final EvalOperator.ExpressionEvaluator.Factory rhs; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory lhs, + EvalOperator.ExpressionEvaluator.Factory rhs) { + this.source = source; + this.lhs = lhs; + this.rhs = rhs; + } + + @Override + public NotEqualsGeometriesEvaluator get(DriverContext context) { 
+ return new NotEqualsGeometriesEvaluator(source, lhs.get(context), rhs.get(context), context); + } + + @Override + public String toString() { + return "NotEqualsGeometriesEvaluator[" + "lhs=" + lhs + ", rhs=" + rhs + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointFromLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointFromLongEvaluator.java new file mode 100644 index 0000000000000..a88e65a879dd2 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointFromLongEvaluator.java @@ -0,0 +1,124 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link ToCartesianPoint}. + * This class is generated. Do not edit it. 
+ */ +public final class ToCartesianPointFromLongEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public ToCartesianPointFromLongEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "ToCartesianPointFromLong"; + } + + @Override + public Block evalVector(Vector v) { + LongVector vector = (LongVector) v; + int positionCount = v.getPositionCount(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendBytesRef(evalValue(vector, p)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(LongVector container, int index) { + long value = container.getLong(index); + return ToCartesianPoint.fromLong(value); + } + + @Override + public Block evalBlock(Block b) { + LongBlock block = (LongBlock) b; + int positionCount = block.getPositionCount(); + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + BytesRef value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendBytesRef(value); + valuesAppended = true; + } catch (IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(LongBlock container, int index) { + long value = container.getLong(index); + return ToCartesianPoint.fromLong(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public ToCartesianPointFromLongEvaluator get(DriverContext context) { + return new ToCartesianPointFromLongEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "ToCartesianPointFromLongEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointFromStringEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointFromStringEvaluator.java index 7fffc3845b0e9..ee5159be521d6 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointFromStringEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointFromStringEvaluator.java @@ -11,7 +11,6 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; -import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Vector; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; @@ -39,16 +38,16 @@ public Block evalVector(Vector v) { BytesRef scratchPad = new BytesRef(); if (vector.isConstant()) { try { - return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0, scratchPad), positionCount); + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); } catch (IllegalArgumentException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } } - try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { try { - builder.appendLong(evalValue(vector, p, scratchPad)); + builder.appendBytesRef(evalValue(vector, p, scratchPad)); } catch (IllegalArgumentException e) { registerException(e); builder.appendNull(); @@ -58,7 +57,7 @@ public Block evalVector(Vector v) { } } - private static long evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + private static BytesRef evalValue(BytesRefVector container, int index, BytesRef scratchPad) { BytesRef value = container.getBytesRef(index, scratchPad); return ToCartesianPoint.fromKeyword(value); } @@ -67,7 +66,7 @@ private static long evalValue(BytesRefVector container, int index, BytesRef scra public Block evalBlock(Block b) { BytesRefBlock block = (BytesRefBlock) b; int positionCount = block.getPositionCount(); - try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { BytesRef scratchPad = new BytesRef(); for (int p = 0; p < positionCount; p++) { int valueCount = block.getValueCount(p); @@ -77,12 +76,12 @@ public Block evalBlock(Block b) { boolean valuesAppended = false; for (int i = start; i < end; i++) { try { - long value = evalValue(block, i, scratchPad); + BytesRef value = evalValue(block, i, scratchPad); if (positionOpened == false && valueCount > 1) { builder.beginPositionEntry(); positionOpened = true; } - builder.appendLong(value); + builder.appendBytesRef(value); valuesAppended = true; } catch (IllegalArgumentException e) { registerException(e); @@ -98,7 +97,7 @@ public Block evalBlock(Block b) { } } - private static long evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + private static BytesRef evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { BytesRef value = container.getBytesRef(index, scratchPad); return ToCartesianPoint.fromKeyword(value); } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointFromLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointFromLongEvaluator.java new file mode 100644 index 
0000000000000..32766ff32c14f --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointFromLongEvaluator.java @@ -0,0 +1,124 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link ToGeoPoint}. + * This class is generated. Do not edit it. + */ +public final class ToGeoPointFromLongEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public ToGeoPointFromLongEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "ToGeoPointFromLong"; + } + + @Override + public Block evalVector(Vector v) { + LongVector vector = (LongVector) v; + int positionCount = v.getPositionCount(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendBytesRef(evalValue(vector, p)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(LongVector container, int index) { + long value = container.getLong(index); + return ToGeoPoint.fromLong(value); + } + + @Override + public Block evalBlock(Block b) { + LongBlock block = (LongBlock) b; + int positionCount = block.getPositionCount(); + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + BytesRef value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendBytesRef(value); + valuesAppended = true; + } catch (IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(LongBlock container, int index) { + long value = 
container.getLong(index); + return ToGeoPoint.fromLong(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public ToGeoPointFromLongEvaluator get(DriverContext context) { + return new ToGeoPointFromLongEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "ToGeoPointFromLongEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointFromStringEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointFromStringEvaluator.java index de4e66f55ae66..7ef047655b49e 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointFromStringEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointFromStringEvaluator.java @@ -11,7 +11,6 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; -import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Vector; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; @@ -39,16 +38,16 @@ public Block evalVector(Vector v) { BytesRef scratchPad = new BytesRef(); if (vector.isConstant()) { try { - return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0, scratchPad), positionCount); + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); } catch (IllegalArgumentException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } } - try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { try { - builder.appendLong(evalValue(vector, p, scratchPad)); + builder.appendBytesRef(evalValue(vector, p, scratchPad)); } catch (IllegalArgumentException e) { registerException(e); builder.appendNull(); @@ -58,7 +57,7 @@ public Block evalVector(Vector v) { } } - private static long evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + private static BytesRef evalValue(BytesRefVector container, int index, BytesRef scratchPad) { BytesRef value = container.getBytesRef(index, scratchPad); return ToGeoPoint.fromKeyword(value); } @@ -67,7 +66,7 @@ private static long evalValue(BytesRefVector container, int index, BytesRef scra public Block evalBlock(Block b) { BytesRefBlock block = (BytesRefBlock) b; int positionCount = block.getPositionCount(); - try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { BytesRef scratchPad = new BytesRef(); for (int p = 0; p < positionCount; p++) { int valueCount = block.getValueCount(p); @@ -77,12 +76,12 @@ 
public Block evalBlock(Block b) { boolean valuesAppended = false; for (int i = start; i < end; i++) { try { - long value = evalValue(block, i, scratchPad); + BytesRef value = evalValue(block, i, scratchPad); if (positionOpened == false && valueCount > 1) { builder.beginPositionEntry(); positionOpened = true; } - builder.appendLong(value); + builder.appendBytesRef(value); valuesAppended = true; } catch (IllegalArgumentException e) { registerException(e); @@ -98,7 +97,7 @@ public Block evalBlock(Block b) { } } - private static long evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + private static BytesRef evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { BytesRef value = container.getBytesRef(index, scratchPad); return ToGeoPoint.fromKeyword(value); } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromCartesianPointEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromCartesianPointEvaluator.java new file mode 100644 index 0000000000000..5f424bc4e568b --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromCartesianPointEvaluator.java @@ -0,0 +1,111 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link ToLong}. + * This class is generated. Do not edit it. 
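+ * <p>
+ * Unlike the string-parsing converters above, this conversion is not expected
+ * to throw, so the generated eval methods carry no try/catch blocks.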
+ */ +public final class ToLongFromCartesianPointEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public ToLongFromCartesianPointEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "ToLongFromCartesianPoint"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + builder.appendLong(evalValue(vector, p, scratchPad)); + } + return builder.build(); + } + } + + private static long evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToLong.fromCartesianPoint(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + long value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendLong(value); + valuesAppended = true; + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static long evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToLong.fromCartesianPoint(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public ToLongFromCartesianPointEvaluator get(DriverContext context) { + return new ToLongFromCartesianPointEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "ToLongFromCartesianPointEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromDoubleEvaluator.java index b8b86f1d6cbf1..03daa257e5af2 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromDoubleEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromDoubleEvaluator.java @@ -14,7 +14,6 @@ import 
org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.xpack.ql.InvalidArgumentException; -import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.tree.Source; /** @@ -39,7 +38,7 @@ public Block evalVector(Vector v) { if (vector.isConstant()) { try { return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0), positionCount); - } catch (InvalidArgumentException | QlIllegalArgumentException e) { + } catch (InvalidArgumentException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } @@ -48,7 +47,7 @@ public Block evalVector(Vector v) { for (int p = 0; p < positionCount; p++) { try { builder.appendLong(evalValue(vector, p)); - } catch (InvalidArgumentException | QlIllegalArgumentException e) { + } catch (InvalidArgumentException e) { registerException(e); builder.appendNull(); } @@ -82,7 +81,7 @@ public Block evalBlock(Block b) { } builder.appendLong(value); valuesAppended = true; - } catch (InvalidArgumentException | QlIllegalArgumentException e) { + } catch (InvalidArgumentException e) { registerException(e); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromGeoPointEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromGeoPointEvaluator.java new file mode 100644 index 0000000000000..e85f2191023fe --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromGeoPointEvaluator.java @@ -0,0 +1,111 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link ToLong}. + * This class is generated. Do not edit it. 
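+ * <p>
+ * A single scratch {@code BytesRef} is allocated per eval call and reused for
+ * every position, avoiding a per-row allocation when reading the vector/block.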
+ */ +public final class ToLongFromGeoPointEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public ToLongFromGeoPointEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "ToLongFromGeoPoint"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + builder.appendLong(evalValue(vector, p, scratchPad)); + } + return builder.build(); + } + } + + private static long evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToLong.fromGeoPoint(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + long value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendLong(value); + valuesAppended = true; + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static long evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToLong.fromGeoPoint(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public ToLongFromGeoPointEvaluator get(DriverContext context) { + return new ToLongFromGeoPointEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "ToLongFromGeoPointEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromUnsignedLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromUnsignedLongEvaluator.java index 41f8980581073..b5999d1a4e1ab 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromUnsignedLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromUnsignedLongEvaluator.java @@ -13,7 +13,6 @@ import org.elasticsearch.compute.operator.DriverContext; 
import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.xpack.ql.InvalidArgumentException; -import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.tree.Source; /** @@ -38,7 +37,7 @@ public Block evalVector(Vector v) { if (vector.isConstant()) { try { return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0), positionCount); - } catch (InvalidArgumentException | QlIllegalArgumentException e) { + } catch (InvalidArgumentException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } @@ -47,7 +46,7 @@ public Block evalVector(Vector v) { for (int p = 0; p < positionCount; p++) { try { builder.appendLong(evalValue(vector, p)); - } catch (InvalidArgumentException | QlIllegalArgumentException e) { + } catch (InvalidArgumentException e) { registerException(e); builder.appendNull(); } @@ -81,7 +80,7 @@ public Block evalBlock(Block b) { } builder.appendLong(value); valuesAppended = true; - } catch (InvalidArgumentException | QlIllegalArgumentException e) { + } catch (InvalidArgumentException e) { registerException(e); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromCartesianPointEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromCartesianPointEvaluator.java index 58544ddccb682..b15f77608598d 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromCartesianPointEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromCartesianPointEvaluator.java @@ -9,8 +9,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; -import org.elasticsearch.compute.data.LongBlock; -import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.BytesRefVector; import org.elasticsearch.compute.data.Vector; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; @@ -33,29 +32,31 @@ public String name() { @Override public Block evalVector(Vector v) { - LongVector vector = (LongVector) v; + BytesRefVector vector = (BytesRefVector) v; int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); if (vector.isConstant()) { - return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0), positionCount); + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); } try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - builder.appendBytesRef(evalValue(vector, p)); + builder.appendBytesRef(evalValue(vector, p, scratchPad)); } return builder.build(); } } - private static BytesRef evalValue(LongVector container, int index) { - long value = container.getLong(index); + private static BytesRef evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); return ToString.fromCartesianPoint(value); } @Override public Block evalBlock(Block b) { - LongBlock block = (LongBlock) b; + BytesRefBlock block = (BytesRefBlock) b; int positionCount = 
block.getPositionCount(); try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); for (int p = 0; p < positionCount; p++) { int valueCount = block.getValueCount(p); int start = block.getFirstValueIndex(p); @@ -63,7 +64,7 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - BytesRef value = evalValue(block, i); + BytesRef value = evalValue(block, i, scratchPad); if (positionOpened == false && valueCount > 1) { builder.beginPositionEntry(); positionOpened = true; @@ -81,8 +82,8 @@ public Block evalBlock(Block b) { } } - private static BytesRef evalValue(LongBlock container, int index) { - long value = container.getLong(index); + private static BytesRef evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); return ToString.fromCartesianPoint(value); } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromGeoPointEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromGeoPointEvaluator.java index 9d2b514ff2482..32fe16075e046 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromGeoPointEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromGeoPointEvaluator.java @@ -9,8 +9,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; -import org.elasticsearch.compute.data.LongBlock; -import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.BytesRefVector; import org.elasticsearch.compute.data.Vector; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; @@ -33,29 +32,31 @@ public String name() { @Override public Block evalVector(Vector v) { - LongVector vector = (LongVector) v; + BytesRefVector vector = (BytesRefVector) v; int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); if (vector.isConstant()) { - return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0), positionCount); + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); } try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - builder.appendBytesRef(evalValue(vector, p)); + builder.appendBytesRef(evalValue(vector, p, scratchPad)); } return builder.build(); } } - private static BytesRef evalValue(LongVector container, int index) { - long value = container.getLong(index); + private static BytesRef evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); return ToString.fromGeoPoint(value); } @Override public Block evalBlock(Block b) { - LongBlock block = (LongBlock) b; + BytesRefBlock block = (BytesRefBlock) b; int positionCount = block.getPositionCount(); try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); for (int p = 0; p 
< positionCount; p++) { int valueCount = block.getValueCount(p); int start = block.getFirstValueIndex(p); @@ -63,7 +64,7 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - BytesRef value = evalValue(block, i); + BytesRef value = evalValue(block, i, scratchPad); if (positionOpened == false && valueCount > 1) { builder.beginPositionEntry(); positionOpened = true; @@ -81,8 +82,8 @@ public Block evalBlock(Block b) { } } - private static BytesRef evalValue(LongBlock container, int index) { - long value = container.getLong(index); + private static BytesRef evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); return ToString.fromGeoPoint(value); } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromDoubleEvaluator.java index 6d57bbd978370..6a45dcf907889 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromDoubleEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromDoubleEvaluator.java @@ -14,7 +14,6 @@ import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.xpack.ql.InvalidArgumentException; -import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.tree.Source; /** @@ -39,7 +38,7 @@ public Block evalVector(Vector v) { if (vector.isConstant()) { try { return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0), positionCount); - } catch (InvalidArgumentException | QlIllegalArgumentException e) { + } catch (InvalidArgumentException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } @@ -48,7 +47,7 @@ public Block evalVector(Vector v) { for (int p = 0; p < positionCount; p++) { try { builder.appendLong(evalValue(vector, p)); - } catch (InvalidArgumentException | QlIllegalArgumentException e) { + } catch (InvalidArgumentException e) { registerException(e); builder.appendNull(); } @@ -82,7 +81,7 @@ public Block evalBlock(Block b) { } builder.appendLong(value); valuesAppended = true; - } catch (InvalidArgumentException | QlIllegalArgumentException e) { + } catch (InvalidArgumentException e) { registerException(e); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffConstantEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffConstantEvaluator.java new file mode 100644 index 0000000000000..3cb41d0028d54 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffConstantEvaluator.java @@ -0,0 +1,154 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
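+// DateDiffConstantEvaluator is the specialization emitted when the "unit"
+// argument is a constant: it is resolved once to a DateDiff.Part up front
+// rather than being evaluated for every row (compare DateDiffEvaluator below).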
+package org.elasticsearch.xpack.esql.expression.function.scalar.date; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link DateDiff}. + * This class is generated. Do not edit it. + */ +public final class DateDiffConstantEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final DateDiff.Part datePartFieldUnit; + + private final EvalOperator.ExpressionEvaluator startTimestamp; + + private final EvalOperator.ExpressionEvaluator endTimestamp; + + private final DriverContext driverContext; + + public DateDiffConstantEvaluator(Source source, DateDiff.Part datePartFieldUnit, + EvalOperator.ExpressionEvaluator startTimestamp, + EvalOperator.ExpressionEvaluator endTimestamp, DriverContext driverContext) { + this.warnings = new Warnings(source); + this.datePartFieldUnit = datePartFieldUnit; + this.startTimestamp = startTimestamp; + this.endTimestamp = endTimestamp; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (LongBlock startTimestampBlock = (LongBlock) startTimestamp.eval(page)) { + try (LongBlock endTimestampBlock = (LongBlock) endTimestamp.eval(page)) { + LongVector startTimestampVector = startTimestampBlock.asVector(); + if (startTimestampVector == null) { + return eval(page.getPositionCount(), startTimestampBlock, endTimestampBlock); + } + LongVector endTimestampVector = endTimestampBlock.asVector(); + if (endTimestampVector == null) { + return eval(page.getPositionCount(), startTimestampBlock, endTimestampBlock); + } + return eval(page.getPositionCount(), startTimestampVector, endTimestampVector); + } + } + } + + public IntBlock eval(int positionCount, LongBlock startTimestampBlock, + LongBlock endTimestampBlock) { + try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + if (startTimestampBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (startTimestampBlock.getValueCount(p) != 1) { + if (startTimestampBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (endTimestampBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (endTimestampBlock.getValueCount(p) != 1) { + if (endTimestampBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + try { + result.appendInt(DateDiff.process(datePartFieldUnit, startTimestampBlock.getLong(startTimestampBlock.getFirstValueIndex(p)), endTimestampBlock.getLong(endTimestampBlock.getFirstValueIndex(p)))); + } catch (IllegalArgumentException | InvalidArgumentException 
e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + public IntBlock eval(int positionCount, LongVector startTimestampVector, + LongVector endTimestampVector) { + try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + try { + result.appendInt(DateDiff.process(datePartFieldUnit, startTimestampVector.getLong(p), endTimestampVector.getLong(p))); + } catch (IllegalArgumentException | InvalidArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "DateDiffConstantEvaluator[" + "datePartFieldUnit=" + datePartFieldUnit + ", startTimestamp=" + startTimestamp + ", endTimestamp=" + endTimestamp + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(startTimestamp, endTimestamp); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final DateDiff.Part datePartFieldUnit; + + private final EvalOperator.ExpressionEvaluator.Factory startTimestamp; + + private final EvalOperator.ExpressionEvaluator.Factory endTimestamp; + + public Factory(Source source, DateDiff.Part datePartFieldUnit, + EvalOperator.ExpressionEvaluator.Factory startTimestamp, + EvalOperator.ExpressionEvaluator.Factory endTimestamp) { + this.source = source; + this.datePartFieldUnit = datePartFieldUnit; + this.startTimestamp = startTimestamp; + this.endTimestamp = endTimestamp; + } + + @Override + public DateDiffConstantEvaluator get(DriverContext context) { + return new DateDiffConstantEvaluator(source, datePartFieldUnit, startTimestamp.get(context), endTimestamp.get(context), context); + } + + @Override + public String toString() { + return "DateDiffConstantEvaluator[" + "datePartFieldUnit=" + datePartFieldUnit + ", startTimestamp=" + startTimestamp + ", endTimestamp=" + endTimestamp + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffEvaluator.java new file mode 100644 index 0000000000000..952a819a014a9 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffEvaluator.java @@ -0,0 +1,176 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
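+// DateDiffEvaluator is the general form: the "unit" argument is an expression,
+// evaluated per position as a BytesRef and parsed inside DateDiff.process;
+// failures are registered as warnings and the position is set to null.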
+package org.elasticsearch.xpack.esql.expression.function.scalar.date; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link DateDiff}. + * This class is generated. Do not edit it. + */ +public final class DateDiffEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator unit; + + private final EvalOperator.ExpressionEvaluator startTimestamp; + + private final EvalOperator.ExpressionEvaluator endTimestamp; + + private final DriverContext driverContext; + + public DateDiffEvaluator(Source source, EvalOperator.ExpressionEvaluator unit, + EvalOperator.ExpressionEvaluator startTimestamp, + EvalOperator.ExpressionEvaluator endTimestamp, DriverContext driverContext) { + this.warnings = new Warnings(source); + this.unit = unit; + this.startTimestamp = startTimestamp; + this.endTimestamp = endTimestamp; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock unitBlock = (BytesRefBlock) unit.eval(page)) { + try (LongBlock startTimestampBlock = (LongBlock) startTimestamp.eval(page)) { + try (LongBlock endTimestampBlock = (LongBlock) endTimestamp.eval(page)) { + BytesRefVector unitVector = unitBlock.asVector(); + if (unitVector == null) { + return eval(page.getPositionCount(), unitBlock, startTimestampBlock, endTimestampBlock); + } + LongVector startTimestampVector = startTimestampBlock.asVector(); + if (startTimestampVector == null) { + return eval(page.getPositionCount(), unitBlock, startTimestampBlock, endTimestampBlock); + } + LongVector endTimestampVector = endTimestampBlock.asVector(); + if (endTimestampVector == null) { + return eval(page.getPositionCount(), unitBlock, startTimestampBlock, endTimestampBlock); + } + return eval(page.getPositionCount(), unitVector, startTimestampVector, endTimestampVector); + } + } + } + } + + public IntBlock eval(int positionCount, BytesRefBlock unitBlock, LongBlock startTimestampBlock, + LongBlock endTimestampBlock) { + try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + BytesRef unitScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (unitBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (unitBlock.getValueCount(p) != 1) { + if (unitBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (startTimestampBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (startTimestampBlock.getValueCount(p) != 1) { + if 
(startTimestampBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (endTimestampBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (endTimestampBlock.getValueCount(p) != 1) { + if (endTimestampBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + try { + result.appendInt(DateDiff.process(unitBlock.getBytesRef(unitBlock.getFirstValueIndex(p), unitScratch), startTimestampBlock.getLong(startTimestampBlock.getFirstValueIndex(p)), endTimestampBlock.getLong(endTimestampBlock.getFirstValueIndex(p)))); + } catch (IllegalArgumentException | InvalidArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + public IntBlock eval(int positionCount, BytesRefVector unitVector, + LongVector startTimestampVector, LongVector endTimestampVector) { + try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + BytesRef unitScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + try { + result.appendInt(DateDiff.process(unitVector.getBytesRef(p, unitScratch), startTimestampVector.getLong(p), endTimestampVector.getLong(p))); + } catch (IllegalArgumentException | InvalidArgumentException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "DateDiffEvaluator[" + "unit=" + unit + ", startTimestamp=" + startTimestamp + ", endTimestamp=" + endTimestamp + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(unit, startTimestamp, endTimestamp); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory unit; + + private final EvalOperator.ExpressionEvaluator.Factory startTimestamp; + + private final EvalOperator.ExpressionEvaluator.Factory endTimestamp; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory unit, + EvalOperator.ExpressionEvaluator.Factory startTimestamp, + EvalOperator.ExpressionEvaluator.Factory endTimestamp) { + this.source = source; + this.unit = unit; + this.startTimestamp = startTimestamp; + this.endTimestamp = endTimestamp; + } + + @Override + public DateDiffEvaluator get(DriverContext context) { + return new DateDiffEvaluator(source, unit.get(context), startTimestamp.get(context), endTimestamp.get(context), context); + } + + @Override + public String toString() { + return "DateDiffEvaluator[" + "unit=" + unit + ", startTimestamp=" + startTimestamp + ", endTimestamp=" + endTimestamp + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgDoubleEvaluator.java index 6a9278efd2f6a..d87444746d2c6 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgDoubleEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgDoubleEvaluator.java @@ 
-18,11 +18,8 @@ * This class is generated. Do not edit it. */ public final class MvAvgDoubleEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { - private final DriverContext driverContext; - public MvAvgDoubleEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgIntEvaluator.java index 8f2abc5e759b4..63e6b4eb12106 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgIntEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgIntEvaluator.java @@ -19,11 +19,8 @@ * This class is generated. Do not edit it. */ public final class MvAvgIntEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { - private final DriverContext driverContext; - public MvAvgIntEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgLongEvaluator.java index b01424846c4a7..d699070747b49 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgLongEvaluator.java @@ -19,11 +19,8 @@ * This class is generated. Do not edit it. */ public final class MvAvgLongEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { - private final DriverContext driverContext; - public MvAvgLongEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgUnsignedLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgUnsignedLongEvaluator.java index 41e18cf1424a3..5c63508fa3560 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgUnsignedLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgUnsignedLongEvaluator.java @@ -19,12 +19,9 @@ * This class is generated. Do not edit it. 
*/ public final class MvAvgUnsignedLongEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { - private final DriverContext driverContext; - public MvAvgUnsignedLongEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstBooleanEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstBooleanEvaluator.java new file mode 100644 index 0000000000000..bf946aab347d2 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstBooleanEvaluator.java @@ -0,0 +1,89 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvFirst}. + * This class is generated. Do not edit it. + */ +public final class MvFirstBooleanEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { + public MvFirstBooleanEvaluator(EvalOperator.ExpressionEvaluator field, + DriverContext driverContext) { + super(driverContext, field); + } + + @Override + public String name() { + return "MvFirst"; + } + + /** + * Evaluate blocks containing at least one multivalued field. + */ + @Override + public Block evalNullable(Block fieldVal) { + BooleanBlock v = (BooleanBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BooleanBlock.Builder builder = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; + } + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + boolean result = MvFirst.process(v, first, end); + builder.appendBoolean(result); + } + return builder.build(); + } + } + + /** + * Evaluate blocks containing at least one multivalued field. 
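+ * Since no position can be null on this path, the result is assembled with a
+ * fixed-size vector builder instead of a nullable block builder.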
+ */ + @Override + public Block evalNotNullable(Block fieldVal) { + BooleanBlock v = (BooleanBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BooleanVector.FixedBuilder builder = driverContext.blockFactory().newBooleanVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + boolean result = MvFirst.process(v, first, end); + builder.appendBoolean(result); + } + return builder.build().asBlock(); + } + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field) { + this.field = field; + } + + @Override + public MvFirstBooleanEvaluator get(DriverContext context) { + return new MvFirstBooleanEvaluator(field.get(context), context); + } + + @Override + public String toString() { + return "MvFirst[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstBytesRefEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstBytesRefEvaluator.java new file mode 100644 index 0000000000000..e0cb6ca4c289b --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstBytesRefEvaluator.java @@ -0,0 +1,92 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvFirst}. + * This class is generated. Do not edit it. + */ +public final class MvFirstBytesRefEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { + public MvFirstBytesRefEvaluator(EvalOperator.ExpressionEvaluator field, + DriverContext driverContext) { + super(driverContext, field); + } + + @Override + public String name() { + return "MvFirst"; + } + + /** + * Evaluate blocks containing at least one multivalued field. + */ + @Override + public Block evalNullable(Block fieldVal) { + BytesRefBlock v = (BytesRefBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef valueScratch = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; + } + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + BytesRef result = MvFirst.process(v, first, end, valueScratch); + builder.appendBytesRef(result); + } + return builder.build(); + } + } + + /** + * Evaluate blocks containing at least one multivalued field. 
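+ * BytesRef values are variable-width, so this path uses the growable
+ * BytesRefVector.Builder rather than a fixed-size builder.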
+ */ + @Override + public Block evalNotNullable(Block fieldVal) { + BytesRefBlock v = (BytesRefBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BytesRefVector.Builder builder = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) { + BytesRef valueScratch = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + BytesRef result = MvFirst.process(v, first, end, valueScratch); + builder.appendBytesRef(result); + } + return builder.build().asBlock(); + } + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field) { + this.field = field; + } + + @Override + public MvFirstBytesRefEvaluator get(DriverContext context) { + return new MvFirstBytesRefEvaluator(field.get(context), context); + } + + @Override + public String toString() { + return "MvFirst[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstDoubleEvaluator.java new file mode 100644 index 0000000000000..584319cc1ab82 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstDoubleEvaluator.java @@ -0,0 +1,89 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvFirst}. + * This class is generated. Do not edit it. + */ +public final class MvFirstDoubleEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { + public MvFirstDoubleEvaluator(EvalOperator.ExpressionEvaluator field, + DriverContext driverContext) { + super(driverContext, field); + } + + @Override + public String name() { + return "MvFirst"; + } + + /** + * Evaluate blocks containing at least one multivalued field. + */ + @Override + public Block evalNullable(Block fieldVal) { + DoubleBlock v = (DoubleBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; + } + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + double result = MvFirst.process(v, first, end); + builder.appendDouble(result); + } + return builder.build(); + } + } + + /** + * Evaluate blocks containing at least one multivalued field. 
+ */ + @Override + public Block evalNotNullable(Block fieldVal) { + DoubleBlock v = (DoubleBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + double result = MvFirst.process(v, first, end); + builder.appendDouble(result); + } + return builder.build().asBlock(); + } + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field) { + this.field = field; + } + + @Override + public MvFirstDoubleEvaluator get(DriverContext context) { + return new MvFirstDoubleEvaluator(field.get(context), context); + } + + @Override + public String toString() { + return "MvFirst[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstIntEvaluator.java new file mode 100644 index 0000000000000..1e9c50d135559 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstIntEvaluator.java @@ -0,0 +1,88 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvFirst}. + * This class is generated. Do not edit it. + */ +public final class MvFirstIntEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { + public MvFirstIntEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { + super(driverContext, field); + } + + @Override + public String name() { + return "MvFirst"; + } + + /** + * Evaluate blocks containing at least one multivalued field. + */ + @Override + public Block evalNullable(Block fieldVal) { + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; + } + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + int result = MvFirst.process(v, first, end); + builder.appendInt(result); + } + return builder.build(); + } + } + + /** + * Evaluate blocks containing at least one multivalued field. 
+ */ + @Override + public Block evalNotNullable(Block fieldVal) { + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (IntVector.FixedBuilder builder = driverContext.blockFactory().newIntVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + int result = MvFirst.process(v, first, end); + builder.appendInt(result); + } + return builder.build().asBlock(); + } + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field) { + this.field = field; + } + + @Override + public MvFirstIntEvaluator get(DriverContext context) { + return new MvFirstIntEvaluator(field.get(context), context); + } + + @Override + public String toString() { + return "MvFirst[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstLongEvaluator.java new file mode 100644 index 0000000000000..7e118a1eb9eb8 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstLongEvaluator.java @@ -0,0 +1,88 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvFirst}. + * This class is generated. Do not edit it. + */ +public final class MvFirstLongEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { + public MvFirstLongEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { + super(driverContext, field); + } + + @Override + public String name() { + return "MvFirst"; + } + + /** + * Evaluate blocks containing at least one multivalued field. + */ + @Override + public Block evalNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; + } + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + long result = MvFirst.process(v, first, end); + builder.appendLong(result); + } + return builder.build(); + } + } + + /** + * Evaluate blocks containing at least one multivalued field. 
+ */ + @Override + public Block evalNotNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongVector.FixedBuilder builder = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + long result = MvFirst.process(v, first, end); + builder.appendLong(result); + } + return builder.build().asBlock(); + } + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field) { + this.field = field; + } + + @Override + public MvFirstLongEvaluator get(DriverContext context) { + return new MvFirstLongEvaluator(field.get(context), context); + } + + @Override + public String toString() { + return "MvFirst[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastBooleanEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastBooleanEvaluator.java new file mode 100644 index 0000000000000..75b49f2c3e8ee --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastBooleanEvaluator.java @@ -0,0 +1,89 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvLast}. + * This class is generated. Do not edit it. + */ +public final class MvLastBooleanEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { + public MvLastBooleanEvaluator(EvalOperator.ExpressionEvaluator field, + DriverContext driverContext) { + super(driverContext, field); + } + + @Override + public String name() { + return "MvLast"; + } + + /** + * Evaluate blocks containing at least one multivalued field. + */ + @Override + public Block evalNullable(Block fieldVal) { + BooleanBlock v = (BooleanBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BooleanBlock.Builder builder = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; + } + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + boolean result = MvLast.process(v, first, end); + builder.appendBoolean(result); + } + return builder.build(); + } + } + + /** + * Evaluate blocks containing at least one multivalued field. 
+ */ + @Override + public Block evalNotNullable(Block fieldVal) { + BooleanBlock v = (BooleanBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BooleanVector.FixedBuilder builder = driverContext.blockFactory().newBooleanVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + boolean result = MvLast.process(v, first, end); + builder.appendBoolean(result); + } + return builder.build().asBlock(); + } + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field) { + this.field = field; + } + + @Override + public MvLastBooleanEvaluator get(DriverContext context) { + return new MvLastBooleanEvaluator(field.get(context), context); + } + + @Override + public String toString() { + return "MvLast[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastBytesRefEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastBytesRefEvaluator.java new file mode 100644 index 0000000000000..fcca356b38576 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastBytesRefEvaluator.java @@ -0,0 +1,92 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvLast}. + * This class is generated. Do not edit it. + */ +public final class MvLastBytesRefEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { + public MvLastBytesRefEvaluator(EvalOperator.ExpressionEvaluator field, + DriverContext driverContext) { + super(driverContext, field); + } + + @Override + public String name() { + return "MvLast"; + } + + /** + * Evaluate blocks containing at least one multivalued field. + */ + @Override + public Block evalNullable(Block fieldVal) { + BytesRefBlock v = (BytesRefBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef valueScratch = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; + } + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + BytesRef result = MvLast.process(v, first, end, valueScratch); + builder.appendBytesRef(result); + } + return builder.build(); + } + } + + /** + * Evaluate blocks containing at least one multivalued field. 
+ */ + @Override + public Block evalNotNullable(Block fieldVal) { + BytesRefBlock v = (BytesRefBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (BytesRefVector.Builder builder = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) { + BytesRef valueScratch = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + BytesRef result = MvLast.process(v, first, end, valueScratch); + builder.appendBytesRef(result); + } + return builder.build().asBlock(); + } + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field) { + this.field = field; + } + + @Override + public MvLastBytesRefEvaluator get(DriverContext context) { + return new MvLastBytesRefEvaluator(field.get(context), context); + } + + @Override + public String toString() { + return "MvLast[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastDoubleEvaluator.java new file mode 100644 index 0000000000000..b0cf7bf59900b --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastDoubleEvaluator.java @@ -0,0 +1,89 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvLast}. + * This class is generated. Do not edit it. + */ +public final class MvLastDoubleEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { + public MvLastDoubleEvaluator(EvalOperator.ExpressionEvaluator field, + DriverContext driverContext) { + super(driverContext, field); + } + + @Override + public String name() { + return "MvLast"; + } + + /** + * Evaluate blocks containing at least one multivalued field. + */ + @Override + public Block evalNullable(Block fieldVal) { + DoubleBlock v = (DoubleBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; + } + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + double result = MvLast.process(v, first, end); + builder.appendDouble(result); + } + return builder.build(); + } + } + + /** + * Evaluate blocks containing at least one multivalued field. 
+ */ + @Override + public Block evalNotNullable(Block fieldVal) { + DoubleBlock v = (DoubleBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (DoubleVector.FixedBuilder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + double result = MvLast.process(v, first, end); + builder.appendDouble(result); + } + return builder.build().asBlock(); + } + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field) { + this.field = field; + } + + @Override + public MvLastDoubleEvaluator get(DriverContext context) { + return new MvLastDoubleEvaluator(field.get(context), context); + } + + @Override + public String toString() { + return "MvLast[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastIntEvaluator.java new file mode 100644 index 0000000000000..5c2af9218308d --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastIntEvaluator.java @@ -0,0 +1,88 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvLast}. + * This class is generated. Do not edit it. + */ +public final class MvLastIntEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { + public MvLastIntEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { + super(driverContext, field); + } + + @Override + public String name() { + return "MvLast"; + } + + /** + * Evaluate blocks containing at least one multivalued field. + */ + @Override + public Block evalNullable(Block fieldVal) { + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; + } + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + int result = MvLast.process(v, first, end); + builder.appendInt(result); + } + return builder.build(); + } + } + + /** + * Evaluate blocks containing at least one multivalued field. 
+ */ + @Override + public Block evalNotNullable(Block fieldVal) { + IntBlock v = (IntBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (IntVector.FixedBuilder builder = driverContext.blockFactory().newIntVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + int result = MvLast.process(v, first, end); + builder.appendInt(result); + } + return builder.build().asBlock(); + } + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field) { + this.field = field; + } + + @Override + public MvLastIntEvaluator get(DriverContext context) { + return new MvLastIntEvaluator(field.get(context), context); + } + + @Override + public String toString() { + return "MvLast[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastLongEvaluator.java new file mode 100644 index 0000000000000..37b95378f1f5b --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastLongEvaluator.java @@ -0,0 +1,88 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import java.lang.Override; +import java.lang.String; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link MvLast}. + * This class is generated. Do not edit it. + */ +public final class MvLastLongEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { + public MvLastLongEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { + super(driverContext, field); + } + + @Override + public String name() { + return "MvLast"; + } + + /** + * Evaluate blocks containing at least one multivalued field. + */ + @Override + public Block evalNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + if (valueCount == 0) { + builder.appendNull(); + continue; + } + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + long result = MvLast.process(v, first, end); + builder.appendLong(result); + } + return builder.build(); + } + } + + /** + * Evaluate blocks containing at least one multivalued field. 
+ */ + @Override + public Block evalNotNullable(Block fieldVal) { + LongBlock v = (LongBlock) fieldVal; + int positionCount = v.getPositionCount(); + try (LongVector.FixedBuilder builder = driverContext.blockFactory().newLongVectorFixedBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + int valueCount = v.getValueCount(p); + int first = v.getFirstValueIndex(p); + int end = first + valueCount; + long result = MvLast.process(v, first, end); + builder.appendLong(result); + } + return builder.build().asBlock(); + } + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field) { + this.field = field; + } + + @Override + public MvLastLongEvaluator get(DriverContext context) { + return new MvLastLongEvaluator(field.get(context), context); + } + + @Override + public String toString() { + return "MvLast[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxBooleanEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxBooleanEvaluator.java index 46155b23d7512..44b4432edbf6f 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxBooleanEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxBooleanEvaluator.java @@ -17,12 +17,9 @@ * This class is generated. Do not edit it. */ public final class MvMaxBooleanEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { - private final DriverContext driverContext; - public MvMaxBooleanEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxBytesRefEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxBytesRefEvaluator.java index 6f1469e365336..4e2180f2ec467 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxBytesRefEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxBytesRefEvaluator.java @@ -18,12 +18,9 @@ * This class is generated. Do not edit it. 
*/ public final class MvMaxBytesRefEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { - private final DriverContext driverContext; - public MvMaxBytesRefEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxDoubleEvaluator.java index 34e51c2d6f221..4a1be1673bb7c 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxDoubleEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxDoubleEvaluator.java @@ -17,11 +17,8 @@ * This class is generated. Do not edit it. */ public final class MvMaxDoubleEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { - private final DriverContext driverContext; - public MvMaxDoubleEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxIntEvaluator.java index 5382f2dff2fd8..b0ed499efd84f 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxIntEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxIntEvaluator.java @@ -17,11 +17,8 @@ * This class is generated. Do not edit it. */ public final class MvMaxIntEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { - private final DriverContext driverContext; - public MvMaxIntEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxLongEvaluator.java index 331d070315ea6..24397b64c9ccc 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxLongEvaluator.java @@ -17,11 +17,8 @@ * This class is generated. Do not edit it. 
*/ public final class MvMaxLongEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { - private final DriverContext driverContext; - public MvMaxLongEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianDoubleEvaluator.java index 4870712f8f2fb..c3ea505a29e88 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianDoubleEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianDoubleEvaluator.java @@ -17,12 +17,9 @@ * This class is generated. Do not edit it. */ public final class MvMedianDoubleEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { - private final DriverContext driverContext; - public MvMedianDoubleEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianIntEvaluator.java index 83376cb634a8f..d07dc41e1d04b 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianIntEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianIntEvaluator.java @@ -17,11 +17,8 @@ * This class is generated. Do not edit it. */ public final class MvMedianIntEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { - private final DriverContext driverContext; - public MvMedianIntEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianLongEvaluator.java index bf324d4db4f72..f1cd87aefd3d0 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianLongEvaluator.java @@ -17,12 +17,9 @@ * This class is generated. Do not edit it. 
*/ public final class MvMedianLongEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { - private final DriverContext driverContext; - public MvMedianLongEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianUnsignedLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianUnsignedLongEvaluator.java index 3f95ba060f825..031280b767b41 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianUnsignedLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianUnsignedLongEvaluator.java @@ -17,12 +17,9 @@ * This class is generated. Do not edit it. */ public final class MvMedianUnsignedLongEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { - private final DriverContext driverContext; - public MvMedianUnsignedLongEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinBooleanEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinBooleanEvaluator.java index a8546837479a8..ea8b04cc5c4a5 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinBooleanEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinBooleanEvaluator.java @@ -17,12 +17,9 @@ * This class is generated. Do not edit it. */ public final class MvMinBooleanEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { - private final DriverContext driverContext; - public MvMinBooleanEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinBytesRefEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinBytesRefEvaluator.java index f00e7272ae378..31d41ff61e196 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinBytesRefEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinBytesRefEvaluator.java @@ -18,12 +18,9 @@ * This class is generated. Do not edit it. 
*/ public final class MvMinBytesRefEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { - private final DriverContext driverContext; - public MvMinBytesRefEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinDoubleEvaluator.java index 5cd7ee9039a33..5390350751ee7 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinDoubleEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinDoubleEvaluator.java @@ -17,11 +17,8 @@ * This class is generated. Do not edit it. */ public final class MvMinDoubleEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { - private final DriverContext driverContext; - public MvMinDoubleEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinIntEvaluator.java index 93b4612f898ad..918b049780905 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinIntEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinIntEvaluator.java @@ -17,11 +17,8 @@ * This class is generated. Do not edit it. */ public final class MvMinIntEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { - private final DriverContext driverContext; - public MvMinIntEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinLongEvaluator.java index 9c974caecc40d..37a6709d46d4d 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinLongEvaluator.java @@ -17,11 +17,8 @@ * This class is generated. Do not edit it. 
*/ public final class MvMinLongEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { - private final DriverContext driverContext; - public MvMinLongEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumDoubleEvaluator.java index cc54ebad77667..b49a92404ecd1 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumDoubleEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumDoubleEvaluator.java @@ -18,11 +18,8 @@ * This class is generated. Do not edit it. */ public final class MvSumDoubleEvaluator extends AbstractMultivalueFunction.AbstractEvaluator { - private final DriverContext driverContext; - public MvSumDoubleEvaluator(EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumIntEvaluator.java index bd24d4a917e84..20ae9a4047385 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumIntEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumIntEvaluator.java @@ -21,13 +21,10 @@ public final class MvSumIntEvaluator extends AbstractMultivalueFunction.AbstractNullableEvaluator { private final Warnings warnings; - private final DriverContext driverContext; - public MvSumIntEvaluator(Source source, EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); + super(driverContext, field); this.warnings = new Warnings(source); - this.driverContext = driverContext; } @Override diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumLongEvaluator.java index 823d6fa17bee2..bff596a76d697 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumLongEvaluator.java @@ -21,13 +21,10 @@ public final class MvSumLongEvaluator extends AbstractMultivalueFunction.AbstractNullableEvaluator { private final Warnings warnings; - private final DriverContext driverContext; - public MvSumLongEvaluator(Source source, EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); + super(driverContext, field); this.warnings = new Warnings(source); - this.driverContext = driverContext; } @Override diff --git 
a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumUnsignedLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumUnsignedLongEvaluator.java index 8203b46b57a51..28ae5e5a2da3b 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumUnsignedLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumUnsignedLongEvaluator.java @@ -21,13 +21,10 @@ public final class MvSumUnsignedLongEvaluator extends AbstractMultivalueFunction.AbstractNullableEvaluator { private final Warnings warnings; - private final DriverContext driverContext; - public MvSumUnsignedLongEvaluator(Source source, EvalOperator.ExpressionEvaluator field, DriverContext driverContext) { - super(field); + super(driverContext, field); this.warnings = new Warnings(source); - this.driverContext = driverContext; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java index 5636fbbb3b23c..673ec0bc4a184 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java @@ -75,7 +75,7 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par } public abstract class PositionToXContent { - private final Block block; + protected final Block block; PositionToXContent(Block block) { this.block = block; @@ -166,20 +166,14 @@ protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Pa @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { - // TODO Perhaps this is just a long for geo_point? And for more advanced types we need a new block type - long encoded = ((LongBlock) block).getLong(valueIndex); - String wkt = GEO.pointAsString(GEO.longAsPoint(encoded)); - return builder.value(wkt); + return builder.value(GEO.wkbAsString(((BytesRefBlock) block).getBytesRef(valueIndex, scratch))); } }; case "cartesian_point" -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { - // TODO Perhaps this is just a long for cartesian_point? And for more advanced types we need a new block type - long encoded = ((LongBlock) block).getLong(valueIndex); - String wkt = CARTESIAN.pointAsString(CARTESIAN.longAsPoint(encoded)); - return builder.value(wkt); + return builder.value(CARTESIAN.wkbAsString(((BytesRefBlock) block).getBytesRef(valueIndex, scratch))); } }; case "boolean" -> new PositionToXContent(block) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlAsyncGetResultAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlAsyncGetResultAction.java new file mode 100644 index 0000000000000..1603dd8fd3746 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlAsyncGetResultAction.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.action.ActionType; + +public class EsqlAsyncGetResultAction extends ActionType<EsqlQueryResponse> { + + public static final EsqlAsyncGetResultAction INSTANCE = new EsqlAsyncGetResultAction(); + + public static final String NAME = "indices:data/read/esql/async/get"; + + private EsqlAsyncGetResultAction() { + super(NAME, in -> { throw new IllegalArgumentException("can't transport EsqlAsyncGetResultAction"); }); + } +}
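The transport reader for this action deliberately throws: the response is presumably always materialized on the node that serves the stored result rather than deserialized from the wire. A minimal sketch of fetching a stored result by id, assuming the x-pack async framework's `GetAsyncResultRequest` is the matching request type (the transport handler is not part of this hunk, so treat that pairing as an assumption):

```java
// Sketch only. GetAsyncResultRequest comes from x-pack core's async support;
// pairing it with EsqlAsyncGetResultAction is an assumption, not shown here.
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.xpack.core.async.GetAsyncResultRequest;

static void fetchAsyncResult(Client client, String asyncExecutionId) {
    GetAsyncResultRequest request = new GetAsyncResultRequest(asyncExecutionId)
        .setWaitForCompletionTimeout(TimeValue.timeValueSeconds(30));
    client.execute(EsqlAsyncGetResultAction.INSTANCE, request, ActionListener.wrap(response -> {
        // columns/pages are ready here; the response is Releasable, so release it when done
    }, failure -> {
        // surface the error to the caller
    }));
}
```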
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java index 0de89a4d8de2a..841137d749d93 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.tasks.CancellableTask; @@ -43,6 +44,9 @@ public class EsqlQueryRequest extends ActionRequest implements CompositeIndicesRequest { + public static TimeValue DEFAULT_KEEP_ALIVE = TimeValue.timeValueDays(5); + public static TimeValue DEFAULT_WAIT_FOR_COMPLETION = TimeValue.timeValueSeconds(1); + private static final ConstructingObjectParser<TypedParamValue, Void> PARAM_PARSER = new ConstructingObjectParser<>( "params", true, @@ -64,7 +68,14 @@ public class EsqlQueryRequest extends ActionRequest implements CompositeIndicesR private static final ParseField LOCALE_FIELD = new ParseField("locale"); private static final ParseField PROFILE_FIELD = new ParseField("profile"); - private static final ObjectParser<EsqlQueryRequest, Void> PARSER = objectParser(EsqlQueryRequest::new); + static final ParseField WAIT_FOR_COMPLETION_TIMEOUT = new ParseField("wait_for_completion_timeout"); + static final ParseField KEEP_ALIVE = new ParseField("keep_alive"); + static final ParseField KEEP_ON_COMPLETION = new ParseField("keep_on_completion"); + + private static final ObjectParser<EsqlQueryRequest, Void> SYNC_PARSER = objectParserSync(EsqlQueryRequest::syncEsqlQueryRequest); + private static final ObjectParser<EsqlQueryRequest, Void> ASYNC_PARSER = objectParserAsync(EsqlQueryRequest::asyncEsqlQueryRequest); + + private boolean async; private String query; private boolean columnar; @@ -73,6 +84,21 @@ public class EsqlQueryRequest extends ActionRequest implements CompositeIndicesR private QueryBuilder filter; private QueryPragmas pragmas = new QueryPragmas(Settings.EMPTY); private List<TypedParamValue> params = List.of(); + private TimeValue waitForCompletionTimeout = DEFAULT_WAIT_FOR_COMPLETION; + private TimeValue keepAlive = DEFAULT_KEEP_ALIVE; + private boolean keepOnCompletion; + + static EsqlQueryRequest syncEsqlQueryRequest() { + return new EsqlQueryRequest(false); + } + + static EsqlQueryRequest asyncEsqlQueryRequest() { + return new EsqlQueryRequest(true); + } + + private EsqlQueryRequest(boolean async) { + this.async = async; + } public EsqlQueryRequest(StreamInput in) throws IOException { super(in); @@ -100,6 +126,10 @@ public String query() { return query; } + public boolean async() { + return async; + } + public void columnar(boolean columnar) { this.columnar = columnar; } @@ -155,12 +185,39 @@ public void params(List<TypedParamValue> params) { this.params = params; } - public static EsqlQueryRequest fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); + public TimeValue waitForCompletionTimeout() { + return waitForCompletionTimeout; } - private static ObjectParser<EsqlQueryRequest, Void> objectParser(Supplier<EsqlQueryRequest> supplier) { - ObjectParser<EsqlQueryRequest, Void> parser = new ObjectParser<>("esql/query", false, supplier); + public void waitForCompletionTimeout(TimeValue waitForCompletionTimeout) { + this.waitForCompletionTimeout = waitForCompletionTimeout; + } + + public TimeValue keepAlive() { + return keepAlive; + } + + public void keepAlive(TimeValue keepAlive) { + this.keepAlive = keepAlive; + } + + public boolean keepOnCompletion() { + return keepOnCompletion; + } + + public void keepOnCompletion(boolean keepOnCompletion) { + this.keepOnCompletion = keepOnCompletion; + } + + public static EsqlQueryRequest fromXContentSync(XContentParser parser) { + return SYNC_PARSER.apply(parser, null); + } + + public static EsqlQueryRequest fromXContentAsync(XContentParser parser) { + return ASYNC_PARSER.apply(parser, null); + } + + private static void objectParserCommon(ObjectParser<EsqlQueryRequest, ?> parser) { parser.declareString(EsqlQueryRequest::query, QUERY_FIELD); parser.declareBoolean(EsqlQueryRequest::columnar, COLUMNAR_FIELD); parser.declareObject(EsqlQueryRequest::filter, (p, c) -> AbstractQueryBuilder.parseTopLevelQuery(p), FILTER_FIELD); @@ -172,7 +229,30 @@ private static ObjectParser<EsqlQueryRequest, Void> objectParser(Supplier<EsqlQueryRequest> supplier) parser.declareString((request, localeTag) -> request.locale(Locale.forLanguageTag(localeTag)), LOCALE_FIELD); parser.declareBoolean(EsqlQueryRequest::profile, PROFILE_FIELD); + } + + private static ObjectParser<EsqlQueryRequest, Void> objectParserSync(Supplier<EsqlQueryRequest> supplier) { + ObjectParser<EsqlQueryRequest, Void> parser = new ObjectParser<>("esql/query", false, supplier); + objectParserCommon(parser); + return parser; + } + private static ObjectParser<EsqlQueryRequest, Void> objectParserAsync(Supplier<EsqlQueryRequest> supplier) { + ObjectParser<EsqlQueryRequest, Void> parser = new ObjectParser<>("esql/async_query", false, supplier); + objectParserCommon(parser); + parser.declareBoolean(EsqlQueryRequest::keepOnCompletion, KEEP_ON_COMPLETION); + parser.declareField( + EsqlQueryRequest::waitForCompletionTimeout, + (p, c) -> TimeValue.parseTimeValue(p.text(), WAIT_FOR_COMPLETION_TIMEOUT.getPreferredName()), + WAIT_FOR_COMPLETION_TIMEOUT, + ObjectParser.ValueType.VALUE + ); + parser.declareField( + EsqlQueryRequest::keepAlive, + (p, c) -> TimeValue.parseTimeValue(p.text(), KEEP_ALIVE.getPreferredName()), + KEEP_ALIVE, + ObjectParser.ValueType.VALUE + ); return parser; }
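The request class now carries two parsers: `SYNC_PARSER` rejects the async-only fields, while `ASYNC_PARSER` additionally understands `keep_on_completion`, `wait_for_completion_timeout`, and `keep_alive`, falling back to the one-second and five-day defaults declared above. A test-style sketch of the parsing behavior; the x-content scaffolding is standard, nothing here goes beyond what the diff declares:

```java
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentParserConfiguration;
import org.elasticsearch.xcontent.XContentType;

static void parseAsyncRequest() throws java.io.IOException {
    String body = """
        {
          "query": "from test | limit 10",
          "keep_on_completion": true,
          "wait_for_completion_timeout": "2s",
          "keep_alive": "1d"
        }""";
    try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, body)) {
        EsqlQueryRequest request = EsqlQueryRequest.fromXContentAsync(parser);
        assert request.async();
        assert request.keepOnCompletion();
        assert request.waitForCompletionTimeout().equals(TimeValue.timeValueSeconds(2));
        assert request.keepAlive().equals(TimeValue.timeValueDays(1));
        // Feeding the same body to fromXContentSync would fail: the sync parser
        // never declares the three async-only fields, and unknown fields throw.
    }
}
```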
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestBuilder.java index be3aeec190ded..5b00208342296 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestBuilder.java @@ -9,13 +9,22 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; public class EsqlQueryRequestBuilder extends ActionRequestBuilder<EsqlQueryRequest, EsqlQueryResponse> { - public EsqlQueryRequestBuilder(ElasticsearchClient client) { - super(client, EsqlQueryAction.INSTANCE, new EsqlQueryRequest()); + public static EsqlQueryRequestBuilder newAsyncEsqlQueryRequestBuilder(ElasticsearchClient client) { + return new EsqlQueryRequestBuilder(client, EsqlQueryRequest.asyncEsqlQueryRequest()); + } + + public static EsqlQueryRequestBuilder newSyncEsqlQueryRequestBuilder(ElasticsearchClient client) { + return new EsqlQueryRequestBuilder(client, EsqlQueryRequest.syncEsqlQueryRequest()); + } + + private EsqlQueryRequestBuilder(ElasticsearchClient client, EsqlQueryRequest request) { + super(client, EsqlQueryAction.INSTANCE, request); } public EsqlQueryRequestBuilder query(String query) { @@ -37,4 +46,19 @@ public EsqlQueryRequestBuilder pragmas(QueryPragmas pragmas) { request.pragmas(pragmas); return this; } + + public EsqlQueryRequestBuilder waitForCompletionTimeout(TimeValue waitForCompletionTimeout) { + request.waitForCompletionTimeout(waitForCompletionTimeout); + return this; + } + + public EsqlQueryRequestBuilder keepAlive(TimeValue keepAlive) { + request.keepAlive(keepAlive); + return this; + } + + public EsqlQueryRequestBuilder keepOnCompletion(boolean keepOnCompletion) { + request.keepOnCompletion(keepOnCompletion); + return this; + } }
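With the builder's constructor now private, callers pick the sync or async entry point explicitly. A hedged usage sketch of the async path; the client is assumed context, and the comments describe the expected async-search-style semantics rather than anything this hunk guarantees:

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.internal.ElasticsearchClient;
import org.elasticsearch.core.TimeValue;

static void submitAsync(ElasticsearchClient client) {
    EsqlQueryRequestBuilder.newAsyncEsqlQueryRequestBuilder(client)
        .query("from logs-* | stats count(*)")
        .waitForCompletionTimeout(TimeValue.timeValueSeconds(1)) // hand back an id if not done in 1s
        .keepAlive(TimeValue.timeValueDays(1))                   // how long a stored result survives
        .keepOnCompletion(false)                                 // discard the stored copy once delivered
        .execute(ActionListener.wrap(response -> {
            response.asyncExecutionId().ifPresentOrElse(
                id -> { /* still running: poll later via EsqlAsyncGetResultAction */ },
                () -> { /* finished within the wait: values are inline */ }
            );
        }, failure -> {
            // handle rejection or query failure
        }));
}
```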
-import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; -import org.elasticsearch.xpack.esql.planner.PlannerUtils; -import org.elasticsearch.xpack.esql.type.EsqlDataTypes; -import org.elasticsearch.xpack.versionfield.Version; import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; -import java.util.Map; import java.util.Objects; -import java.util.function.Function; - -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.xpack.ql.util.DateUtils.UTC_DATE_TIME_FORMATTER; -import static org.elasticsearch.xpack.ql.util.NumericUtils.asLongUnsigned; -import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsNumber; -import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; -import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; -import static org.elasticsearch.xpack.ql.util.StringUtils.parseIP; +import java.util.Optional; public class EsqlQueryResponse extends ActionResponse implements ChunkedToXContentObject, Releasable { - private static final InstantiatingObjectParser PARSER; - static { - InstantiatingObjectParser.Builder parser = InstantiatingObjectParser.builder( - "esql/query_response", - true, - EsqlQueryResponse.class - ); - parser.declareObjectArray(constructorArg(), (p, c) -> ColumnInfo.fromXContent(p), new ParseField("columns")); - parser.declareField(constructorArg(), (p, c) -> p.list(), new ParseField("values"), ObjectParser.ValueType.OBJECT_ARRAY); - PARSER = parser.build(); - } + + @SuppressWarnings("this-escape") + private final AbstractRefCounted counted = AbstractRefCounted.of(this::closeInternal); private final List columns; private final List pages; private final Profile profile; private final boolean columnar; - - public EsqlQueryResponse(List columns, List pages, @Nullable Profile profile, boolean columnar) { + private final String asyncExecutionId; + private final boolean isRunning; + // True if this response is the result of an async query request + private final boolean isAsync; + + public EsqlQueryResponse( + List columns, + List pages, + @Nullable Profile profile, + boolean columnar, + @Nullable String asyncExecutionId, + boolean isRunning, + boolean isAsync + ) { this.columns = columns; this.pages = pages; this.profile = profile; this.columnar = columnar; + this.asyncExecutionId = asyncExecutionId; + this.isRunning = isRunning; + this.isAsync = isAsync; } - public EsqlQueryResponse(List columns, List> values) { - this.columns = columns; - this.pages = List.of(valuesToPage(columns.stream().map(ColumnInfo::type).toList(), values)); - this.profile = null; - this.columnar = false; + public EsqlQueryResponse(List columns, List pages, @Nullable Profile profile, boolean columnar, boolean isAsync) { + this(columns, pages, profile, columnar, null, false, isAsync); } /** * Build a reader for the response. 
*/ public static Writeable.Reader reader(BlockFactory blockFactory) { - return in -> new EsqlQueryResponse(new BlockStreamInput(in, blockFactory)); + return in -> deserialize(new BlockStreamInput(in, blockFactory)); } - private EsqlQueryResponse(BlockStreamInput in) throws IOException { - super(in); - this.columns = in.readCollectionAsList(ColumnInfo::new); - this.pages = in.readCollectionAsList(Page::new); + static EsqlQueryResponse deserialize(BlockStreamInput in) throws IOException { + String asyncExecutionId = null; + boolean isRunning = false; + boolean isAsync = false; + Profile profile = null; + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_ASYNC_QUERY)) { + asyncExecutionId = in.readOptionalString(); + isRunning = in.readBoolean(); + isAsync = in.readBoolean(); + } + List columns = in.readCollectionAsList(ColumnInfo::new); + List pages = in.readCollectionAsList(Page::new); if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE)) { - this.profile = in.readOptionalWriteable(Profile::new); - } else { - this.profile = null; + profile = in.readOptionalWriteable(Profile::new); } - this.columnar = in.readBoolean(); + boolean columnar = in.readBoolean(); + return new EsqlQueryResponse(columns, pages, profile, columnar, asyncExecutionId, isRunning, isAsync); } @Override public void writeTo(StreamOutput out) throws IOException { + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_ASYNC_QUERY)) { + out.writeOptionalString(asyncExecutionId); + out.writeBoolean(isRunning); + out.writeBoolean(isAsync); + } out.writeCollection(columns); out.writeCollection(pages); if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE)) { @@ -137,7 +120,8 @@ List pages() { } public Iterator> values() { - return pagesToValues(columns.stream().map(ColumnInfo::type).toList(), pages); + List dataTypes = columns.stream().map(ColumnInfo::type).toList(); + return ResponseValueUtils.pagesToValues(dataTypes, pages); } public Profile profile() { @@ -148,63 +132,42 @@ public boolean columnar() { return columnar; } - @Override - public Iterator toXContentChunked(ToXContent.Params params) { - final BytesRef scratch = new BytesRef(); - final Iterator valuesIt; - if (pages.isEmpty()) { - valuesIt = Collections.emptyIterator(); - } else if (columnar) { - valuesIt = Iterators.flatMap( - Iterators.forRange( - 0, - columns().size(), - column -> Iterators.concat( - Iterators.single(((builder, p) -> builder.startArray())), - Iterators.flatMap(pages.iterator(), page -> { - ColumnInfo.PositionToXContent toXContent = columns.get(column) - .positionToXContent(page.getBlock(column), scratch); - return Iterators.forRange( - 0, - page.getPositionCount(), - position -> (builder, p) -> toXContent.positionToXContent(builder, p, position) - ); - }), - ChunkedToXContentHelper.endArray() - ) - ), - Function.identity() - ); - } else { - valuesIt = Iterators.flatMap(pages.iterator(), page -> { - final int columnCount = columns.size(); - assert page.getBlockCount() == columnCount : page.getBlockCount() + " != " + columnCount; - final ColumnInfo.PositionToXContent[] toXContents = new ColumnInfo.PositionToXContent[columnCount]; - for (int column = 0; column < columnCount; column++) { - toXContents[column] = columns.get(column).positionToXContent(page.getBlock(column), scratch); + public Optional asyncExecutionId() { + return Optional.ofNullable(asyncExecutionId); + } + + public boolean isRunning() { + return isRunning; + } + + public boolean isAsync() { + return isAsync; + } + + private 
Iterator asyncPropertiesOrEmpty() { + if (isAsync) { + return ChunkedToXContentHelper.singleChunk((builder, params) -> { + if (asyncExecutionId != null) { + builder.field("id", asyncExecutionId); } - return Iterators.forRange(0, page.getPositionCount(), position -> (builder, p) -> { - builder.startArray(); - for (int c = 0; c < columnCount; c++) { - toXContents[c].positionToXContent(builder, p, position); - } - return builder.endArray(); - }); + builder.field("is_running", isRunning); + return builder; }); + } else { + return Collections.emptyIterator(); } - Iterator columnsRender = ChunkedToXContentHelper.singleChunk((builder, p) -> { - builder.startArray("columns"); - for (ColumnInfo col : columns) { - col.toXContent(builder, p); - } - return builder.endArray(); - }); + } + + @Override + public Iterator toXContentChunked(ToXContent.Params params) { + final Iterator valuesIt = ResponseXContentUtils.columnValues(this.columns, this.pages, columnar); Iterator profileRender = profile == null ? List.of().iterator() : ChunkedToXContentHelper.field("profile", profile, params); return Iterators.concat( ChunkedToXContentHelper.startObject(), - columnsRender, + asyncPropertiesOrEmpty(), + ResponseXContentUtils.columnHeadings(columns), ChunkedToXContentHelper.array("values", valuesIt), profileRender, ChunkedToXContentHelper.endObject() @@ -216,16 +179,14 @@ public boolean isFragment() { return false; } - public static EsqlQueryResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; EsqlQueryResponse that = (EsqlQueryResponse) o; return Objects.equals(columns, that.columns) + && Objects.equals(asyncExecutionId, that.asyncExecutionId) + && Objects.equals(isRunning, that.isRunning) && columnar == that.columnar && Iterators.equals(values(), that.values(), (row1, row2) -> Iterators.equals(row1, row2, Objects::equals)) && Objects.equals(profile, that.profile); @@ -233,7 +194,13 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(columns, Iterators.hashCode(values(), row -> Iterators.hashCode(row, Objects::hashCode)), columnar); + return Objects.hash( + asyncExecutionId, + isRunning, + columns, + Iterators.hashCode(values(), row -> Iterators.hashCode(row, Objects::hashCode)), + columnar + ); } @Override @@ -242,129 +209,32 @@ public String toString() { } @Override - public void close() { - Releasables.close(() -> Iterators.map(pages.iterator(), p -> p::releaseBlocks)); + public void incRef() { + tryIncRef(); } - public static Iterator> pagesToValues(List dataTypes, List pages) { - BytesRef scratch = new BytesRef(); - return Iterators.flatMap( - pages.iterator(), - page -> Iterators.forRange(0, page.getPositionCount(), p -> Iterators.forRange(0, page.getBlockCount(), b -> { - Block block = page.getBlock(b); - if (block.isNull(p)) { - return null; - } - /* - * Use the ESQL data type to map to the output to make sure compute engine - * respects its types. See the INTEGER clause where is doesn't always - * respect it. 
- */ - int count = block.getValueCount(p); - int start = block.getFirstValueIndex(p); - String dataType = dataTypes.get(b); - if (count == 1) { - return valueAt(dataType, block, start, scratch); - } - List thisResult = new ArrayList<>(count); - int end = count + start; - for (int i = start; i < end; i++) { - thisResult.add(valueAt(dataType, block, i, scratch)); - } - return thisResult; - })) - ); + @Override + public boolean tryIncRef() { + return counted.tryIncRef(); } - private static Object valueAt(String dataType, Block block, int offset, BytesRef scratch) { - return switch (dataType) { - case "unsigned_long" -> unsignedLongAsNumber(((LongBlock) block).getLong(offset)); - case "long" -> ((LongBlock) block).getLong(offset); - case "integer" -> ((IntBlock) block).getInt(offset); - case "double" -> ((DoubleBlock) block).getDouble(offset); - case "keyword", "text" -> ((BytesRefBlock) block).getBytesRef(offset, scratch).utf8ToString(); - case "ip" -> { - BytesRef val = ((BytesRefBlock) block).getBytesRef(offset, scratch); - yield DocValueFormat.IP.format(val); - } - case "date" -> { - long longVal = ((LongBlock) block).getLong(offset); - yield UTC_DATE_TIME_FORMATTER.formatMillis(longVal); - } - case "boolean" -> ((BooleanBlock) block).getBoolean(offset); - case "version" -> new Version(((BytesRefBlock) block).getBytesRef(offset, scratch)).toString(); - case "geo_point" -> GEO.longAsPoint(((LongBlock) block).getLong(offset)); - case "cartesian_point" -> CARTESIAN.longAsPoint(((LongBlock) block).getLong(offset)); - case "unsupported" -> UnsupportedValueSource.UNSUPPORTED_OUTPUT; - case "_source" -> { - BytesRef val = ((BytesRefBlock) block).getBytesRef(offset, scratch); - try { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray(val))) { - parser.nextToken(); - yield parser.mapOrdered(); - } - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - default -> throw EsqlIllegalArgumentException.illegalDataType(dataType); - }; + @Override + public boolean decRef() { + return counted.decRef(); } - /** - * Convert a list of values to Pages so we can parse from xcontent. It's not - * super efficient but it doesn't really have to be. 
- */ - private static Page valuesToPage(List dataTypes, List> values) { - List results = dataTypes.stream() - .map(c -> PlannerUtils.toElementType(EsqlDataTypes.fromName(c)).newBlockBuilder(values.size())) - .toList(); - - for (List row : values) { - for (int c = 0; c < row.size(); c++) { - var builder = results.get(c); - var value = row.get(c); - switch (dataTypes.get(c)) { - case "unsigned_long" -> ((LongBlock.Builder) builder).appendLong(asLongUnsigned(((Number) value).longValue())); - case "long" -> ((LongBlock.Builder) builder).appendLong(((Number) value).longValue()); - case "integer" -> ((IntBlock.Builder) builder).appendInt(((Number) value).intValue()); - case "double" -> ((DoubleBlock.Builder) builder).appendDouble(((Number) value).doubleValue()); - case "keyword", "text", "unsupported" -> ((BytesRefBlock.Builder) builder).appendBytesRef( - new BytesRef(value.toString()) - ); - case "ip" -> ((BytesRefBlock.Builder) builder).appendBytesRef(parseIP(value.toString())); - case "date" -> { - long longVal = UTC_DATE_TIME_FORMATTER.parseMillis(value.toString()); - ((LongBlock.Builder) builder).appendLong(longVal); - } - case "boolean" -> ((BooleanBlock.Builder) builder).appendBoolean(((Boolean) value)); - case "null" -> builder.appendNull(); - case "version" -> ((BytesRefBlock.Builder) builder).appendBytesRef(new Version(value.toString()).toBytesRef()); - case "_source" -> { - @SuppressWarnings("unchecked") - Map o = (Map) value; - try { - try (XContentBuilder sourceBuilder = JsonXContent.contentBuilder()) { - sourceBuilder.map(o); - ((BytesRefBlock.Builder) builder).appendBytesRef(BytesReference.bytes(sourceBuilder).toBytesRef()); - } - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - case "geo_point" -> { - long longVal = GEO.pointAsLong(GEO.stringAsPoint(value.toString())); - ((LongBlock.Builder) builder).appendLong(longVal); - } - case "cartesian_point" -> { - long longVal = CARTESIAN.pointAsLong(CARTESIAN.stringAsPoint(value.toString())); - ((LongBlock.Builder) builder).appendLong(longVal); - } - default -> throw EsqlIllegalArgumentException.illegalDataType(dataTypes.get(c)); - } - } - } - return new Page(results.stream().map(Block.Builder::build).toArray(Block[]::new)); + @Override + public boolean hasReferences() { + return counted.hasReferences(); + } + + @Override + public void close() { + decRef(); + } + + void closeInternal() { + Releasables.close(() -> Iterators.map(pages.iterator(), p -> p::releaseBlocks)); } public static class Profile implements Writeable, ChunkedToXContentObject { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryTask.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryTask.java new file mode 100644 index 0000000000000..917355b2d88b5 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryTask.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.xpack.core.async.AsyncExecutionId; +import org.elasticsearch.xpack.core.async.StoredAsyncTask; + +import java.util.List; +import java.util.Map; + +public class EsqlQueryTask extends StoredAsyncTask { + + public EsqlQueryTask( + long id, + String type, + String action, + String description, + TaskId parentTaskId, + Map headers, + Map originHeaders, + AsyncExecutionId asyncExecutionId, + TimeValue keepAlive + ) { + super(id, type, action, description, parentTaskId, headers, originHeaders, asyncExecutionId, keepAlive); + } + + @Override + public EsqlQueryResponse getCurrentResult() { + return new EsqlQueryResponse(List.of(), List.of(), null, false, getExecutionId().getEncoded(), true, true); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java index ee641cd9209a7..7b525642009a7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.action; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; @@ -16,11 +17,12 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.rest.action.RestResponseListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.xcontent.MediaType; import org.elasticsearch.xpack.esql.formatter.TextFormat; import org.elasticsearch.xpack.esql.plugin.EsqlMediaTypeParser; +import java.io.IOException; import java.util.Locale; import java.util.concurrent.TimeUnit; @@ -31,7 +33,7 @@ /** * Listens for a single {@link EsqlQueryResponse}, builds a corresponding {@link RestResponse} and sends it. */ -public class EsqlResponseListener extends RestResponseListener { +public final class EsqlResponseListener extends RestRefCountedChunkedToXContentListener { /** * A simple, thread-safe stop watch for timing a single action. * Allows to stop the time for building a response and to log it at a later point. 
@@ -118,8 +120,13 @@ public EsqlResponseListener(RestChannel channel, RestRequest restRequest, EsqlQu } @Override - public RestResponse buildResponse(EsqlQueryResponse esqlResponse) throws Exception { + protected void processResponse(EsqlQueryResponse esqlQueryResponse) throws IOException { + channel.sendResponse(buildResponse(esqlQueryResponse)); + } + + private RestResponse buildResponse(EsqlQueryResponse esqlResponse) throws IOException { boolean success = false; + final Releasable releasable = releasableFromResponse(esqlResponse); try { RestResponse restResponse; if (mediaType instanceof TextFormat format) { @@ -128,13 +135,13 @@ public RestResponse buildResponse(EsqlQueryResponse esqlResponse) throws Excepti ChunkedRestResponseBody.fromTextChunks( format.contentType(restRequest), format.format(restRequest, esqlResponse), - esqlResponse + releasable ) ); } else { restResponse = RestResponse.chunked( RestStatus.OK, - ChunkedRestResponseBody.fromXContent(esqlResponse, channel.request(), channel, esqlResponse) + ChunkedRestResponseBody.fromXContent(esqlResponse, channel.request(), channel, releasable) ); } long tookNanos = stopWatch.stop().getNanos(); @@ -143,7 +150,7 @@ public RestResponse buildResponse(EsqlQueryResponse esqlResponse) throws Excepti return restResponse; } finally { if (success == false) { - esqlResponse.close(); + releasable.close(); } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java new file mode 100644 index 0000000000000..625b488b1e857 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java @@ -0,0 +1,180 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.lucene.UnsupportedValueSource; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.versionfield.Version; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.ql.util.DateUtils.UTC_DATE_TIME_FORMATTER; +import static org.elasticsearch.xpack.ql.util.NumericUtils.asLongUnsigned; +import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsNumber; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; +import static org.elasticsearch.xpack.ql.util.StringUtils.parseIP; + +/** + * Collection of static utility methods for helping transform response data between pages and values. + */ +public final class ResponseValueUtils { + + /** + * Returns an iterator of iterators over the values in the given pages. There is one iterator + * for each position (row). + */ + public static Iterator> pagesToValues(List dataTypes, List pages) { + BytesRef scratch = new BytesRef(); + return Iterators.flatMap( + pages.iterator(), + page -> Iterators.forRange(0, page.getPositionCount(), p -> Iterators.forRange(0, page.getBlockCount(), b -> { + Block block = page.getBlock(b); + if (block.isNull(p)) { + return null; + } + /* + * Use the ESQL data type to map to the output to make sure compute engine + * respects its types. See the INTEGER clause where it doesn't always + * respect it. 
+ */ + int count = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + String dataType = dataTypes.get(b); + if (count == 1) { + return valueAt(dataType, block, start, scratch); + } + List thisResult = new ArrayList<>(count); + int end = count + start; + for (int i = start; i < end; i++) { + thisResult.add(valueAt(dataType, block, i, scratch)); + } + return thisResult; + })) + ); + } + + private static Object valueAt(String dataType, Block block, int offset, BytesRef scratch) { + return switch (dataType) { + case "unsigned_long" -> unsignedLongAsNumber(((LongBlock) block).getLong(offset)); + case "long" -> ((LongBlock) block).getLong(offset); + case "integer" -> ((IntBlock) block).getInt(offset); + case "double" -> ((DoubleBlock) block).getDouble(offset); + case "keyword", "text" -> ((BytesRefBlock) block).getBytesRef(offset, scratch).utf8ToString(); + case "ip" -> { + BytesRef val = ((BytesRefBlock) block).getBytesRef(offset, scratch); + yield DocValueFormat.IP.format(val); + } + case "date" -> { + long longVal = ((LongBlock) block).getLong(offset); + yield UTC_DATE_TIME_FORMATTER.formatMillis(longVal); + } + case "boolean" -> ((BooleanBlock) block).getBoolean(offset); + case "version" -> new Version(((BytesRefBlock) block).getBytesRef(offset, scratch)).toString(); + case "geo_point" -> GEO.wkbAsString(((BytesRefBlock) block).getBytesRef(offset, scratch)); + case "cartesian_point" -> CARTESIAN.wkbAsString(((BytesRefBlock) block).getBytesRef(offset, scratch)); + case "unsupported" -> UnsupportedValueSource.UNSUPPORTED_OUTPUT; + case "_source" -> { + BytesRef val = ((BytesRefBlock) block).getBytesRef(offset, scratch); + try { + try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray(val))) { + parser.nextToken(); + yield parser.mapOrdered(); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + default -> throw EsqlIllegalArgumentException.illegalDataType(dataType); + }; + } + + /** + * Converts a list of values to Pages so that we can parse from xcontent. It's not + * super efficient, but it doesn't really have to be. 
*/ + static Page valuesToPage(BlockFactory blockFactory, List columns, List> values) { + List dataTypes = columns.stream().map(ColumnInfo::type).toList(); + List results = dataTypes.stream() + .map(c -> PlannerUtils.toElementType(EsqlDataTypes.fromName(c)).newBlockBuilder(values.size(), blockFactory)) + .toList(); + + for (List row : values) { + for (int c = 0; c < row.size(); c++) { + var builder = results.get(c); + var value = row.get(c); + switch (dataTypes.get(c)) { + case "unsigned_long" -> ((LongBlock.Builder) builder).appendLong(asLongUnsigned(((Number) value).longValue())); + case "long" -> ((LongBlock.Builder) builder).appendLong(((Number) value).longValue()); + case "integer" -> ((IntBlock.Builder) builder).appendInt(((Number) value).intValue()); + case "double" -> ((DoubleBlock.Builder) builder).appendDouble(((Number) value).doubleValue()); + case "keyword", "text", "unsupported" -> ((BytesRefBlock.Builder) builder).appendBytesRef( + new BytesRef(value.toString()) + ); + case "ip" -> ((BytesRefBlock.Builder) builder).appendBytesRef(parseIP(value.toString())); + case "date" -> { + long longVal = UTC_DATE_TIME_FORMATTER.parseMillis(value.toString()); + ((LongBlock.Builder) builder).appendLong(longVal); + } + case "boolean" -> ((BooleanBlock.Builder) builder).appendBoolean(((Boolean) value)); + case "null" -> builder.appendNull(); + case "version" -> ((BytesRefBlock.Builder) builder).appendBytesRef(new Version(value.toString()).toBytesRef()); + case "_source" -> { + @SuppressWarnings("unchecked") + Map o = (Map) value; + try { + try (XContentBuilder sourceBuilder = JsonXContent.contentBuilder()) { + sourceBuilder.map(o); + ((BytesRefBlock.Builder) builder).appendBytesRef(BytesReference.bytes(sourceBuilder).toBytesRef()); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + case "geo_point" -> { + // This just converts WKT to WKB, so it does not need CRS knowledge; we could merge GEO and CARTESIAN here + BytesRef wkb = GEO.stringAsWKB(value.toString()); + ((BytesRefBlock.Builder) builder).appendBytesRef(wkb); + } + case "cartesian_point" -> { + // This just converts WKT to WKB, so it does not need CRS knowledge; we could merge GEO and CARTESIAN here + BytesRef wkb = CARTESIAN.stringAsWKB(value.toString()); + ((BytesRefBlock.Builder) builder).appendBytesRef(wkb); + } + default -> throw EsqlIllegalArgumentException.illegalDataType(dataTypes.get(c)); + } + } + } + return new Page(results.stream().map(Block.Builder::build).toArray(Block[]::new)); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java new file mode 100644 index 0000000000000..e28e6beebabed --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.xcontent.ToXContent; + +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.function.Function; + +/** + * Collection of static utility methods for helping transform response data to XContent. + */ +final class ResponseXContentUtils { + + /** Returns the column headings for the given columns. */ + static Iterator columnHeadings(List columns) { + return ChunkedToXContentHelper.singleChunk((builder, params) -> { + builder.startArray("columns"); + for (ColumnInfo col : columns) { + col.toXContent(builder, params); + } + return builder.endArray(); + }); + } + + /** Returns the column values for the given pages (described by the column infos). */ + static Iterator columnValues(List columns, List pages, boolean columnar) { + if (pages.isEmpty()) { + return Collections.emptyIterator(); + } else if (columnar) { + return columnarValues(columns, pages); + } else { + return rowValues(columns, pages); + } + } + + /** Returns a columnar based representation of the values in the given pages (described by the column infos). */ + static Iterator columnarValues(List columns, List pages) { + final BytesRef scratch = new BytesRef(); + return Iterators.flatMap( + Iterators.forRange( + 0, + columns.size(), + column -> Iterators.concat( + Iterators.single(((builder, params) -> builder.startArray())), + Iterators.flatMap(pages.iterator(), page -> { + ColumnInfo.PositionToXContent toXContent = columns.get(column).positionToXContent(page.getBlock(column), scratch); + return Iterators.forRange( + 0, + page.getPositionCount(), + position -> (builder, params) -> toXContent.positionToXContent(builder, params, position) + ); + }), + ChunkedToXContentHelper.endArray() + ) + ), + Function.identity() + ); + } + + /** Returns a row based representation of the values in the given pages (described by the column infos). */ + static Iterator rowValues(List columns, List pages) { + final BytesRef scratch = new BytesRef(); + return Iterators.flatMap(pages.iterator(), page -> { + final int columnCount = columns.size(); + assert page.getBlockCount() == columnCount : page.getBlockCount() + " != " + columnCount; + final ColumnInfo.PositionToXContent[] toXContents = new ColumnInfo.PositionToXContent[columnCount]; + for (int column = 0; column < columnCount; column++) { + toXContents[column] = columns.get(column).positionToXContent(page.getBlock(column), scratch); + } + return Iterators.forRange(0, page.getPositionCount(), position -> (builder, params) -> { + builder.startArray(); + for (int c = 0; c < columnCount; c++) { + toXContents[c].positionToXContent(builder, params, position); + } + return builder.endArray(); + }); + }); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java new file mode 100644 index 0000000000000..3dea461ccf8b7 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestCancellableNodeClient; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Set; + +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.xpack.esql.formatter.TextFormat.URL_PARAM_DELIMITER; + +@ServerlessScope(Scope.PUBLIC) +public class RestEsqlAsyncQueryAction extends BaseRestHandler { + private static final Logger LOGGER = LogManager.getLogger(RestEsqlAsyncQueryAction.class); + + @Override + public String getName() { + return "esql_async_query"; + } + + @Override + public List routes() { + return List.of(new Route(POST, "/_query/async")); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + EsqlQueryRequest esqlRequest; + try (XContentParser parser = request.contentOrSourceParamParser()) { + esqlRequest = EsqlQueryRequest.fromXContentAsync(parser); + } + + LOGGER.info("Beginning execution of ESQL async query.\nQuery string: [{}]", esqlRequest.query()); + + return channel -> { + RestCancellableNodeClient cancellableClient = new RestCancellableNodeClient(client, request.getHttpChannel()); + cancellableClient.execute( + EsqlQueryAction.INSTANCE, + esqlRequest, + new EsqlResponseListener(channel, request, esqlRequest).wrapWithLogging() + ); + }; + } + + @Override + protected Set responseParams() { + return Collections.singleton(URL_PARAM_DELIMITER); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlDeleteAsyncResultAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlDeleteAsyncResultAction.java new file mode 100644 index 0000000000000..7a325bd16b29f --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlDeleteAsyncResultAction.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.async.DeleteAsyncResultRequest; +import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction; + +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.DELETE; + +@ServerlessScope(Scope.PUBLIC) +public class RestEsqlDeleteAsyncResultAction extends BaseRestHandler { + @Override + public List routes() { + return List.of(new RestHandler.Route(DELETE, "/_query/async/{id}")); + } + + @Override + public String getName() { + return "esql_delete_async_result"; + } + + @Override + protected BaseRestHandler.RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + DeleteAsyncResultRequest delete = new DeleteAsyncResultRequest(request.param("id")); + return channel -> client.execute(TransportDeleteAsyncResultAction.TYPE, delete, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlGetAsyncResultAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlGetAsyncResultAction.java new file mode 100644 index 0000000000000..35a679e23d1f7 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlGetAsyncResultAction.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; +import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; + +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +@ServerlessScope(Scope.PUBLIC) +public class RestEsqlGetAsyncResultAction extends BaseRestHandler { + @Override + public List routes() { + return List.of(new Route(GET, "/_query/async/{id}")); + } + + @Override + public String getName() { + return "esql_get_async_result"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + GetAsyncResultRequest get = new GetAsyncResultRequest(request.param("id")); + if (request.hasParam("wait_for_completion_timeout")) { + get.setWaitForCompletionTimeout(request.paramAsTime("wait_for_completion_timeout", get.getWaitForCompletionTimeout())); + } + if (request.hasParam("keep_alive")) { + get.setKeepAlive(request.paramAsTime("keep_alive", get.getKeepAlive())); + } + return channel -> client.execute(EsqlAsyncGetResultAction.INSTANCE, get, new RestRefCountedChunkedToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java index 7a1b7f7b9b927..6b8e7fc397865 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java @@ -48,7 +48,7 @@ public List routes() { protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { EsqlQueryRequest esqlRequest; try (XContentParser parser = request.contentOrSourceParamParser()) { - esqlRequest = EsqlQueryRequest.fromXContent(parser); + esqlRequest = EsqlQueryRequest.fromXContentSync(parser); } LOGGER.info("Beginning execution of ESQL query.\nQuery string: [{}]", esqlRequest.query()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index 945f543329c15..a533c373ad2ca 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.UnavailableShardsException; +import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; @@ -24,7 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.compute.OwningChannelActionListener; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import 
org.elasticsearch.compute.data.BlockStreamInput; @@ -42,6 +42,7 @@ import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasables; +import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.shard.ShardId; @@ -273,12 +274,23 @@ private void doLookup( NamedExpression extractField = extractFields.get(i); final ElementType elementType = PlannerUtils.toElementType(extractField.dataType()); mergingTypes[i] = elementType; - var loaders = BlockReaderFactories.loaders( - List.of(searchContext), + BlockLoader loader = BlockReaderFactories.loader( + searchContext.getSearchExecutionContext(), extractField instanceof Alias a ? ((NamedExpression) a.child()).name() : extractField.name(), EsqlDataTypes.isUnsupported(extractField.dataType()) ); - fields.add(new ValuesSourceReaderOperator.FieldInfo(extractField.name(), loaders)); + fields.add( + new ValuesSourceReaderOperator.FieldInfo( + extractField.name(), + PlannerUtils.toElementType(extractField.dataType()), + shardIdx -> { + if (shardIdx != 0) { + throw new IllegalStateException("only one shard"); + } + return loader; + } + ) + ); } intermediateOperators.add( new ValuesSourceReaderOperator( @@ -369,7 +381,7 @@ private class TransportHandler implements TransportRequestHandler @Override public void messageReceived(LookupRequest request, TransportChannel channel, Task task) { request.incRef(); - ActionListener listener = ActionListener.runBefore(new OwningChannelActionListener<>(channel), request::decRef); + ActionListener listener = ActionListener.runBefore(new ChannelActionListener<>(channel), request::decRef); doLookup( request.sessionId, (CancellableTask) task, @@ -378,7 +390,7 @@ public void messageReceived(LookupRequest request, TransportChannel channel, Tas request.matchField, request.inputPage, request.extractFields, - listener.map(LookupResponse::new) + listener.delegateFailureAndWrap((l, outPage) -> ActionListener.respondAndRelease(l, new LookupResponse(outPage))) ); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index 246849896bcdf..1e21886a7ac4b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -9,12 +9,12 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.compute.OwningChannelActionListener; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; @@ -110,7 +110,7 @@ public void messageReceived(ResolveRequest request, TransportChannel channel, Ta String policyName = request.policyName; EnrichPolicy policy = policies().get(policyName); ThreadContext threadContext = 
threadPool.getThreadContext(); - ActionListener listener = new OwningChannelActionListener<>(channel); + ActionListener listener = new ChannelActionListener<>(channel); listener = ContextPreservingActionListener.wrapPreservingContext(listener, threadContext); try (ThreadContext.StoredContext ignored = threadContext.stashWithOrigin(ClientHelper.ENRICH_ORIGIN)) { indexResolver.resolveAsMergedMapping( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperator.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperator.java index 8d23a59779e6b..b0582e211fdba 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperator.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperator.java @@ -94,7 +94,7 @@ private Page queryOneLeaf(Weight weight, int leafIndex) throws IOException { if (scorer == null) { return null; } - IntVector docs = null, segments = null, shards = null; + IntVector docs = null, segments = null, shards = null, positions = null; boolean success = false; try (IntVector.Builder docsBuilder = blockFactory.newIntVectorBuilder(1)) { scorer.score(new DocCollector(docsBuilder), leafReaderContext.reader().getLiveDocs()); @@ -102,12 +102,13 @@ private Page queryOneLeaf(Weight weight, int leafIndex) throws IOException { final int positionCount = docs.getPositionCount(); segments = blockFactory.newConstantIntVector(leafIndex, positionCount); shards = blockFactory.newConstantIntVector(0, positionCount); - var positions = blockFactory.newConstantIntBlockWith(queryPosition, positionCount); + positions = blockFactory.newConstantIntVector(queryPosition, positionCount); + Page page = new Page(new DocVector(shards, segments, docs, true).asBlock(), positions.asBlock()); success = true; - return new Page(new DocVector(shards, segments, docs, true).asBlock(), positions); + return page; } finally { if (success == false) { - Releasables.close(docs, shards, segments); + Releasables.close(docs, shards, segments, positions); } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java index 280ef898c3b90..54c9fec4da96a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java @@ -105,7 +105,7 @@ public Block eval(Page page) { */ private Block eval(Block lhs, Block rhs) { int positionCount = lhs.getPositionCount(); - try (BooleanBlock.Builder result = BooleanBlock.newBlockBuilder(positionCount, lhs.blockFactory())) { + try (BooleanBlock.Builder result = lhs.blockFactory().newBooleanBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { if (lhs.getValueCount(p) > 1) { result.appendNull(); @@ -132,7 +132,7 @@ private Block eval(Block lhs, Block rhs) { private Block eval(BooleanVector lhs, BooleanVector rhs) { int positionCount = lhs.getPositionCount(); - try (var result = BooleanVector.newVectorFixedBuilder(positionCount, lhs.blockFactory())) { + try (var result = lhs.blockFactory().newBooleanVectorFixedBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { result.appendBoolean(bl.function().apply(lhs.getBoolean(p), rhs.getBoolean(p))); } @@ -225,12 +225,12 @@ public String toString() { private static Block block(Literal lit, 
BlockFactory blockFactory, int positions) { var value = lit.value(); if (value == null) { - return Block.constantNullBlock(positions, blockFactory); + return blockFactory.newConstantNullBlock(positions); } if (value instanceof List multiValue) { if (multiValue.isEmpty()) { - return Block.constantNullBlock(positions, blockFactory); + return blockFactory.newConstantNullBlock(positions); } var wrapper = BlockUtils.wrapperFor(blockFactory, ElementType.fromJava(multiValue.get(0).getClass()), positions); for (int i = 0; i < positions; i++) { @@ -267,14 +267,9 @@ record IsNullEvaluator(DriverContext driverContext, EvalOperator.ExpressionEvalu public Block eval(Page page) { try (Block fieldBlock = field.eval(page)) { if (fieldBlock.asVector() != null) { - return BooleanBlock.newConstantBlockWith(false, page.getPositionCount(), driverContext.blockFactory()); + return driverContext.blockFactory().newConstantBooleanBlockWith(false, page.getPositionCount()); } - try ( - BooleanVector.FixedBuilder builder = BooleanVector.newVectorFixedBuilder( - page.getPositionCount(), - driverContext.blockFactory() - ) - ) { + try (var builder = driverContext.blockFactory().newBooleanVectorFixedBuilder(page.getPositionCount())) { for (int p = 0; p < page.getPositionCount(); p++) { builder.appendBoolean(fieldBlock.isNull(p)); } @@ -321,14 +316,9 @@ record IsNotNullEvaluator(DriverContext driverContext, EvalOperator.ExpressionEv public Block eval(Page page) { try (Block fieldBlock = field.eval(page)) { if (fieldBlock.asVector() != null) { - return BooleanBlock.newConstantBlockWith(true, page.getPositionCount(), driverContext.blockFactory()); + return driverContext.blockFactory().newConstantBooleanBlockWith(true, page.getPositionCount()); } - try ( - BooleanVector.FixedBuilder builder = BooleanVector.newVectorFixedBuilder( - page.getPositionCount(), - driverContext.blockFactory() - ) - ) { + try (var builder = driverContext.blockFactory().newBooleanVectorFixedBuilder(page.getPositionCount())) { for (int p = 0; p < page.getPositionCount(); p++) { builder.appendBoolean(fieldBlock.isNull(p) == false); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java index 3ab555799ee34..e536547e006fd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java @@ -26,6 +26,16 @@ * Expressions that have a mapping to an {@link ExpressionEvaluator}. */ public interface EvaluatorMapper { + /** + * Build an {@link ExpressionEvaluator.Factory} for the tree of + * expressions rooted at this node. This is only guaranteed to return + * a sensible evaluator if this node has a valid type. If this node + * is a subclass of {@link Expression} then "valid type" means that + * {@link Expression#typeResolved} returns a non-error resolution. + * If {@linkplain Expression#typeResolved} returns an error then + * this method may throw. Or return an evaluator that produces + * garbage. Or return an evaluator that throws when run. 
+ */ ExpressionEvaluator.Factory toEvaluator(Function toEvaluator); /** diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java index 36c19825fab85..7f5a6079cc6d7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java @@ -29,7 +29,8 @@ public abstract class ComparisonMapper extends Expre org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsLongsEvaluator.Factory::new, org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsDoublesEvaluator.Factory::new, org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsKeywordsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsBoolsEvaluator.Factory::new + org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsBoolsEvaluator.Factory::new, + (s, l, r, t) -> new EqualsGeometriesEvaluator.Factory(s, l, r) ) { }; @@ -38,7 +39,8 @@ public abstract class ComparisonMapper extends Expre org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsLongsEvaluator.Factory::new, org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsDoublesEvaluator.Factory::new, org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsKeywordsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsBoolsEvaluator.Factory::new + org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsBoolsEvaluator.Factory::new, + (s, l, r, t) -> new NotEqualsGeometriesEvaluator.Factory(s, l, r) ) { }; @@ -79,6 +81,28 @@ public abstract class ComparisonMapper extends Expre private final TriFunction doubles; private final TriFunction keywords; private final TriFunction bools; + private final EvaluatorFunctionWithType geometries; + + @FunctionalInterface + private interface EvaluatorFunctionWithType { + ExpressionEvaluator.Factory apply(Source s, ExpressionEvaluator.Factory t, ExpressionEvaluator.Factory u, T dataType); + } + + private ComparisonMapper( + TriFunction ints, + TriFunction longs, + TriFunction doubles, + TriFunction keywords, + TriFunction bools, + EvaluatorFunctionWithType geometries + ) { + this.ints = ints; + this.longs = longs; + this.doubles = doubles; + this.keywords = keywords; + this.bools = bools; + this.geometries = geometries; + } private ComparisonMapper( TriFunction ints, @@ -92,6 +116,7 @@ private ComparisonMapper( this.doubles = doubles; this.keywords = keywords; this.bools = bools; + this.geometries = (source, lhs, rhs, dataType) -> { throw EsqlIllegalArgumentException.illegalDataType(dataType); }; } ComparisonMapper( @@ -105,6 +130,7 @@ private ComparisonMapper( this.doubles = doubles; this.keywords = keywords; this.bools = (source, lhs, rhs) -> { throw EsqlIllegalArgumentException.illegalDataType(DataTypes.BOOLEAN); }; + this.geometries = (source, lhs, rhs, dataType) -> { throw EsqlIllegalArgumentException.illegalDataType(dataType); }; } @Override @@ -138,11 +164,10 @@ public final ExpressionEvaluator.Factory map(BinaryComparison bc, Layout layout) return longs.apply(bc.source(), leftEval, 
rightEval); } if (leftType == EsqlDataTypes.GEO_POINT) { - return longs.apply(bc.source(), leftEval, rightEval); + return geometries.apply(bc.source(), leftEval, rightEval, leftType); } - // TODO: Perhaps neithger geo_point, not cartesian_point should support comparisons? if (leftType == EsqlDataTypes.CARTESIAN_POINT) { - return longs.apply(bc.source(), leftEval, rightEval); + return geometries.apply(bc.source(), leftEval, rightEval, leftType); } throw new EsqlIllegalArgumentException("resolved type for [" + bc + "] but didn't implement mapping"); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/Equals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/Equals.java index db3822f047573..9fb899b8e36df 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/Equals.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/Equals.java @@ -77,4 +77,9 @@ static boolean processKeywords(BytesRef lhs, BytesRef rhs) { static boolean processBools(boolean lhs, boolean rhs) { return lhs == rhs; } + + @Evaluator(extraName = "Geometries") + static boolean processGeometries(BytesRef lhs, BytesRef rhs) { + return lhs.equals(rhs); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java index 6ef37abf5a9b4..7b4e867adad91 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java @@ -8,11 +8,11 @@ package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BooleanArrayBlock; -import org.elasticsearch.compute.data.BooleanArrayVector; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BooleanVector; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.core.Releasables; @@ -41,10 +41,12 @@ public ExpressionEvaluator.Factory map(In in, Layout layout) { ExpressionEvaluator.Factory eqEvaluator = ((ExpressionMapper) EQUALS).map(eq, layout); listEvaluators.add(eqEvaluator); }); - return dvrCtx -> new InExpressionEvaluator(listEvaluators.stream().map(fac -> fac.get(dvrCtx)).toList()); + return dvrCtx -> new InExpressionEvaluator(dvrCtx, listEvaluators.stream().map(fac -> fac.get(dvrCtx)).toList()); } - record InExpressionEvaluator(List listEvaluators) implements EvalOperator.ExpressionEvaluator { + record InExpressionEvaluator(DriverContext driverContext, List listEvaluators) + implements + EvalOperator.ExpressionEvaluator { @Override public Block eval(Page page) { int positionCount = page.getPositionCount(); @@ -68,7 +70,7 @@ public Block eval(Page page) { } } - return evalWithNulls(values, nulls, nullInValues); + return evalWithNulls(driverContext.blockFactory(), values, nulls, nullInValues); } private static 
void updateValues(BooleanVector vector, boolean[] values) { @@ -94,9 +96,9 @@ private static void updateValues(BooleanBlock block, boolean[] values, BitSet nu } } - private static Block evalWithNulls(boolean[] values, BitSet nulls, boolean nullInValues) { + private static Block evalWithNulls(BlockFactory blockFactory, boolean[] values, BitSet nulls, boolean nullInValues) { if (nulls.isEmpty() && nullInValues == false) { - return new BooleanArrayVector(values, values.length).asBlock(); + return blockFactory.newBooleanArrayVector(values, values.length).asBlock(); } else { // 3VL: true trumps null; null trumps false. for (int i = 0; i < values.length; i++) { @@ -108,9 +110,9 @@ private static Block evalWithNulls(boolean[] values, BitSet nulls, boolean nullI } if (nulls.isEmpty()) { // no nulls and no multi-values means we must use a Vector - return new BooleanArrayVector(values, values.length).asBlock(); + return blockFactory.newBooleanArrayVector(values, values.length).asBlock(); } else { - return new BooleanArrayBlock(values, values.length, null, nulls, Block.MvOrdering.UNORDERED); + return blockFactory.newBooleanArrayBlock(values, values.length, null, nulls, Block.MvOrdering.UNORDERED); } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEquals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEquals.java index 67319bab11b19..6fbed572cdc01 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEquals.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEquals.java @@ -73,4 +73,9 @@ static boolean processKeywords(BytesRef lhs, BytesRef rhs) { static boolean processBools(boolean lhs, boolean rhs) { return lhs != rhs; } + + @Evaluator(extraName = "Geometries") + static boolean processGeometries(BytesRef lhs, BytesRef rhs) { + return false == lhs.equals(rhs); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index b0cdad5095bbe..5b72a601180d8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -32,6 +32,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToString; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToUnsignedLong; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToVersion; +import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateDiff; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateExtract; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateFormat; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateParse; @@ -66,6 +67,8 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvConcat; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvCount; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvDedupe; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvFirst; +import 
org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvLast; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMax; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMedian; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; @@ -160,6 +163,7 @@ private FunctionDefinition[][] functions() { def(EndsWith.class, EndsWith::new, "ends_with") }, // date new FunctionDefinition[] { + def(DateDiff.class, DateDiff::new, "date_diff"), def(DateExtract.class, DateExtract::new, "date_extract"), def(DateFormat.class, DateFormat::new, "date_format"), def(DateParse.class, DateParse::new, "date_parse"), @@ -192,6 +196,8 @@ private FunctionDefinition[][] functions() { def(MvConcat.class, MvConcat::new, "mv_concat"), def(MvCount.class, MvCount::new, "mv_count"), def(MvDedupe.class, MvDedupe::new, "mv_dedupe"), + def(MvFirst.class, MvFirst::new, "mv_first"), + def(MvLast.class, MvLast::new, "mv_last"), def(MvMax.class, MvMax::new, "mv_max"), def(MvMedian.class, MvMedian::new, "mv_median"), def(MvMin.class, MvMin::new, "mv_min"), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java index 959907ef93257..dcb52b6a3f2c1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.expression.function.aggregate; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.CountAggregatorFunction; import org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions; @@ -52,8 +51,8 @@ public DataType dataType() { } @Override - public AggregatorFunctionSupplier supplier(BigArrays bigArrays, List inputChannels) { - return CountAggregatorFunction.supplier(bigArrays, inputChannels); + public AggregatorFunctionSupplier supplier(List inputChannels) { + return CountAggregatorFunction.supplier(inputChannels); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java index 85330c80750e7..c49f9d6c45c1d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.expression.function.aggregate; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.CountDistinctBooleanAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.CountDistinctBytesRefAggregatorFunctionSupplier; @@ -71,24 +70,24 @@ protected TypeResolution resolveType() { } @Override - public AggregatorFunctionSupplier supplier(BigArrays bigArrays, List inputChannels) { + public AggregatorFunctionSupplier supplier(List inputChannels) { DataType type = field().dataType(); int precision = this.precision == null ? 
DEFAULT_PRECISION : ((Number) this.precision.fold()).intValue(); if (type == DataTypes.BOOLEAN) { // Booleans ignore the precision because there are only two possible values anyway - return new CountDistinctBooleanAggregatorFunctionSupplier(bigArrays, inputChannels); + return new CountDistinctBooleanAggregatorFunctionSupplier(inputChannels); } if (type == DataTypes.DATETIME || type == DataTypes.LONG) { - return new CountDistinctLongAggregatorFunctionSupplier(bigArrays, inputChannels, precision); + return new CountDistinctLongAggregatorFunctionSupplier(inputChannels, precision); } if (type == DataTypes.INTEGER) { - return new CountDistinctIntAggregatorFunctionSupplier(bigArrays, inputChannels, precision); + return new CountDistinctIntAggregatorFunctionSupplier(inputChannels, precision); } if (type == DataTypes.DOUBLE) { - return new CountDistinctDoubleAggregatorFunctionSupplier(bigArrays, inputChannels, precision); + return new CountDistinctDoubleAggregatorFunctionSupplier(inputChannels, precision); } if (type == DataTypes.KEYWORD || type == DataTypes.IP || type == DataTypes.TEXT) { - return new CountDistinctBytesRefAggregatorFunctionSupplier(bigArrays, inputChannels, precision); + return new CountDistinctBytesRefAggregatorFunctionSupplier(inputChannels, precision); } throw EsqlIllegalArgumentException.illegalDataType(type); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java index 7b65d4ba40b1e..0964ce2bd5d67 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.expression.function.aggregate; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MaxDoubleAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MaxIntAggregatorFunctionSupplier; @@ -46,17 +45,17 @@ public DataType dataType() { } @Override - protected AggregatorFunctionSupplier longSupplier(BigArrays bigArrays, List inputChannels) { - return new MaxLongAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier longSupplier(List inputChannels) { + return new MaxLongAggregatorFunctionSupplier(inputChannels); } @Override - protected AggregatorFunctionSupplier intSupplier(BigArrays bigArrays, List inputChannels) { - return new MaxIntAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier intSupplier(List inputChannels) { + return new MaxIntAggregatorFunctionSupplier(inputChannels); } @Override - protected AggregatorFunctionSupplier doubleSupplier(BigArrays bigArrays, List inputChannels) { - return new MaxDoubleAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier doubleSupplier(List inputChannels) { + return new MaxDoubleAggregatorFunctionSupplier(inputChannels); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MedianAbsoluteDeviation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MedianAbsoluteDeviation.java index cb7bac2c2f66e..6fafbeae8e1f4 100644 --- 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MedianAbsoluteDeviation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MedianAbsoluteDeviation.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.expression.function.aggregate; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MedianAbsoluteDeviationDoubleAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MedianAbsoluteDeviationIntAggregatorFunctionSupplier; @@ -36,17 +35,17 @@ public MedianAbsoluteDeviation replaceChildren(List newChildren) { } @Override - protected AggregatorFunctionSupplier longSupplier(BigArrays bigArrays, List inputChannels) { - return new MedianAbsoluteDeviationLongAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier longSupplier(List inputChannels) { + return new MedianAbsoluteDeviationLongAggregatorFunctionSupplier(inputChannels); } @Override - protected AggregatorFunctionSupplier intSupplier(BigArrays bigArrays, List inputChannels) { - return new MedianAbsoluteDeviationIntAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier intSupplier(List inputChannels) { + return new MedianAbsoluteDeviationIntAggregatorFunctionSupplier(inputChannels); } @Override - protected AggregatorFunctionSupplier doubleSupplier(BigArrays bigArrays, List inputChannels) { - return new MedianAbsoluteDeviationDoubleAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier doubleSupplier(List inputChannels) { + return new MedianAbsoluteDeviationDoubleAggregatorFunctionSupplier(inputChannels); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java index 6a0e4aa52e721..9625322fb72c8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.expression.function.aggregate; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MinDoubleAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.MinIntAggregatorFunctionSupplier; @@ -46,17 +45,17 @@ protected boolean supportsDates() { } @Override - protected AggregatorFunctionSupplier longSupplier(BigArrays bigArrays, List inputChannels) { - return new MinLongAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier longSupplier(List inputChannels) { + return new MinLongAggregatorFunctionSupplier(inputChannels); } @Override - protected AggregatorFunctionSupplier intSupplier(BigArrays bigArrays, List inputChannels) { - return new MinIntAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier intSupplier(List inputChannels) { + return new MinIntAggregatorFunctionSupplier(inputChannels); } @Override - protected AggregatorFunctionSupplier doubleSupplier(BigArrays bigArrays, List inputChannels) { - return new MinDoubleAggregatorFunctionSupplier(bigArrays, inputChannels); + protected 
AggregatorFunctionSupplier doubleSupplier(List inputChannels) { + return new MinDoubleAggregatorFunctionSupplier(inputChannels); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java index 6d3ef52c8965a..297aeb7fc0e29 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.esql.expression.function.aggregate; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.planner.ToAggregator; @@ -57,26 +56,26 @@ public DataType dataType() { } @Override - public final AggregatorFunctionSupplier supplier(BigArrays bigArrays, List inputChannels) { + public final AggregatorFunctionSupplier supplier(List inputChannels) { DataType type = field().dataType(); if (supportsDates() && type == DataTypes.DATETIME) { - return longSupplier(bigArrays, inputChannels); + return longSupplier(inputChannels); } if (type == DataTypes.LONG) { - return longSupplier(bigArrays, inputChannels); + return longSupplier(inputChannels); } if (type == DataTypes.INTEGER) { - return intSupplier(bigArrays, inputChannels); + return intSupplier(inputChannels); } if (type == DataTypes.DOUBLE) { - return doubleSupplier(bigArrays, inputChannels); + return doubleSupplier(inputChannels); } throw EsqlIllegalArgumentException.illegalDataType(type); } - protected abstract AggregatorFunctionSupplier longSupplier(BigArrays bigArrays, List inputChannels); + protected abstract AggregatorFunctionSupplier longSupplier(List inputChannels); - protected abstract AggregatorFunctionSupplier intSupplier(BigArrays bigArrays, List inputChannels); + protected abstract AggregatorFunctionSupplier intSupplier(List inputChannels); - protected abstract AggregatorFunctionSupplier doubleSupplier(BigArrays bigArrays, List inputChannels); + protected abstract AggregatorFunctionSupplier doubleSupplier(List inputChannels); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java index 9e4eccb964de4..9620e112fbda7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.expression.function.aggregate; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.PercentileDoubleAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.PercentileIntAggregatorFunctionSupplier; @@ -60,18 +59,18 @@ protected TypeResolution resolveType() { } @Override - protected AggregatorFunctionSupplier longSupplier(BigArrays bigArrays, List inputChannels) { - return new PercentileLongAggregatorFunctionSupplier(bigArrays, inputChannels, percentileValue()); + protected 
AggregatorFunctionSupplier longSupplier(List inputChannels) { + return new PercentileLongAggregatorFunctionSupplier(inputChannels, percentileValue()); } @Override - protected AggregatorFunctionSupplier intSupplier(BigArrays bigArrays, List inputChannels) { - return new PercentileIntAggregatorFunctionSupplier(bigArrays, inputChannels, percentileValue()); + protected AggregatorFunctionSupplier intSupplier(List inputChannels) { + return new PercentileIntAggregatorFunctionSupplier(inputChannels, percentileValue()); } @Override - protected AggregatorFunctionSupplier doubleSupplier(BigArrays bigArrays, List inputChannels) { - return new PercentileDoubleAggregatorFunctionSupplier(bigArrays, inputChannels, percentileValue()); + protected AggregatorFunctionSupplier doubleSupplier(List inputChannels) { + return new PercentileDoubleAggregatorFunctionSupplier(inputChannels, percentileValue()); } private int percentileValue() { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Sum.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Sum.java index 1f4c493613353..115e2f9759fa9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Sum.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Sum.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.esql.expression.function.aggregate; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.SumDoubleAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.SumIntAggregatorFunctionSupplier; @@ -48,17 +47,17 @@ public DataType dataType() { } @Override - protected AggregatorFunctionSupplier longSupplier(BigArrays bigArrays, List inputChannels) { - return new SumLongAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier longSupplier(List inputChannels) { + return new SumLongAggregatorFunctionSupplier(inputChannels); } @Override - protected AggregatorFunctionSupplier intSupplier(BigArrays bigArrays, List inputChannels) { - return new SumIntAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier intSupplier(List inputChannels) { + return new SumIntAggregatorFunctionSupplier(inputChannels); } @Override - protected AggregatorFunctionSupplier doubleSupplier(BigArrays bigArrays, List inputChannels) { - return new SumDoubleAggregatorFunctionSupplier(bigArrays, inputChannels); + protected AggregatorFunctionSupplier doubleSupplier(List inputChannels) { + return new SumDoubleAggregatorFunctionSupplier(inputChannels); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPoint.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPoint.java index bb384ae846f26..da3f1bd829a7f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPoint.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPoint.java @@ -9,6 +9,8 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.ann.ConvertEvaluator; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import 
org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; @@ -21,20 +23,19 @@ import static org.elasticsearch.xpack.ql.type.DataTypes.KEYWORD; import static org.elasticsearch.xpack.ql.type.DataTypes.LONG; import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; -import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG; import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; public class ToCartesianPoint extends AbstractConvertFunction { private static final Map EVALUATORS = Map.ofEntries( Map.entry(CARTESIAN_POINT, (fieldEval, source) -> fieldEval), - Map.entry(LONG, (fieldEval, source) -> fieldEval), - Map.entry(UNSIGNED_LONG, (fieldEval, source) -> fieldEval), + Map.entry(LONG, ToCartesianPointFromLongEvaluator.Factory::new), Map.entry(KEYWORD, ToCartesianPointFromStringEvaluator.Factory::new), Map.entry(TEXT, ToCartesianPointFromStringEvaluator.Factory::new) ); - public ToCartesianPoint(Source source, Expression field) { + @FunctionInfo(returnType = "cartesian_point") + public ToCartesianPoint(Source source, @Param(name = "v", type = { "cartesian_point", "long", "keyword", "text" }) Expression field) { super(source, field); } @@ -59,7 +60,12 @@ protected NodeInfo info() { } @ConvertEvaluator(extraName = "FromString", warnExceptions = { IllegalArgumentException.class }) - static long fromKeyword(BytesRef in) { - return CARTESIAN.pointAsLong(CARTESIAN.stringAsPoint(in.utf8ToString())); + static BytesRef fromKeyword(BytesRef in) { + return CARTESIAN.stringAsWKB(in.utf8ToString()); + } + + @ConvertEvaluator(extraName = "FromLong", warnExceptions = { IllegalArgumentException.class }) + static BytesRef fromLong(long encoded) { + return CARTESIAN.longAsWKB(encoded); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPoint.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPoint.java index 75ef5c324541b..f981bf7b3923b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPoint.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPoint.java @@ -9,6 +9,8 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.ann.ConvertEvaluator; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; @@ -21,20 +23,19 @@ import static org.elasticsearch.xpack.ql.type.DataTypes.KEYWORD; import static org.elasticsearch.xpack.ql.type.DataTypes.LONG; import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; -import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG; import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; public class ToGeoPoint extends AbstractConvertFunction { private static final Map EVALUATORS = Map.ofEntries( Map.entry(GEO_POINT, (fieldEval, source) -> fieldEval), - Map.entry(LONG, (fieldEval, source) -> fieldEval), - Map.entry(UNSIGNED_LONG, (fieldEval, source) -> fieldEval), + Map.entry(LONG, ToGeoPointFromLongEvaluator.Factory::new), Map.entry(KEYWORD, 
ToGeoPointFromStringEvaluator.Factory::new), Map.entry(TEXT, ToGeoPointFromStringEvaluator.Factory::new) ); - public ToGeoPoint(Source source, Expression field) { + @FunctionInfo(returnType = "geo_point") + public ToGeoPoint(Source source, @Param(name = "v", type = { "geo_point", "long", "keyword", "text" }) Expression field) { super(source, field); } @@ -59,7 +60,12 @@ protected NodeInfo info() { } @ConvertEvaluator(extraName = "FromString", warnExceptions = { IllegalArgumentException.class }) - static long fromKeyword(BytesRef in) { - return GEO.pointAsLong(GEO.stringAsPoint(in.utf8ToString())); + static BytesRef fromKeyword(BytesRef in) { + return GEO.stringAsWKB(in.utf8ToString()); + } + + @ConvertEvaluator(extraName = "FromLong", warnExceptions = { IllegalArgumentException.class }) + static BytesRef fromLong(long encoded) { + return GEO.longAsWKB(encoded); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java index 0a2546297f038..06f56e81fc50d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java @@ -12,7 +12,6 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.ql.InvalidArgumentException; -import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; @@ -34,14 +33,16 @@ import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG; import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsNumber; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; public class ToLong extends AbstractConvertFunction { private static final Map EVALUATORS = Map.ofEntries( Map.entry(LONG, (fieldEval, source) -> fieldEval), Map.entry(DATETIME, (fieldEval, source) -> fieldEval), - Map.entry(GEO_POINT, (fieldEval, source) -> fieldEval), - Map.entry(CARTESIAN_POINT, (fieldEval, source) -> fieldEval), + Map.entry(GEO_POINT, ToLongFromGeoPointEvaluator.Factory::new), + Map.entry(CARTESIAN_POINT, ToLongFromCartesianPointEvaluator.Factory::new), Map.entry(BOOLEAN, ToLongFromBooleanEvaluator.Factory::new), Map.entry(KEYWORD, ToLongFromStringEvaluator.Factory::new), Map.entry(TEXT, ToLongFromStringEvaluator.Factory::new), @@ -100,12 +101,12 @@ static long fromKeyword(BytesRef in) { } } - @ConvertEvaluator(extraName = "FromDouble", warnExceptions = { InvalidArgumentException.class, QlIllegalArgumentException.class }) + @ConvertEvaluator(extraName = "FromDouble", warnExceptions = { InvalidArgumentException.class }) static long fromDouble(double dbl) { return safeDoubleToLong(dbl); } - @ConvertEvaluator(extraName = "FromUnsignedLong", warnExceptions = { InvalidArgumentException.class, QlIllegalArgumentException.class }) + @ConvertEvaluator(extraName = "FromUnsignedLong", warnExceptions = { InvalidArgumentException.class }) static long fromUnsignedLong(long ul) { return safeToLong(unsignedLongAsNumber(ul)); } 
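A quick sketch of the spatial round-trip these conversions now perform, assuming the SpatialCoordinateTypes helpers (stringAsWKB, wkbAsLong, longAsWKB, wkbAsString) behave as they are used in this diff; the class below is illustrative scaffolding only, not part of the change:

```java
import org.apache.lucene.util.BytesRef;

import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO;

class SpatialRoundTrip {
    public static void main(String[] args) {
        BytesRef wkb = GEO.stringAsWKB("POINT (10.0 20.0)"); // TO_GEOPOINT from a string: WKT -> WKB
        long encoded = GEO.wkbAsLong(wkb);                   // TO_LONG from a geo_point: WKB -> encoded long
        BytesRef back = GEO.longAsWKB(encoded);              // TO_GEOPOINT from a long: encoded long -> WKB
        System.out.println(GEO.wkbAsString(back));           // TO_STRING: WKB -> WKT, approximately the input
    }
}
```

Note the direction of the change: before this diff, geo_point and cartesian_point values were already encoded longs, so TO_LONG was a pass-through; with WKB as the internal representation an explicit decode (wkbAsLong) is needed, and since the long encoding quantizes coordinates the round-trip through a long is lossy.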
@@ -114,4 +115,14 @@ static long fromUnsignedLong(long ul) { static long fromInt(int i) { return i; } + + @ConvertEvaluator(extraName = "FromGeoPoint") + static long fromGeoPoint(BytesRef wkb) { + return GEO.wkbAsLong(wkb); + } + + @ConvertEvaluator(extraName = "FromCartesianPoint") + static long fromCartesianPoint(BytesRef wkb) { + return CARTESIAN.wkbAsLong(wkb); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java index 41d8f87aee436..26baac4f8bcb6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java @@ -140,12 +140,12 @@ static BytesRef fromUnsignedLong(long lng) { } @ConvertEvaluator(extraName = "FromGeoPoint") - static BytesRef fromGeoPoint(long point) { - return new BytesRef(GEO.pointAsString(GEO.longAsPoint(point))); + static BytesRef fromGeoPoint(BytesRef wkb) { + return new BytesRef(GEO.wkbAsString(wkb)); } @ConvertEvaluator(extraName = "FromCartesianPoint") - static BytesRef fromCartesianPoint(long point) { - return new BytesRef(CARTESIAN.pointAsString(CARTESIAN.longAsPoint(point))); + static BytesRef fromCartesianPoint(BytesRef wkb) { + return new BytesRef(CARTESIAN.wkbAsString(wkb)); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLong.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLong.java index cfa24cd6d8ff8..651259db06054 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLong.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLong.java @@ -12,7 +12,6 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.ql.InvalidArgumentException; -import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; @@ -86,7 +85,7 @@ static long fromKeyword(BytesRef in) { return asLongUnsigned(safeToUnsignedLong(asString)); } - @ConvertEvaluator(extraName = "FromDouble", warnExceptions = { InvalidArgumentException.class, QlIllegalArgumentException.class }) + @ConvertEvaluator(extraName = "FromDouble", warnExceptions = { InvalidArgumentException.class }) static long fromDouble(double dbl) { return asLongUnsigned(safeToUnsignedLong(dbl)); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiff.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiff.java new file mode 100644 index 0000000000000..63184774540b0 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiff.java @@ -0,0 +1,220 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.date; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.ann.Evaluator; +import org.elasticsearch.compute.ann.Fixed; +import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.function.OptionalArgument; +import org.elasticsearch.xpack.ql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.ql.expression.gen.script.ScriptTemplate; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypes; + +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.temporal.ChronoUnit; +import java.time.temporal.IsoFields; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.function.BiFunction; +import java.util.function.Function; + +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.FIRST; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.SECOND; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.THIRD; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isDate; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isString; +import static org.elasticsearch.xpack.ql.type.DataTypeConverter.safeToInt; + +/** + * Subtract the second argument from the third argument and return their difference + * in multiples of the unit specified in the first argument. + * If the second argument (start) is greater than the third argument (end), then negative values are returned. 
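+ * For example, with unit "hours" the result is 24 when the end timestamp is exactly one day after the start timestamp.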
+ */ +public class DateDiff extends ScalarFunction implements OptionalArgument, EvaluatorMapper { + + public static final ZoneId UTC = ZoneId.of("Z"); + + private final Expression unit; + private final Expression startTimestamp; + private final Expression endTimestamp; + + /** + * Represents units that can be used for DATE_DIFF function and how the difference + * between 2 dates is calculated + */ + public enum Part implements DateTimeField { + + YEAR((start, end) -> end.getYear() - start.getYear(), "years", "yyyy", "yy"), + QUARTER((start, end) -> safeToInt(IsoFields.QUARTER_YEARS.between(start, end)), "quarters", "qq", "q"), + MONTH((start, end) -> safeToInt(ChronoUnit.MONTHS.between(start, end)), "months", "mm", "m"), + DAYOFYEAR((start, end) -> safeToInt(ChronoUnit.DAYS.between(start, end)), "dy", "y"), + DAY(DAYOFYEAR::diff, "days", "dd", "d"), + WEEK((start, end) -> safeToInt(ChronoUnit.WEEKS.between(start, end)), "weeks", "wk", "ww"), + WEEKDAY(DAYOFYEAR::diff, "weekdays", "dw"), + HOUR((start, end) -> safeToInt(ChronoUnit.HOURS.between(start, end)), "hours", "hh"), + MINUTE((start, end) -> safeToInt(ChronoUnit.MINUTES.between(start, end)), "minutes", "mi", "n"), + SECOND((start, end) -> safeToInt(ChronoUnit.SECONDS.between(start, end)), "seconds", "ss", "s"), + MILLISECOND((start, end) -> safeToInt(ChronoUnit.MILLIS.between(start, end)), "milliseconds", "ms"), + MICROSECOND((start, end) -> safeToInt(ChronoUnit.MICROS.between(start, end)), "microseconds", "mcs"), + NANOSECOND((start, end) -> safeToInt(ChronoUnit.NANOS.between(start, end)), "nanoseconds", "ns"); + + private static final Map NAME_TO_PART = DateTimeField.initializeResolutionMap(values()); + + private final BiFunction diffFunction; + private final Set aliases; + + Part(BiFunction diffFunction, String... 
aliases) { + this.diffFunction = diffFunction; + this.aliases = Set.of(aliases); + } + + public Integer diff(ZonedDateTime startTimestamp, ZonedDateTime endTimestamp) { + return diffFunction.apply(startTimestamp, endTimestamp); + } + + @Override + public Iterable aliases() { + return aliases; + } + + public static Part resolve(String dateTimeUnit) { + Part datePartField = DateTimeField.resolveMatch(NAME_TO_PART, dateTimeUnit); + if (datePartField == null) { + List similar = DateTimeField.findSimilar(NAME_TO_PART.keySet(), dateTimeUnit); + String errorMessage; + if (similar.isEmpty() == false) { + errorMessage = String.format( + Locale.ROOT, + "Received value [%s] is not a valid date part; did you mean %s?", + dateTimeUnit, + similar + ); + } else { + errorMessage = String.format( + Locale.ROOT, + "A value of %s or one of their aliases is required; received [%s]", + Arrays.asList(Part.values()), + dateTimeUnit + ); + } + throw new IllegalArgumentException(errorMessage); + } + + return datePartField; + } + } + + @FunctionInfo( + returnType = "integer", + description = "Subtracts the startTimestamp from the endTimestamp and returns the difference in multiples of the unit specified in the 1st argument" + ) + public DateDiff( + Source source, + @Param(name = "unit", type = { "keyword", "text" }, description = "A valid date unit") Expression unit, + @Param( + name = "startTimestamp", + type = { "date" }, + description = "A date expression representing the start timestamp" + ) Expression startTimestamp, + @Param(name = "endTimestamp", type = { "date" }, description = "A date expression representing the end timestamp") Expression endTimestamp + ) { + super(source, List.of(unit, startTimestamp, endTimestamp)); + this.unit = unit; + this.startTimestamp = startTimestamp; + this.endTimestamp = endTimestamp; + } + + @Evaluator(extraName = "Constant", warnExceptions = { IllegalArgumentException.class, InvalidArgumentException.class }) + static int process(@Fixed Part datePartFieldUnit, long startTimestamp, long endTimestamp) throws IllegalArgumentException { + ZonedDateTime zdtStart = ZonedDateTime.ofInstant(Instant.ofEpochMilli(startTimestamp), UTC); + ZonedDateTime zdtEnd = ZonedDateTime.ofInstant(Instant.ofEpochMilli(endTimestamp), UTC); + return datePartFieldUnit.diff(zdtStart, zdtEnd); + } + + @Evaluator(warnExceptions = { IllegalArgumentException.class, InvalidArgumentException.class }) + static int process(BytesRef unit, long startTimestamp, long endTimestamp) throws IllegalArgumentException { + return process(Part.resolve(unit.utf8ToString()), startTimestamp, endTimestamp); + } + + @Override + public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { + ExpressionEvaluator.Factory startTimestampEvaluator = toEvaluator.apply(startTimestamp); + ExpressionEvaluator.Factory endTimestampEvaluator = toEvaluator.apply(endTimestamp); + + if (unit.foldable()) { + try { + Part datePartField = Part.resolve(((BytesRef) unit.fold()).utf8ToString()); + return new DateDiffConstantEvaluator.Factory(source(), datePartField, startTimestampEvaluator, endTimestampEvaluator); + } catch (IllegalArgumentException e) { + throw new InvalidArgumentException("invalid unit format for [{}]: {}", sourceText(), e.getMessage()); + } + } + ExpressionEvaluator.Factory unitEvaluator = toEvaluator.apply(unit); + return new DateDiffEvaluator.Factory(source(), unitEvaluator, startTimestampEvaluator, endTimestampEvaluator); + } + + @Override + protected TypeResolution resolveType() { + if (childrenResolved() == false) { + return new TypeResolution("Unresolved children"); + } + +
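+ // unit (1st argument) must resolve to a string; startTimestamp and endTimestamp (2nd and 3rd) must resolve to dates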
TypeResolution resolution = isString(unit, sourceText(), FIRST).and(isDate(startTimestamp, sourceText(), SECOND)) + .and(isDate(endTimestamp, sourceText(), THIRD)); + + if (resolution.unresolved()) { + return resolution; + } + + return TypeResolution.TYPE_RESOLVED; + } + + @Override + public Object fold() { + return EvaluatorMapper.super.fold(); + } + + @Override + public boolean foldable() { + return unit.foldable() && startTimestamp.foldable() && endTimestamp.foldable(); + } + + @Override + public DataType dataType() { + return DataTypes.INTEGER; + } + + @Override + public ScriptTemplate asScript() { + throw new UnsupportedOperationException("functions do not support scripting"); + } + + @Override + public Expression replaceChildren(List newChildren) { + return new DateDiff(source(), newChildren.get(0), newChildren.get(1), newChildren.get(2)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, DateDiff::new, children().get(0), children().get(1), children().get(2)); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java index c5d5dc5054653..eadea746a1bd1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java @@ -12,8 +12,8 @@ import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; -import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; +import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.TypeResolutions; import org.elasticsearch.xpack.ql.expression.function.scalar.ConfigurationFunction; @@ -49,7 +49,7 @@ public ExpressionEvaluator.Factory toEvaluator(Function Map initializeResolutionMap(D[] values) { + Map nameToPart = new HashMap<>(); + + for (D datePart : values) { + String lowerCaseName = datePart.name().toLowerCase(Locale.ROOT); + + nameToPart.put(lowerCaseName, datePart); + for (String alias : datePart.aliases()) { + nameToPart.put(alias, datePart); + } + } + return Collections.unmodifiableMap(nameToPart); + } + + static List initializeValidValues(D[] values) { + return Arrays.stream(values).map(D::name).collect(Collectors.toList()); + } + + static D resolveMatch(Map resolutionMap, String possibleMatch) { + return resolutionMap.get(possibleMatch.toLowerCase(Locale.ROOT)); + } + + static List findSimilar(Iterable similars, String match) { + return StringUtils.findSimilar(match, similars); + } + + String name(); + + Iterable aliases(); +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cast.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cast.java index e2162a481d5cc..1c9f42de2f640 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cast.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cast.java @@ -11,7 +11,7 @@ import org.elasticsearch.compute.operator.EvalOperator; import 
org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; -import org.elasticsearch.xpack.ql.QlIllegalArgumentException; +import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; @@ -88,9 +88,10 @@ static long castIntToUnsignedLong(int v) { } @Evaluator(extraName = "LongToUnsignedLong") + // TODO: catch-to-null in evaluator? static long castLongToUnsignedLong(long v) { if (v < 0) { - throw new QlIllegalArgumentException("[" + v + "] out of [unsigned_long] range"); + throw new InvalidArgumentException("[" + v + "] out of [unsigned_long] range"); } return v; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsFinite.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsFinite.java index 012b8ce25f258..f4bf8a628c9b6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsFinite.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsFinite.java @@ -9,6 +9,8 @@ import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; @@ -17,7 +19,9 @@ import java.util.function.Function; public class IsFinite extends RationalUnaryPredicate { - public IsFinite(Source source, Expression field) { + + @FunctionInfo(returnType = "boolean", description = "Returns true if the argument is a finite floating-point value.") + public IsFinite(Source source, @Param(name = "n", type = { "double" }, description = "A floating-point value") Expression field) { super(source, field); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsInfinite.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsInfinite.java index 80068f3aaf8d4..c0c3b3149f3d0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsInfinite.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsInfinite.java @@ -9,6 +9,8 @@ import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; @@ -17,7 +19,12 @@ import java.util.function.Function; public class IsInfinite extends RationalUnaryPredicate { - public IsInfinite(Source source, Expression field) { + + @FunctionInfo( + returnType = "boolean", + description = "Returns true if the specified floating-point value is infinitely large in magnitude." 
+ ) + public IsInfinite(Source source, @Param(name = "n", type = { "double" }, description = "A floating-point value") Expression field) { super(source, field); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsNaN.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsNaN.java index 07875987f74d7..27ddd39c86c21 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsNaN.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsNaN.java @@ -9,6 +9,8 @@ import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; @@ -17,7 +19,9 @@ import java.util.function.Function; public class IsNaN extends RationalUnaryPredicate { - public IsNaN(Source source, Expression field) { + + @FunctionInfo(returnType = "boolean", description = "Returns true if the argument is a Not-a-Number (NaN) value.") + public IsNaN(Source source, @Param(name = "n", type = { "double" }, description = "A floating-point value") Expression field) { super(source, field); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java index e3bb8212aebab..9edb67db668d0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java @@ -9,6 +9,7 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.core.Releasables; @@ -54,8 +55,8 @@ public final ExpressionEvaluator.Factory toEvaluator(java.util.function.Function * Base evaluator that can handle both nulls- and no-nulls-containing blocks. */ public abstract static class AbstractEvaluator extends AbstractNullableEvaluator { - protected AbstractEvaluator(EvalOperator.ExpressionEvaluator field) { - super(field); + protected AbstractEvaluator(DriverContext driverContext, EvalOperator.ExpressionEvaluator field) { + super(driverContext, field); } /** @@ -102,9 +103,11 @@ public final Block eval(Page page) { * Base evaluator that can handle evaluator-checked exceptions; i.e. for expressions that can be evaluated to null. 
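+ * The {@link DriverContext} passed in here lets implementations allocate result blocks from its block factory.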
*/ public abstract static class AbstractNullableEvaluator implements EvalOperator.ExpressionEvaluator { + protected final DriverContext driverContext; protected final EvalOperator.ExpressionEvaluator field; - protected AbstractNullableEvaluator(EvalOperator.ExpressionEvaluator field) { + protected AbstractNullableEvaluator(DriverContext driverContext, EvalOperator.ExpressionEvaluator field) { + this.driverContext = driverContext; this.field = field; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcat.java index 9f5c492d7fe7c..5df0ac03206c4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcat.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcat.java @@ -123,7 +123,7 @@ private static class Evaluator implements ExpressionEvaluator { public final Block eval(Page page) { try (BytesRefBlock fieldVal = (BytesRefBlock) field.eval(page); BytesRefBlock delimVal = (BytesRefBlock) delim.eval(page)) { int positionCount = page.getPositionCount(); - try (BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(positionCount, context.blockFactory())) { + try (BytesRefBlock.Builder builder = context.blockFactory().newBytesRefBlockBuilder(positionCount)) { BytesRefBuilder work = new BytesRefBuilder(); // TODO BreakingBytesRefBuilder so we don't blow past circuit breakers BytesRef fieldScratch = new BytesRef(); BytesRef delimScratch = new BytesRef(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java index bf05aeee4d228..9e4482bd48682 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java @@ -8,8 +8,6 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.IntBlock; -import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; @@ -94,11 +92,8 @@ public String toString() { } private static class Evaluator extends AbstractEvaluator { - private final DriverContext driverContext; - protected Evaluator(DriverContext driverContext, EvalOperator.ExpressionEvaluator field) { - super(field); - this.driverContext = driverContext; + super(driverContext, field); } @Override @@ -108,7 +103,7 @@ protected String name() { @Override protected Block evalNullable(Block block) { - try (var builder = IntBlock.newBlockBuilder(block.getPositionCount(), driverContext.blockFactory())) { + try (var builder = driverContext.blockFactory().newIntBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int valueCount = block.getValueCount(p); if (valueCount == 0) { @@ -123,7 +118,7 @@ protected Block evalNullable(Block block) { @Override protected Block evalNotNullable(Block block) { - try (var builder = 
IntVector.newVectorFixedBuilder(block.getPositionCount(), driverContext.blockFactory())) { + try (var builder = driverContext.blockFactory().newIntVectorFixedBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { builder.appendInt(block.getValueCount(p)); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java new file mode 100644 index 0000000000000..1acb135292995 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.ann.MvEvaluator; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; + +import java.util.List; + +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isType; + +/** + * Reduce a multivalued field to a single valued field containing the first value.
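+ * For a single-valued field this simply returns the value.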
+ */ +public class MvFirst extends AbstractMultivalueFunction { + @FunctionInfo(returnType = "?", description = "Reduce a multivalued field to a single valued field containing the first value.") + public MvFirst( + Source source, + @Param( + name = "v", + type = { + "unsigned_long", + "date", + "boolean", + "double", + "ip", + "text", + "integer", + "keyword", + "version", + "long", + "geo_point", + "cartesian_point" } + ) Expression field + ) { + super(source, field); + } + + @Override + protected TypeResolution resolveFieldType() { + return isType(field(), EsqlDataTypes::isRepresentable, sourceText(), null, "representable"); + } + + @Override + protected ExpressionEvaluator.Factory evaluator(ExpressionEvaluator.Factory fieldEval) { + return switch (PlannerUtils.toElementType(field().dataType())) { + case BOOLEAN -> new MvFirstBooleanEvaluator.Factory(fieldEval); + case BYTES_REF -> new MvFirstBytesRefEvaluator.Factory(fieldEval); + case DOUBLE -> new MvFirstDoubleEvaluator.Factory(fieldEval); + case INT -> new MvFirstIntEvaluator.Factory(fieldEval); + case LONG -> new MvFirstLongEvaluator.Factory(fieldEval); + case NULL -> EvalOperator.CONSTANT_NULL_FACTORY; + default -> throw EsqlIllegalArgumentException.illegalDataType(field.dataType()); + }; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new MvFirst(source(), newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, MvFirst::new, field()); + } + + @MvEvaluator(extraName = "Boolean") + static boolean process(BooleanBlock block, int start, int end) { + return block.getBoolean(start); + } + + @MvEvaluator(extraName = "Long") + static long process(LongBlock block, int start, int end) { + return block.getLong(start); + } + + @MvEvaluator(extraName = "Int") + static int process(IntBlock block, int start, int end) { + return block.getInt(start); + } + + @MvEvaluator(extraName = "Double") + static double process(DoubleBlock block, int start, int end) { + return block.getDouble(start); + } + + @MvEvaluator(extraName = "BytesRef") + static BytesRef process(BytesRefBlock block, int start, int end, BytesRef scratch) { + return block.getBytesRef(start, scratch); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java new file mode 100644 index 0000000000000..2e6066a6dc98c --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.ann.MvEvaluator; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; + +import java.util.List; + +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isType; + +/** + * Reduce a multivalued field to a single valued field containing the last value. + */ +public class MvLast extends AbstractMultivalueFunction { + @FunctionInfo(returnType = "?", description = "Reduce a multivalued field to a single valued field containing the last value.") + public MvLast( + Source source, + @Param( + name = "v", + type = { + "unsigned_long", + "date", + "boolean", + "double", + "ip", + "text", + "integer", + "keyword", + "version", + "long", + "geo_point", + "cartesian_point" } + ) Expression field + ) { + super(source, field); + } + + @Override + protected TypeResolution resolveFieldType() { + return isType(field(), EsqlDataTypes::isRepresentable, sourceText(), null, "representable"); + } + + @Override + protected ExpressionEvaluator.Factory evaluator(ExpressionEvaluator.Factory fieldEval) { + return switch (PlannerUtils.toElementType(field().dataType())) { + case BOOLEAN -> new MvLastBooleanEvaluator.Factory(fieldEval); + case BYTES_REF -> new MvLastBytesRefEvaluator.Factory(fieldEval); + case DOUBLE -> new MvLastDoubleEvaluator.Factory(fieldEval); + case INT -> new MvLastIntEvaluator.Factory(fieldEval); + case LONG -> new MvLastLongEvaluator.Factory(fieldEval); + case NULL -> EvalOperator.CONSTANT_NULL_FACTORY; + default -> throw EsqlIllegalArgumentException.illegalDataType(field.dataType()); + }; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new MvLast(source(), newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, MvLast::new, field()); + } + + @MvEvaluator(extraName = "Boolean") + static boolean process(BooleanBlock block, int start, int end) { + return block.getBoolean(end - 1); + } + + @MvEvaluator(extraName = "Long") + static long process(LongBlock block, int start, int end) { + return block.getLong(end - 1); + } + + @MvEvaluator(extraName = "Int") + static int process(IntBlock block, int start, int end) { + return block.getInt(end - 1); + } + + @MvEvaluator(extraName = "Double") + static double process(DoubleBlock block, int start, int end) { + return block.getDouble(end - 1); + } + + @MvEvaluator(extraName = "BytesRef") + static BytesRef process(BytesRefBlock block, int start, int end, BytesRef scratch) { + return block.getBytesRef(end - 1, scratch); + } +}
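`MvFirst` and `MvLast` share the same `process(block, start, end)` contract: the generated evaluator hands each function the value range [start, end) for one position, and the function picks a single slot from it. A tiny positional illustration with plain lists standing in for blocks (not the real generated evaluators):

```java
import java.util.List;

// Hedged sketch of the [start, end) range contract used by the process(...) methods above.
final class MvFirstLastSketch {
    static long first(List<Long> values, int start, int end) {
        return values.get(start); // MvFirst: first slot of the range
    }

    static long last(List<Long> values, int start, int end) {
        return values.get(end - 1); // MvLast: last slot of the range
    }

    public static void main(String[] args) {
        List<Long> block = List.of(10L, 20L, 30L); // one position holding three values
        System.out.println(first(block, 0, block.size())); // 10
        System.out.println(last(block, 0, block.size())); // 30
    }
}
```

diff --git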
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSum.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSum.java index a0abced909c48..48b83aa205549 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSum.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSum.java @@ -45,7 +45,7 @@ protected ExpressionEvaluator.Factory evaluator(ExpressionEvaluator.Factory fiel case LONG -> field().dataType() == DataTypes.UNSIGNED_LONG ? new MvSumUnsignedLongEvaluator.Factory(source(), fieldEval) : new MvSumLongEvaluator.Factory(source(), fieldEval); - case NULL -> dvrCtx -> EvalOperator.CONSTANT_NULL; + case NULL -> EvalOperator.CONSTANT_NULL_FACTORY; default -> throw EsqlIllegalArgumentException.illegalDataType(field.dataType()); }; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java index 1470c3ec1e5ae..c0caaf8b180ce 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java @@ -100,7 +100,7 @@ * {@code ./gradlew -p x-pack/plugin/esql/ check} * *
- * Now it's time to write some docs! Open {@code docs/reference/esql/esql-functions.asciidoc} + * Now it's time to write some docs! Open {@code docs/reference/esql/esql-functions-operators.asciidoc} + * and add your function in alphabetical order to the list at the top and then add it to + * the includes below. *
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Split.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Split.java index 950486b1b0eed..7e6b3659bbdf0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Split.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Split.java @@ -13,7 +13,7 @@ import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; -import org.elasticsearch.xpack.ql.QlIllegalArgumentException; +import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.function.scalar.BinaryScalarFunction; import org.elasticsearch.xpack.ql.tree.NodeInfo; @@ -102,9 +102,7 @@ static void process( BytesRef delim, @Fixed(includeInToString = false, build = true) BytesRef scratch ) { - if (delim.length != 1) { - throw new QlIllegalArgumentException("delimiter must be single byte for now"); - } + checkDelimiter(delim); process(builder, str, delim.bytes[delim.offset], scratch); } @@ -125,9 +123,13 @@ public ExpressionEvaluator.Factory toEvaluator(Function new BytesRef()); } BytesRef delim = (BytesRef) right().fold(); + checkDelimiter(delim); + return new SplitSingleByteEvaluator.Factory(source(), str, delim.bytes[delim.offset], context -> new BytesRef()); + } + + private static void checkDelimiter(BytesRef delim) { if (delim.length != 1) { - throw new QlIllegalArgumentException("for now delimiter must be a single byte"); + throw new InvalidArgumentException("delimiter must be single byte for now"); } - return new SplitSingleByteEvaluator.Factory(source(), str, delim.bytes[delim.offset], context -> new BytesRef()); } }
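The `Split` change above centralizes the single-byte delimiter check in one `checkDelimiter` helper and also calls it eagerly in `toEvaluator`, so a constant multi-byte delimiter now fails once, at evaluator construction, instead of on every row. A rough standalone sketch of that validation, with `IllegalArgumentException` standing in for `InvalidArgumentException` and `byte[]` for `BytesRef`:

```java
import java.nio.charset.StandardCharsets;

// Hedged sketch of the eager delimiter validation, not the ES implementation.
final class SplitDelimiterSketch {
    static void checkDelimiter(byte[] delim) {
        if (delim.length != 1) {
            throw new IllegalArgumentException("delimiter must be single byte for now");
        }
    }

    public static void main(String[] args) {
        checkDelimiter(",".getBytes(StandardCharsets.UTF_8)); // ok: one byte
        try {
            checkDelimiter("::".getBytes(StandardCharsets.UTF_8)); // two bytes: rejected up front
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
```

diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java index 0132301cb79b5..e29502920d3d8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java @@ -13,7 +13,6 @@ import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; -import org.elasticsearch.xpack.ql.type.DataTypes; import java.time.Duration; import java.time.Period; @@ -26,6 +25,9 @@ import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.TIME_DURATION; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isDateTimeOrTemporal; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount; +import static org.elasticsearch.xpack.ql.type.DataTypes.DATETIME; +import static org.elasticsearch.xpack.ql.type.DataTypes.isDateTime; +import static org.elasticsearch.xpack.ql.type.DataTypes.isNull; abstract class DateTimeArithmeticOperation extends EsqlArithmeticOperation { /** Arithmetic (quad) function.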
*/ @@ -57,16 +59,16 @@ protected TypeResolution resolveType() { // Date math is only possible if either // - one argument is a DATETIME and the other a (foldable) TemporalValue, or - // - both arguments are TemporalValues (so we can fold them). + // - both arguments are TemporalValues (so we can fold them), or + // - one argument is NULL and the other one a DATETIME. if (isDateTimeOrTemporal(leftType) || isDateTimeOrTemporal(rightType)) { - if ((leftType == DataTypes.DATETIME && isTemporalAmount(rightType)) - || (rightType == DataTypes.DATETIME && isTemporalAmount(leftType))) { + if (isNull(leftType) || isNull(rightType)) { return TypeResolution.TYPE_RESOLVED; } - if (leftType == TIME_DURATION && rightType == TIME_DURATION) { + if ((isDateTime(leftType) && isTemporalAmount(rightType)) || (isTemporalAmount(leftType) && isDateTime(rightType))) { return TypeResolution.TYPE_RESOLVED; } - if (leftType == DATE_PERIOD && rightType == DATE_PERIOD) { + if (isTemporalAmount(leftType) && isTemporalAmount(rightType) && leftType == rightType) { return TypeResolution.TYPE_RESOLVED; } @@ -126,16 +128,19 @@ public final Object fold() { throw ExceptionUtils.math(source(), e); } } + if (isNull(leftDataType) || isNull(rightDataType)) { + return null; + } return super.fold(); } @Override public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { - if (dataType() == DataTypes.DATETIME) { + if (dataType() == DATETIME) { // One of the arguments has to be a datetime and the other a temporal amount. Expression datetimeArgument; Expression temporalAmountArgument; - if (left().dataType() == DataTypes.DATETIME) { + if (left().dataType() == DATETIME) { datetimeArgument = left(); temporalAmountArgument = right(); } else { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormat.java index b6bb7b8d74429..ac8f9560074f5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormat.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormat.java @@ -8,7 +8,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.geo.SpatialPoint; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.xcontent.MediaType; @@ -291,18 +290,10 @@ public Iterator> format(RestRequest request hasHeader(request) && esqlResponse.columns() != null ? 
Iterators.single(writer -> row(writer, esqlResponse.columns().iterator(), ColumnInfo::name, delimiter)) : Collections.emptyIterator(), - Iterators.map(esqlResponse.values(), row -> writer -> row(writer, row, TextFormat::formatEsqlResultObject, delimiter)) + Iterators.map(esqlResponse.values(), row -> writer -> row(writer, row, f -> Objects.toString(f, StringUtils.EMPTY), delimiter)) ); } - private static String formatEsqlResultObject(Object obj) { - // TODO: It would be nicer to override GeoPoint.toString() but that has consequences - if (obj instanceof SpatialPoint point) { - return String.format(Locale.ROOT, "POINT (%.7f %.7f)", point.getX(), point.getY()); - } - return Objects.toString(obj, StringUtils.EMPTY); - } - boolean hasHeader(RestRequest request) { return true; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormatter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormatter.java index 48196d2bffbde..0535e4adfe346 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormatter.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormatter.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.esql.formatter; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.geo.SpatialPoint; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; @@ -16,7 +15,6 @@ import java.io.Writer; import java.util.Collections; import java.util.Iterator; -import java.util.Locale; import java.util.Objects; import java.util.function.Function; @@ -31,7 +29,7 @@ public class TextFormatter { private final EsqlQueryResponse response; private final int[] width; - private final Function FORMATTER = TextFormatter::formatEsqlResultObject; + private final Function FORMATTER = Objects::toString; /** * Create a new {@linkplain TextFormatter} for formatting responses. 
@@ -130,12 +128,4 @@ private static void writePadding(int padding, Writer writer) throws IOException writer.append(PADDING_64, 0, padding); } } - - private static String formatEsqlResultObject(Object obj) { - // TODO: It would be nicer to override GeoPoint.toString() but that has consequences - if (obj instanceof SpatialPoint point) { - return String.format(Locale.ROOT, "POINT (%.7f %.7f)", point.getX(), point.getY()); - } - return Objects.toString(obj); - } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index a0e9c620d0fce..ee37b34e58d39 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.esql.io.stream; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; @@ -49,6 +51,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToString; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToUnsignedLong; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToVersion; +import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateDiff; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateExtract; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateFormat; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateParse; @@ -84,6 +87,8 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvConcat; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvCount; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvDedupe; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvFirst; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvLast; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMax; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMedian; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; @@ -172,6 +177,7 @@ import org.elasticsearch.xpack.ql.plan.logical.OrderBy; import org.elasticsearch.xpack.ql.plan.logical.Project; import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DateEsField; import org.elasticsearch.xpack.ql.type.EsField; import org.elasticsearch.xpack.ql.type.InvalidMappedField; @@ -190,6 +196,10 @@ import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.Entry.of; import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanReader.readerFromPlanReader; import static org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter.writerFromPlanWriter; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.CARTESIAN_POINT; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_POINT; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; /** * A utility class that consists solely of static methods that 
describe how to serialize and @@ -343,6 +353,7 @@ public static List namedTypeEntries() { of(ScalarFunction.class, CIDRMatch.class, PlanNamedTypes::writeCIDRMatch, PlanNamedTypes::readCIDRMatch), of(ScalarFunction.class, Coalesce.class, PlanNamedTypes::writeVararg, PlanNamedTypes::readVarag), of(ScalarFunction.class, Concat.class, PlanNamedTypes::writeVararg, PlanNamedTypes::readVarag), + of(ScalarFunction.class, DateDiff.class, PlanNamedTypes::writeDateDiff, PlanNamedTypes::readDateDiff), of(ScalarFunction.class, DateExtract.class, PlanNamedTypes::writeDateExtract, PlanNamedTypes::readDateExtract), of(ScalarFunction.class, DateFormat.class, PlanNamedTypes::writeDateFormat, PlanNamedTypes::readDateFormat), of(ScalarFunction.class, DateParse.class, PlanNamedTypes::writeDateTimeParse, PlanNamedTypes::readDateTimeParse), @@ -383,6 +394,8 @@ public static List namedTypeEntries() { of(ScalarFunction.class, MvCount.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), of(ScalarFunction.class, MvConcat.class, PlanNamedTypes::writeMvConcat, PlanNamedTypes::readMvConcat), of(ScalarFunction.class, MvDedupe.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), + of(ScalarFunction.class, MvFirst.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), + of(ScalarFunction.class, MvLast.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), of(ScalarFunction.class, MvMax.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), of(ScalarFunction.class, MvMedian.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), of(ScalarFunction.class, MvMin.class, PlanNamedTypes::writeMvFunction, PlanNamedTypes::readMvFunction), @@ -1287,6 +1300,19 @@ static void writeCountDistinct(PlanStreamOutput out, CountDistinct countDistinct out.writeOptionalWriteable(fields.size() == 2 ? 
o -> out.writeExpression(fields.get(1)) : null); } + static DateDiff readDateDiff(PlanStreamInput in) throws IOException { + return new DateDiff(in.readSource(), in.readExpression(), in.readExpression(), in.readExpression()); + } + + static void writeDateDiff(PlanStreamOutput out, DateDiff function) throws IOException { + out.writeNoSource(); + List fields = function.children(); + assert fields.size() == 3; + out.writeExpression(fields.get(0)); + out.writeExpression(fields.get(1)); + out.writeExpression(fields.get(2)); + } + static DateExtract readDateExtract(PlanStreamInput in) throws IOException { return new DateExtract(in.readSource(), in.readExpression(), in.readExpression(), in.configuration()); } @@ -1522,6 +1548,8 @@ static void writeAggFunction(PlanStreamOutput out, AggregateFunction aggregateFu entry(name(MvAvg.class), MvAvg::new), entry(name(MvCount.class), MvCount::new), entry(name(MvDedupe.class), MvDedupe::new), + entry(name(MvFirst.class), MvFirst::new), + entry(name(MvLast.class), MvLast::new), entry(name(MvMax.class), MvMax::new), entry(name(MvMedian.class), MvMedian::new), entry(name(MvMin.class), MvMin::new), @@ -1572,15 +1600,64 @@ static void writeAlias(PlanStreamOutput out, Alias alias) throws IOException { // -- Expressions (other) static Literal readLiteral(PlanStreamInput in) throws IOException { - return new Literal(in.readSource(), in.readGenericValue(), in.dataTypeFromTypeName(in.readString())); + Source source = in.readSource(); + Object value = in.readGenericValue(); + DataType dataType = in.dataTypeFromTypeName(in.readString()); + return new Literal(source, mapToLiteralValue(in, dataType, value), dataType); } static void writeLiteral(PlanStreamOutput out, Literal literal) throws IOException { out.writeNoSource(); - out.writeGenericValue(literal.value()); + out.writeGenericValue(mapFromLiteralValue(out, literal.dataType(), literal.value())); out.writeString(literal.dataType().typeName()); } + /** + * Not all literal values are currently supported in StreamInput/StreamOutput as generic values. + * This mapper allows for addition of new and interesting values without (yet) adding to StreamInput/Output. + * This makes the most sense during the pre-GA version of ESQL. When we get near GA we might want to push this down. + *

    + * For the spatial point type support we need to care about the fact that 8.12.0 uses encoded longs for serializing + * while 8.13 uses WKB. + */ + private static Object mapFromLiteralValue(PlanStreamOutput out, DataType dataType, Object value) { + if (dataType == GEO_POINT || dataType == CARTESIAN_POINT) { + // In 8.12.0 and earlier builds of 8.13 (pre-release) we serialized point literals as encoded longs, but now use WKB + if (out.getTransportVersion().before(TransportVersions.ESQL_PLAN_POINT_LITERAL_WKB)) { + if (value instanceof List list) { + return list.stream().map(v -> mapFromLiteralValue(out, dataType, v)).toList(); + } + return wkbAsLong(dataType, (BytesRef) value); + } + } + return value; + } + + /** + * Not all literal values are currently supported in StreamInput/StreamOutput as generic values. + * This mapper allows for addition of new and interesting values without (yet) changing StreamInput/Output. + */ + private static Object mapToLiteralValue(PlanStreamInput in, DataType dataType, Object value) { + if (dataType == GEO_POINT || dataType == CARTESIAN_POINT) { + // In 8.12.0 and earlier builds of 8.13 (pre-release) we serialized point literals as encoded longs, but now use WKB + if (in.getTransportVersion().before(TransportVersions.ESQL_PLAN_POINT_LITERAL_WKB)) { + if (value instanceof List list) { + return list.stream().map(v -> mapToLiteralValue(in, dataType, v)).toList(); + } + return longAsWKB(dataType, (Long) value); + } + } + return value; + } + + private static BytesRef longAsWKB(DataType dataType, long encoded) { + return dataType == GEO_POINT ? GEO.longAsWKB(encoded) : CARTESIAN.longAsWKB(encoded); + } + + private static long wkbAsLong(DataType dataType, BytesRef wkb) { + return dataType == GEO_POINT ? GEO.wkbAsLong(wkb) : CARTESIAN.wkbAsLong(wkb); + } + static Order readOrder(PlanStreamInput in) throws IOException { return new org.elasticsearch.xpack.esql.expression.Order( in.readSource(), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java index 66bd4163013ee..ac894ce7a099e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.esql.io.stream; -import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; @@ -26,18 +26,19 @@ * A customized stream output used to serialize ESQL physical plan fragments. Complements stream * output with methods that write plan nodes, Attributes, Expressions, etc. 
*/ -public final class PlanStreamOutput extends OutputStreamStreamOutput { +public final class PlanStreamOutput extends StreamOutput { + private final StreamOutput delegate; private final PlanNameRegistry registry; private final Function, String> nameSupplier; - public PlanStreamOutput(StreamOutput streamOutput, PlanNameRegistry registry) { - this(streamOutput, registry, PlanNamedTypes::name); + public PlanStreamOutput(StreamOutput delegate, PlanNameRegistry registry) { + this(delegate, registry, PlanNamedTypes::name); } - public PlanStreamOutput(StreamOutput streamOutput, PlanNameRegistry registry, Function, String> nameSupplier) { - super(streamOutput); + public PlanStreamOutput(StreamOutput delegate, PlanNameRegistry registry, Function, String> nameSupplier) { + this.delegate = delegate; this.registry = registry; this.nameSupplier = nameSupplier; } @@ -89,4 +90,35 @@ public void writeNamed(Class type, T value) throws IOException { writeString(name); writer.write(this, value); } + + @Override + public void writeByte(byte b) throws IOException { + delegate.writeByte(b); + } + + @Override + public void writeBytes(byte[] b, int offset, int length) throws IOException { + delegate.writeBytes(b, offset, length); + } + + @Override + public void flush() throws IOException { + delegate.flush(); + } + + @Override + public void close() throws IOException { + delegate.close(); + } + + @Override + public TransportVersion getTransportVersion() { + return delegate.getTransportVersion(); + } + + @Override + public void setTransportVersion(TransportVersion version) { + delegate.setTransportVersion(version); + super.setTransportVersion(version); + } }
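The rewrite above turns `PlanStreamOutput` from an `OutputStreamStreamOutput` subclass into a plain wrapper that forwards every write to a delegate, and keeps `getTransportVersion()`/`setTransportVersion()` in sync with that delegate, so version-gated serialization (such as the point-literal WKB mapping earlier in this diff) sees the negotiated wire version. A hedged sketch of the same wrap-and-forward pattern over `java.io.OutputStream`, with a plain int standing in for `TransportVersion`:

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

// Hedged sketch: forward writes to the delegate and mirror its version state.
final class DelegatingOutputSketch extends OutputStream {
    private final OutputStream delegate;
    private int version; // stand-in for TransportVersion

    DelegatingOutputSketch(OutputStream delegate) {
        this.delegate = delegate;
    }

    @Override
    public void write(int b) throws IOException {
        delegate.write(b); // every write lands on the wrapped stream
    }

    @Override
    public void flush() throws IOException {
        delegate.flush();
    }

    @Override
    public void close() throws IOException {
        delegate.close();
    }

    void setVersion(int version) { // keep wrapper and delegate consistent
        this.version = version;
    }

    int getVersion() {
        return version;
    }

    public static void main(String[] args) throws IOException {
        try (var out = new DelegatingOutputSketch(new ByteArrayOutputStream())) {
            out.setVersion(8_500_000); // hypothetical wire version id
            out.write(42);
            System.out.println(out.getVersion()); // 8500000
        }
    }
}
```

diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java index e05dd9a00c567..86ef7b6d1e618 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java @@ -7,28 +7,47 @@ package org.elasticsearch.xpack.esql.optimizer; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; +import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer.PropagateEmptyRelation; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.TopN; +import org.elasticsearch.xpack.esql.planner.AbstractPhysicalOperationProviders; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.stats.SearchStats; import org.elasticsearch.xpack.ql.expression.Alias; +import org.elasticsearch.xpack.ql.expression.Attribute; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.FieldAttribute; import org.elasticsearch.xpack.ql.expression.Literal; import org.elasticsearch.xpack.ql.expression.NamedExpression; +import org.elasticsearch.xpack.ql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.ql.expression.predicate.Predicates; +import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNotNull; import 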
org.elasticsearch.xpack.ql.optimizer.OptimizerRules; import org.elasticsearch.xpack.ql.plan.logical.Aggregate; import org.elasticsearch.xpack.ql.plan.logical.EsRelation; +import org.elasticsearch.xpack.ql.plan.logical.Filter; import org.elasticsearch.xpack.ql.plan.logical.Limit; import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.ql.plan.logical.OrderBy; import org.elasticsearch.xpack.ql.plan.logical.Project; import org.elasticsearch.xpack.ql.rule.ParameterizedRule; import org.elasticsearch.xpack.ql.rule.ParameterizedRuleExecutor; +import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypes; import java.util.ArrayList; import java.util.List; +import java.util.Set; +import static java.util.Arrays.asList; +import static org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer.cleanup; +import static org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer.operators; import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.TransformDirection.UP; public class LocalLogicalPlanOptimizer extends ParameterizedRuleExecutor { @@ -44,16 +63,30 @@ protected List> batches() { Limiter.ONCE, new ReplaceTopNWithLimitAndSort(), new ReplaceMissingFieldWithNull(), - new InferIsNotNull() + new InferIsNotNull(), + new InferNonNullAggConstraint() ); var rules = new ArrayList>(); rules.add(local); // TODO: if the local rules haven't touched the tree, the rest of the rules can be skipped - rules.addAll(LogicalPlanOptimizer.rules()); + rules.addAll(asList(operators(), cleanup())); + replaceRules(rules); return rules; } + private List> replaceRules(List> listOfRules) { + for (Batch batch : listOfRules) { + var rules = batch.rules(); + for (int i = 0; i < rules.length; i++) { + if (rules[i] instanceof PropagateEmptyRelation) { + rules[i] = new LocalPropagateEmptyRelation(); + } + } + } + return listOfRules; + } + public LogicalPlan localOptimize(LogicalPlan plan) { return execute(plan); } @@ -132,6 +165,84 @@ protected boolean skipExpression(Expression e) { } } + /** + * Local aggregation can only produce intermediate state that gets wired into the global agg. + */ + private static class LocalPropagateEmptyRelation extends PropagateEmptyRelation { + + /** + * Local variant of the aggregation that returns the intermediate value. + */ + @Override + protected void aggOutput(NamedExpression agg, AggregateFunction aggFunc, BlockFactory blockFactory, List blocks) { + List output = AbstractPhysicalOperationProviders.intermediateAttributes(List.of(agg), List.of()); + for (Attribute o : output) { + DataType dataType = o.dataType(); + // boolean right now is used for the internal #seen so always return true + var value = dataType == DataTypes.BOOLEAN ? true + // look for count(literal) with literal != null + : aggFunc instanceof Count count && (count.foldable() == false || count.fold() != null) ? 0L + // otherwise nullify + : null; + var wrapper = BlockUtils.wrapperFor(blockFactory, PlannerUtils.toElementType(dataType), 1); + wrapper.accept(value); + blocks.add(wrapper.builder().build()); + } + } + } + + /** + * The vast majority of aggs ignore null entries - this rule adds a pushable filter, as it is cheap + * to execute, to filter these entries out to begin with. + * STATS x = min(a), y = sum(b) + * becomes + * | WHERE a IS NOT NULL OR b IS NOT NULL + * | STATS x = min(a), y = sum(b) + *
+ * Unfortunately this optimization cannot be applied when grouping is necessary, since it can filter out + * groups containing only null values. + */ + static class InferNonNullAggConstraint extends ParameterizedOptimizerRule { + + @Override + protected LogicalPlan rule(Aggregate aggregate, LocalLogicalOptimizerContext context) { + // only look at aggregates with default grouping + if (aggregate.groupings().size() > 0) { + return aggregate; + } + + SearchStats stats = context.searchStats(); + LogicalPlan plan = aggregate; + var aggs = aggregate.aggregates(); + Set nonNullAggFields = Sets.newLinkedHashSetWithExpectedSize(aggs.size()); + for (var agg : aggs) { + Expression expr = agg; + if (agg instanceof Alias as) { + expr = as.child(); + } + if (expr instanceof AggregateFunction af) { + Expression field = af.field(); + // ignore literals (e.g. COUNT(1)) + // make sure the field exists at the source and is indexed (not runtime) + if (field.foldable() == false && field instanceof FieldAttribute fa && stats.isIndexed(fa.name())) { + nonNullAggFields.add(field); + } else { + // otherwise bail out - unless the disjunction covers _all_ the agg fields, rows needed by the other aggs would be filtered out + return plan; + } + } + } + + if (nonNullAggFields.size() > 0) { + Expression condition = Predicates.combineOr( + nonNullAggFields.stream().map(f -> (Expression) new IsNotNull(aggregate.source(), f)).toList() + ); + plan = aggregate.replaceChild(new Filter(aggregate.source(), aggregate.child(), condition)); + } + return plan; + } + } + abstract static class ParameterizedOptimizerRule extends ParameterizedRule { public final LogicalPlan apply(LogicalPlan plan, P context) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java index 55ead7aa3fe4e..e59d80ed96b76 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java @@ -10,7 +10,6 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; @@ -29,9 +28,10 @@ import org.elasticsearch.xpack.esql.plan.physical.TopNExec; import org.elasticsearch.xpack.esql.plan.physical.UnaryExec; import org.elasticsearch.xpack.esql.planner.AbstractPhysicalOperationProviders; +import org.elasticsearch.xpack.esql.planner.EsqlTranslatorHandler; import org.elasticsearch.xpack.esql.planner.PhysicalVerificationException; import org.elasticsearch.xpack.esql.planner.PhysicalVerifier; -import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; +import org.elasticsearch.xpack.esql.stats.SearchStats; import org.elasticsearch.xpack.ql.common.Failure; import org.elasticsearch.xpack.ql.expression.Alias; import org.elasticsearch.xpack.ql.expression.Attribute; @@ -43,18 +43,19 @@ import org.elasticsearch.xpack.ql.expression.NamedExpression; import org.elasticsearch.xpack.ql.expression.Order; import org.elasticsearch.xpack.ql.expression.TypedAttribute; -import 
org.elasticsearch.xpack.ql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.ql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.ql.expression.predicate.Predicates; import org.elasticsearch.xpack.ql.expression.predicate.logical.BinaryLogic; import org.elasticsearch.xpack.ql.expression.predicate.logical.Not; +import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNotNull; +import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNull; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; import org.elasticsearch.xpack.ql.expression.predicate.regex.RegexMatch; import org.elasticsearch.xpack.ql.expression.predicate.regex.WildcardLike; -import org.elasticsearch.xpack.ql.planner.ExpressionTranslator; -import org.elasticsearch.xpack.ql.planner.QlTranslatorHandler; import org.elasticsearch.xpack.ql.querydsl.query.Query; import org.elasticsearch.xpack.ql.rule.ParameterizedRuleExecutor; import org.elasticsearch.xpack.ql.rule.Rule; +import org.elasticsearch.xpack.ql.type.DataTypes; import org.elasticsearch.xpack.ql.util.Queries; import org.elasticsearch.xpack.ql.util.Queries.Clause; import org.elasticsearch.xpack.ql.util.StringUtils; @@ -65,7 +66,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Set; -import java.util.function.Supplier; +import java.util.function.Predicate; import java.util.stream.Collectors; import static java.util.Arrays.asList; @@ -76,7 +77,7 @@ import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.TransformDirection.UP; public class LocalPhysicalPlanOptimizer extends ParameterizedRuleExecutor { - public static final QlTranslatorHandler TRANSLATOR_HANDLER = new EsqlTranslatorHandler(); + public static final EsqlTranslatorHandler TRANSLATOR_HANDLER = new EsqlTranslatorHandler(); private final PhysicalVerifier verifier = new PhysicalVerifier(); @@ -195,18 +196,22 @@ private static Set missingAttributes(PhysicalPlan p) { } } - public static class PushFiltersToSource extends OptimizerRule { + public static class PushFiltersToSource extends PhysicalOptimizerRules.ParameterizedOptimizerRule< + FilterExec, + LocalPhysicalOptimizerContext> { + @Override - protected PhysicalPlan rule(FilterExec filterExec) { + protected PhysicalPlan rule(FilterExec filterExec, LocalPhysicalOptimizerContext ctx) { PhysicalPlan plan = filterExec; if (filterExec.child() instanceof EsQueryExec queryExec) { List pushable = new ArrayList<>(); List nonPushable = new ArrayList<>(); for (Expression exp : splitAnd(filterExec.condition())) { - (canPushToSource(exp) ? pushable : nonPushable).add(exp); + (canPushToSource(exp, x -> hasIdenticalDelegate(x, ctx.searchStats())) ? 
pushable : nonPushable).add(exp); } if (pushable.size() > 0) { // update the executable with pushable conditions - QueryBuilder planQuery = TRANSLATOR_HANDLER.asQuery(Predicates.combineAnd(pushable)).asBuilder(); + Query queryDSL = TRANSLATOR_HANDLER.asQuery(Predicates.combineAnd(pushable)); + QueryBuilder planQuery = queryDSL.asBuilder(); var query = Queries.combine(Clause.FILTER, asList(queryExec.query(), planQuery)); queryExec = new EsQueryExec( queryExec.source(), @@ -228,24 +233,30 @@ protected PhysicalPlan rule(FilterExec filterExec) { return plan; } - public static boolean canPushToSource(Expression exp) { + public static boolean canPushToSource(Expression exp, Predicate hasIdenticalDelegate) { if (exp instanceof BinaryComparison bc) { - return isAttributePushable(bc.left(), bc) && bc.right().foldable(); + return isAttributePushable(bc.left(), bc, hasIdenticalDelegate) && bc.right().foldable(); } else if (exp instanceof BinaryLogic bl) { - return canPushToSource(bl.left()) && canPushToSource(bl.right()); - } else if (exp instanceof RegexMatch rm) { - return isAttributePushable(rm.field(), rm); + return canPushToSource(bl.left(), hasIdenticalDelegate) && canPushToSource(bl.right(), hasIdenticalDelegate); } else if (exp instanceof In in) { - return isAttributePushable(in.value(), null) && Expressions.foldable(in.list()); + return isAttributePushable(in.value(), null, hasIdenticalDelegate) && Expressions.foldable(in.list()); } else if (exp instanceof Not not) { - return canPushToSource(not.field()); + return canPushToSource(not.field(), hasIdenticalDelegate); + } else if (exp instanceof UnaryScalarFunction usf) { + if (usf instanceof RegexMatch || usf instanceof IsNull || usf instanceof IsNotNull) { + return isAttributePushable(usf.field(), usf, hasIdenticalDelegate); + } } return false; } - private static boolean isAttributePushable(Expression expression, ScalarFunction operation) { - if (expression instanceof FieldAttribute f && f.getExactInfo().hasExact()) { - return isAggregatable(f); + private static boolean isAttributePushable( + Expression expression, + Expression operation, + Predicate hasIdenticalDelegate + ) { + if (isPushableFieldAttribute(expression, hasIdenticalDelegate)) { + return true; } if (expression instanceof MetadataAttribute ma && ma.searchable()) { return operation == null @@ -281,15 +292,17 @@ protected PhysicalPlan rule(LimitExec limitExec) { } } - private static class PushTopNToSource extends OptimizerRule { + private static class PushTopNToSource extends PhysicalOptimizerRules.ParameterizedOptimizerRule< + TopNExec, + LocalPhysicalOptimizerContext> { @Override - protected PhysicalPlan rule(TopNExec topNExec) { + protected PhysicalPlan rule(TopNExec topNExec, LocalPhysicalOptimizerContext ctx) { PhysicalPlan plan = topNExec; PhysicalPlan child = topNExec.child(); boolean canPushDownTopN = child instanceof EsQueryExec || (child instanceof ExchangeExec exchangeExec && exchangeExec.child() instanceof EsQueryExec); - if (canPushDownTopN && canPushDownOrders(topNExec.order())) { + if (canPushDownTopN && canPushDownOrders(topNExec.order(), x -> hasIdenticalDelegate(x, ctx.searchStats()))) { var sorts = buildFieldSorts(topNExec.order()); var limit = topNExec.limit(); @@ -302,10 +315,9 @@ protected PhysicalPlan rule(TopNExec topNExec) { return plan; } - private boolean canPushDownOrders(List orders) { + private boolean canPushDownOrders(List orders, Predicate hasIdenticalDelegate) { // allow only exact FieldAttributes (no expressions) for sorting - return 
orders.stream() - .allMatch(o -> o.child() instanceof FieldAttribute fa && fa.getExactInfo().hasExact() && isAggregatable(fa)); + return orders.stream().allMatch(o -> isPushableFieldAttribute(o.child(), hasIdenticalDelegate)); } private List buildFieldSorts(List orders) { @@ -404,22 +416,15 @@ private Tuple, List> pushableStats(AggregateExec aggregate } } - private static final class EsqlTranslatorHandler extends QlTranslatorHandler { - @Override - public Query wrapFunctionQuery(ScalarFunction sf, Expression field, Supplier querySupplier) { - if (field instanceof FieldAttribute fa) { - if (fa.getExactInfo().hasExact()) { - var exact = fa.exactAttribute(); - if (exact != fa) { - fa = exact; - } - } - return ExpressionTranslator.wrapIfNested(new SingleValueQuery(querySupplier.get(), fa.name()), field); - } - if (field instanceof MetadataAttribute) { - return querySupplier.get(); // MetadataAttributes are always single valued - } - throw new EsqlIllegalArgumentException("Expected a FieldAttribute or MetadataAttribute but received [" + field + "]"); + public static boolean hasIdenticalDelegate(FieldAttribute attr, SearchStats stats) { + return stats.hasIdenticalDelegate(attr.name()); + } + + public static boolean isPushableFieldAttribute(Expression exp, Predicate hasIdenticalDelegate) { + if (exp instanceof FieldAttribute fa && fa.getExactInfo().hasExact() && isAggregatable(fa)) { + return fa.dataType() != DataTypes.TEXT || hasIdenticalDelegate.test(fa); } + return false; } + } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index 66654b78c3af4..e4f67838731a0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -25,7 +25,6 @@ import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; -import org.elasticsearch.xpack.esql.planner.AbstractPhysicalOperationProviders; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Alias; @@ -63,7 +62,6 @@ import org.elasticsearch.xpack.ql.rule.ParameterizedRuleExecutor; import org.elasticsearch.xpack.ql.rule.Rule; import org.elasticsearch.xpack.ql.tree.Source; -import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; import org.elasticsearch.xpack.ql.util.CollectionUtils; import org.elasticsearch.xpack.ql.util.Holder; @@ -101,17 +99,8 @@ protected List> batches() { return rules(); } - protected static List> rules() { - var substitutions = new Batch<>( - "Substitutions", - Limiter.ONCE, - new SubstituteSurrogates(), - new ReplaceRegexMatch(), - new ReplaceAliasingEvalWithProject() - // new NormalizeAggregate(), - waits on https://github.com/elastic/elasticsearch/issues/100634 - ); - - var operators = new Batch<>( + protected static Batch operators() { + return new Batch<>( "Operator Optimization", new CombineProjections(), new CombineEvals(), @@ -146,19 +135,33 @@ protected static List> rules() { new PruneOrderByBeforeStats(), new PruneRedundantSortClauses() ); + } - var skip = new Batch<>("Skip Compute", new SkipQueryOnLimitZero()); - var cleanup 
= new Batch<>( + protected static Batch cleanup() { + return new Batch<>( "Clean Up", new ReplaceDuplicateAggWithEval(), // pushing down limits again, because ReplaceDuplicateAggWithEval could create new Project nodes that can still be optimized new PushDownAndCombineLimits(), new ReplaceLimitAndSortAsTopN() ); + } + + protected static List> rules() { + var substitutions = new Batch<>( + "Substitutions", + Limiter.ONCE, + new SubstituteSurrogates(), + new ReplaceRegexMatch(), + new ReplaceAliasingEvalWithProject() + // new NormalizeAggregate(), - waits on https://github.com/elastic/elasticsearch/issues/100634 + ); + + var skip = new Batch<>("Skip Compute", new SkipQueryOnLimitZero()); var defaultTopN = new Batch<>("Add default TopN", new AddDefaultTopN()); var label = new Batch<>("Set as Optimized", Limiter.ONCE, new SetAsOptimized()); - return asList(substitutions, operators, skip, cleanup, defaultTopN, label); + return asList(substitutions, operators(), skip, cleanup(), defaultTopN, label); } // TODO: currently this rule only works for aggregate functions (AVG) @@ -633,6 +636,7 @@ protected LogicalPlan rule(UnaryPlan plan) { } } + @SuppressWarnings("removal") static class PropagateEmptyRelation extends OptimizerRules.OptimizerRule { @Override @@ -650,29 +654,14 @@ protected LogicalPlan rule(UnaryPlan plan) { return p; } - private static List aggsFromEmpty(List aggs) { - // TODO: Should we introduce skip operator that just never queries the source + private List aggsFromEmpty(List aggs) { List blocks = new ArrayList<>(); - var blockFactory = BlockFactory.getNonBreakingInstance(); + var blockFactory = PlannerUtils.NON_BREAKING_BLOCK_FACTORY; int i = 0; for (var agg : aggs) { // there needs to be an alias if (agg instanceof Alias a && a.child() instanceof AggregateFunction aggFunc) { - List output = AbstractPhysicalOperationProviders.intermediateAttributes(List.of(agg), List.of()); - for (Attribute o : output) { - DataType dataType = o.dataType(); - // fill the boolean block later in LocalExecutionPlanner - if (dataType != DataTypes.BOOLEAN) { - // look for count(literal) with literal != null - var wrapper = BlockUtils.wrapperFor(blockFactory, PlannerUtils.toElementType(dataType), 1); - if (aggFunc instanceof Count count && (count.foldable() == false || count.fold() != null)) { - wrapper.accept(0L); - } else { - wrapper.accept(null); - } - blocks.add(wrapper.builder().build()); - } - } + aggOutput(agg, aggFunc, blockFactory, blocks); } else { throw new EsqlIllegalArgumentException("Did not expect a non-aliased aggregation {}", agg); } @@ -680,6 +669,16 @@ private static List aggsFromEmpty(List aggs) { return blocks; } + /** + * The folded aggregation output - this variant is for the coordinator/final. + */ + protected void aggOutput(NamedExpression agg, AggregateFunction aggFunc, BlockFactory blockFactory, List blocks) { + // look for count(literal) with literal != null + Object value = aggFunc instanceof Count count && (count.foldable() == false || count.fold() != null) ? 
0L : null; + var wrapper = BlockUtils.wrapperFor(blockFactory, PlannerUtils.toElementType(aggFunc.dataType()), 1); + wrapper.accept(value); + blocks.add(wrapper.builder().build()); + } } private static LogicalPlan skipPlan(UnaryPlan plan) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java index 3b1ef475350b1..9875979808f0b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java @@ -103,9 +103,9 @@ public Literal visitDecimalValue(EsqlBaseParser.DecimalValueContext ctx) { String text = ctx.getText(); try { - return new Literal(source, Double.valueOf(StringUtils.parseDouble(text)), DataTypes.DOUBLE); - } catch (QlIllegalArgumentException siae) { - throw new ParsingException(source, siae.getMessage()); + return new Literal(source, StringUtils.parseDouble(text), DataTypes.DOUBLE); + } catch (InvalidArgumentException iae) { + throw new ParsingException(source, iae.getMessage()); } } @@ -121,7 +121,7 @@ public Literal visitIntegerValue(EsqlBaseParser.IntegerValueContext ctx) { // if it's too large, then quietly try to parse as a float instead try { return new Literal(source, StringUtils.parseDouble(text), DataTypes.DOUBLE); - } catch (QlIllegalArgumentException ignored) {} + } catch (InvalidArgumentException ignored) {} throw new ParsingException(source, siae.getMessage()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java index a7d2c6cec50ee..69e80a433f2d0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.planner; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.aggregation.Aggregator; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.AggregatorMode; @@ -22,6 +21,7 @@ import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.LocalExecutionPlannerContext; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.PhysicalOperation; +import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.expression.Alias; import org.elasticsearch.xpack.ql.expression.Attribute; import org.elasticsearch.xpack.ql.expression.Expression; @@ -70,7 +70,6 @@ public final PhysicalOperation groupingPhysicalOperation( aggregates, mode, sourceLayout, - context.bigArrays(), false, // non-grouping s -> aggregatorFactories.add(s.supplier.aggregatorFactory(s.mode)) ); @@ -139,7 +138,6 @@ else if (mode == AggregateExec.Mode.PARTIAL) { aggregates, mode, sourceLayout, - context.bigArrays(), true, // grouping s -> aggregatorFactories.add(s.supplier.groupingAggregatorFactory(s.mode)) ); @@ -157,8 +155,7 @@ else if (mode == AggregateExec.Mode.PARTIAL) { operatorFactory = new HashAggregationOperatorFactory( groupSpecs.stream().map(GroupSpec::toHashGroupSpec).toList(), aggregatorFactories, - 
context.pageSize(aggregateExec.estimatedRowSize()), - context.bigArrays() + context.pageSize(aggregateExec.estimatedRowSize()) ); } } @@ -224,7 +221,6 @@ private void aggregatesToFactory( List aggregates, AggregateExec.Mode mode, Layout layout, - BigArrays bigArrays, boolean grouping, Consumer consumer ) { @@ -241,10 +237,10 @@ private void aggregatesToFactory( Expression field = aggregateFunction.field(); // Only count can now support literals - all the other aggs should be optimized away if (field.foldable()) { - if (aggregateFunction instanceof Count count) { + if (aggregateFunction instanceof Count) { sourceAttr = emptyList(); } else { - throw new EsqlIllegalArgumentException( + throw new InvalidArgumentException( "Does not support yet aggregations over constants - [{}]", aggregateFunction.sourceText() ); @@ -283,7 +279,7 @@ private void aggregatesToFactory( assert inputChannels.size() > 0 && inputChannels.stream().allMatch(i -> i >= 0); } if (aggregateFunction instanceof ToAggregator agg) { - consumer.accept(new AggFunctionSupplierContext(agg.supplier(bigArrays, inputChannels), aggMode)); + consumer.accept(new AggFunctionSupplierContext(agg.supplier(inputChannels), aggMode)); } else { throw new EsqlIllegalArgumentException("aggregate functions must extend ToAggregator"); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java index f1647ff15d9d0..b324cf7c4056a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java @@ -37,12 +37,12 @@ import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.PhysicalOperation; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Attribute; -import org.elasticsearch.xpack.ql.expression.FieldAttribute; import org.elasticsearch.xpack.ql.type.DataType; import java.util.ArrayList; import java.util.List; import java.util.function.Function; +import java.util.function.IntFunction; import static org.elasticsearch.common.lucene.search.Queries.newNonNestedFilter; import static org.elasticsearch.compute.lucene.LuceneSourceOperator.NO_LIMIT; @@ -61,6 +61,9 @@ public List searchContexts() { @Override public final PhysicalOperation fieldExtractPhysicalOperation(FieldExtractExec fieldExtractExec, PhysicalOperation source) { + // TODO: see if we can get the FieldExtractExec to know if spatial types need to be read from source or doc values, and capture + // that information in the BlockReaderFactories.loaders method so it is passed in the BlockLoaderContext + // to GeoPointFieldMapper.blockLoader Layout.Builder layout = source.layout.builder(); var sourceAttr = fieldExtractExec.sourceAttribute(); List readers = searchContexts.stream() @@ -69,14 +72,17 @@ public final PhysicalOperation fieldExtractPhysicalOperation(FieldExtractExec fi List fields = new ArrayList<>(); int docChannel = source.layout.get(sourceAttr.id()).channel(); for (Attribute attr : fieldExtractExec.attributesToExtract()) { - if (attr instanceof FieldAttribute fa && fa.getExactInfo().hasExact()) { - attr = fa.exactAttribute(); - } layout.append(attr); DataType dataType = attr.dataType(); + ElementType elementType = PlannerUtils.toElementType(dataType); String fieldName = attr.name(); - List loaders = 
BlockReaderFactories.loaders(searchContexts, fieldName, EsqlDataTypes.isUnsupported(dataType)); - fields.add(new ValuesSourceReaderOperator.FieldInfo(fieldName, loaders)); + boolean isUnsupported = EsqlDataTypes.isUnsupported(dataType); + IntFunction loader = s -> BlockReaderFactories.loader( + searchContexts.get(s).getSearchExecutionContext(), + fieldName, + isUnsupported + ); + fields.add(new ValuesSourceReaderOperator.FieldInfo(fieldName, elementType, loader)); } return source.with(new ValuesSourceReaderOperator.Factory(fields, readers, docChannel), layout.build()); } @@ -165,15 +171,19 @@ public final Operator.OperatorFactory ordinalGroupingOperatorFactory( .toList(); // The grouping-by values are ready, let's group on them directly. // Costin: why are they ready and not already exposed in the layout? + boolean isUnsupported = EsqlDataTypes.isUnsupported(attrSource.dataType()); return new OrdinalsGroupingOperator.OrdinalsGroupingOperatorFactory( - BlockReaderFactories.loaders(searchContexts, attrSource.name(), EsqlDataTypes.isUnsupported(attrSource.dataType())), + shardIdx -> BlockReaderFactories.loader( + searchContexts.get(shardIdx).getSearchExecutionContext(), + attrSource.name(), + isUnsupported + ), shardContexts, groupElementType, docChannel, attrSource.name(), aggregatorFactories, - context.pageSize(aggregateExec.estimatedRowSize()), - context.bigArrays() + context.pageSize(aggregateExec.estimatedRowSize()) ); } }
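Both call sites above replace an eagerly materialized list of per-shard readers with an `IntFunction` that resolves a loader on demand for a given shard index, so only shards the operator actually touches pay the resolution cost. A small sketch of the shape of that change; the `Loader` record is a hypothetical stand-in for the real `BlockLoader`:

```java
import java.util.List;
import java.util.function.IntFunction;

// Hedged sketch of lazy, per-shard loader resolution.
final class PerShardLoaderSketch {
    record Loader(String shard, String field) {} // hypothetical stand-in for BlockLoader

    public static void main(String[] args) {
        List<String> shards = List.of("shard-0", "shard-1", "shard-2");
        String fieldName = "message";

        // Resolved one shard at a time, instead of building a list of loaders up front.
        IntFunction<Loader> loader = shardIdx -> new Loader(shards.get(shardIdx), fieldName);

        System.out.println(loader.apply(1)); // Loader[shard=shard-1, field=message]
    }
}
```

diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlTranslatorHandler.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlTranslatorHandler.java new file mode 100644 index 0000000000000..98ac1a2d9910a --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlTranslatorHandler.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.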
+ */ + +package org.elasticsearch.xpack.esql.planner; + +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; +import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.FieldAttribute; +import org.elasticsearch.xpack.ql.expression.MetadataAttribute; +import org.elasticsearch.xpack.ql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNotNull; +import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNull; +import org.elasticsearch.xpack.ql.planner.ExpressionTranslator; +import org.elasticsearch.xpack.ql.planner.ExpressionTranslators; +import org.elasticsearch.xpack.ql.planner.QlTranslatorHandler; +import org.elasticsearch.xpack.ql.querydsl.query.Query; +import org.elasticsearch.xpack.ql.type.DataType; + +import java.util.function.Supplier; + +public final class EsqlTranslatorHandler extends QlTranslatorHandler { + @Override + public Query asQuery(Expression e) { + return ExpressionTranslators.toQuery(e, this); + } + + @Override + public Object convert(Object value, DataType dataType) { + return EsqlDataTypeConverter.convert(value, dataType); + } + + @Override + public Query wrapFunctionQuery(ScalarFunction sf, Expression field, Supplier querySupplier) { + if (field instanceof FieldAttribute fa) { + if (fa.getExactInfo().hasExact()) { + var exact = fa.exactAttribute(); + if (exact != fa) { + fa = exact; + } + } + // don't wrap is null/is not null with SVQ + Query query = querySupplier.get(); + if ((sf instanceof IsNull || sf instanceof IsNotNull) == false) { + query = new SingleValueQuery(query, fa.name()); + } + return ExpressionTranslator.wrapIfNested(query, field); + } + if (field instanceof MetadataAttribute) { + return querySupplier.get(); // MetadataAttributes are always single valued + } + throw new EsqlIllegalArgumentException("Expected a FieldAttribute or MetadataAttribute but received [" + field + "]"); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index e4e2402a9c7a3..15aec4545e7e7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -56,7 +56,6 @@ import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; import org.elasticsearch.xpack.esql.evaluator.EvalMapper; import org.elasticsearch.xpack.esql.evaluator.command.GrokEvaluatorExtracter; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.DissectExec; import org.elasticsearch.xpack.esql.plan.physical.EnrichExec; @@ -88,7 +87,6 @@ import org.elasticsearch.xpack.ql.expression.NameId; import org.elasticsearch.xpack.ql.expression.NamedExpression; import org.elasticsearch.xpack.ql.expression.Order; -import org.elasticsearch.xpack.ql.type.DataTypes; import org.elasticsearch.xpack.ql.util.Holder; import java.util.ArrayList; @@ -323,29 +321,6 @@ private PhysicalOperation planExchange(ExchangeExec exchangeExec, LocalExecution private PhysicalOperation planExchangeSink(ExchangeSinkExec exchangeSink, 
LocalExecutionPlannerContext context) { Objects.requireNonNull(exchangeSinkHandler, "ExchangeSinkHandler wasn't provided"); var child = exchangeSink.child(); - // see https://github.com/elastic/elasticsearch/issues/100807 - handle case where the plan has been fully minimized - // to a local relation and the aggregate intermediate data erased. For this scenario, match the output the exchange output - // with that of the local relation - - if (child instanceof LocalSourceExec localExec) { - var output = exchangeSink.output(); - var localOutput = localExec.output(); - if (output.equals(localOutput) == false) { - // the outputs are going to be similar except for the bool "seen" flags which are added in below - List blocks = new ArrayList<>(asList(localExec.supplier().get())); - if (blocks.size() > 0) { - for (int i = 0, s = output.size(); i < s; i++) { - var out = output.get(i); - if (out.dataType() == DataTypes.BOOLEAN) { - blocks.add(i, BlockFactory.getNonBreakingInstance().newConstantBooleanBlockWith(true, 1)); - } - } - } - var newSupplier = LocalSupplier.of(blocks.toArray(Block[]::new)); - - child = new LocalSourceExec(localExec.source(), output, newSupplier); - } - } PhysicalOperation source = plan(child, context); @@ -381,8 +356,8 @@ private PhysicalOperation planTopN(TopNExec topNExec, LocalExecutionPlannerConte case "text", "keyword" -> TopNEncoder.UTF8; case "version" -> TopNEncoder.VERSION; case "boolean", "null", "byte", "short", "integer", "long", "double", "float", "half_float", "datetime", "date_period", - "time_duration", "object", "nested", "scaled_float", "unsigned_long", "_doc", "geo_point", "cartesian_point" -> - TopNEncoder.DEFAULT_SORTABLE; + "time_duration", "object", "nested", "scaled_float", "unsigned_long", "_doc" -> TopNEncoder.DEFAULT_SORTABLE; + case "geo_point", "cartesian_point" -> TopNEncoder.DEFAULT_UNSORTABLE; // unsupported fields are encoded as BytesRef, we'll use the same encoder; all values should be null at this point case "unsupported" -> TopNEncoder.UNSUPPORTED; default -> throw new EsqlIllegalArgumentException("No TopN sorting encoder for type " + inverse.get(channel).type()); @@ -814,9 +789,7 @@ public List createDrivers(String sessionId) { @Override public String describe() { - StringBuilder sb = new StringBuilder(); - sb.append(driverFactories.stream().map(DriverFactory::describe).collect(joining("\n"))); - return sb.toString(); + return driverFactories.stream().map(DriverFactory::describe).collect(joining("\n")); } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java index adf684d573cd1..1c20e55f289c3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java @@ -8,6 +8,9 @@ package org.elasticsearch.xpack.esql.planner; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.query.QueryBuilder; @@ -17,6 +20,8 @@ import org.elasticsearch.xpack.esql.optimizer.LocalLogicalPlanOptimizer; import org.elasticsearch.xpack.esql.optimizer.LocalPhysicalOptimizerContext; import 
org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizer;
+import org.elasticsearch.xpack.esql.plan.logical.Enrich;
+import org.elasticsearch.xpack.esql.plan.physical.EnrichExec;
 import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec;
 import org.elasticsearch.xpack.esql.plan.physical.EsSourceExec;
 import org.elasticsearch.xpack.esql.plan.physical.EstimatesRowSize;
@@ -30,6 +35,7 @@ import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
 import org.elasticsearch.xpack.ql.expression.AttributeSet;
 import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.expression.FieldAttribute;
 import org.elasticsearch.xpack.ql.expression.predicate.Predicates;
 import org.elasticsearch.xpack.ql.plan.logical.EsRelation;
 import org.elasticsearch.xpack.ql.plan.logical.Filter;
@@ -43,6 +49,7 @@ import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Set;
+import java.util.function.Predicate;
 import static java.util.Arrays.asList;
 import static org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizer.PushFiltersToSource.canPushToSource;
@@ -65,6 +72,19 @@ public static Tuple breakPlanBetweenCoordinatorAndDa
         return new Tuple<>(coordinatorPlan, dataNodePlan.get());
     }
 
+    public static boolean hasEnrich(PhysicalPlan plan) {
+        boolean[] found = { false };
+        plan.forEachDown(p -> {
+            if (p instanceof EnrichExec) {
+                found[0] = true;
+            }
+            if (p instanceof FragmentExec f) {
+                f.fragment().forEachDown(Enrich.class, e -> found[0] = true);
+            }
+        });
+        return found[0];
+    }
+
     /**
      * Returns a set of concrete indices after resolving the original indices specified in the FROM command.
      */
@@ -131,12 +151,16 @@ public static PhysicalPlan localPlan(
     /**
      * Extracts the ES query provided by the filter parameter
+     * @param plan the physical plan to extract the filter from
+     * @param hasIdenticalDelegate a lambda that, given a field attribute, says if it has
+     *                             a synthetic source delegate with the exact same value
+     * @return the extracted query, or {@code null} if no filter could be extracted
      */
-    public static QueryBuilder requestFilter(PhysicalPlan plan) {
-        return detectFilter(plan, "@timestamp");
+    public static QueryBuilder requestFilter(PhysicalPlan plan, Predicate<FieldAttribute> hasIdenticalDelegate) {
+        return detectFilter(plan, "@timestamp", hasIdenticalDelegate);
     }
 
-    static QueryBuilder detectFilter(PhysicalPlan plan, String fieldName) {
+    static QueryBuilder detectFilter(PhysicalPlan plan, String fieldName, Predicate<FieldAttribute> hasIdenticalDelegate) {
         // first position is the REST filter, the second the query filter
         var requestFilter = new QueryBuilder[] { null, null };
@@ -157,7 +181,7 @@ static QueryBuilder detectFilter(PhysicalPlan plan, String fieldName) {
             boolean matchesField = refs.removeIf(e -> fieldName.equals(e.name()));
             // the expression only contains the target reference
             // and the expression is pushable (functions can be fully translated)
-            if (matchesField && refs.isEmpty() && canPushToSource(exp)) {
+            if (matchesField && refs.isEmpty() && canPushToSource(exp, hasIdenticalDelegate)) {
                 matches.add(exp);
             }
         }
@@ -214,12 +238,23 @@ public static ElementType toElementType(DataType dataType) {
         if (dataType == EsQueryExec.DOC_DATA_TYPE) {
             return ElementType.DOC;
         }
+        // TODO: Spatial types can be read from source into BYTES_REF, or read from doc-values into LONG
         if (dataType == EsqlDataTypes.GEO_POINT) {
-            return ElementType.LONG;
+            return ElementType.BYTES_REF;
         }
         if (dataType == EsqlDataTypes.CARTESIAN_POINT) {
-            return ElementType.LONG;
+            return ElementType.BYTES_REF;
         }
         throw EsqlIllegalArgumentException.illegalDataType(dataType);
     }
+
+    /**
+     * A non-breaking block factory used
to create small pages during the planning + * TODO: Remove this + */ + @Deprecated(forRemoval = true) + public static final BlockFactory NON_BREAKING_BLOCK_FACTORY = BlockFactory.getInstance( + new NoopCircuitBreaker("noop-esql-breaker"), + BigArrays.NON_RECYCLING_INSTANCE + ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/ToAggregator.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/ToAggregator.java index 0f4410e207b52..62bc0a96ab873 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/ToAggregator.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/ToAggregator.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.planner; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import java.util.List; @@ -16,5 +15,5 @@ * Expressions that have a mapping to an {@link AggregatorFunctionSupplier}. */ public interface ToAggregator { - AggregatorFunctionSupplier supplier(BigArrays bigArrays, List inputChannels); + AggregatorFunctionSupplier supplier(List inputChannels); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ClusterComputeRequest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ClusterComputeRequest.java new file mode 100644 index 0000000000000..e25136f4d9532 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ClusterComputeRequest.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.plugin; + +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.compute.operator.exchange.ExchangeService; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.esql.session.EsqlConfiguration; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; +import java.util.Objects; + +/** + * A request to initiate a compute on a remote cluster. The output pages of the compute on the remote cluster will be placed in an + * exchange sink specified by the {@code sessionId}. The exchange sink associated with this {@code sessionId} should have been opened + * via {@link ExchangeService#openExchange} before sending this request to the remote cluster. The coordinator on the main cluster + * will poll pages from this sink. Internally, this compute will trigger sub-computes on data nodes via {@link DataNodeRequest}. 
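+ * <p>
+ * A rough sketch of the coordinator-side call order (the local variable names below are
+ * illustrative only; the calls mirror the usage in {@code ComputeService}):
+ * <pre>{@code
+ * // 1. open an exchange sink on the remote cluster, keyed by the session id
+ * ExchangeService.openExchange(transportService, connection, sessionId, bufferSize, executor, listener);
+ * // 2. link the local exchange source to the newly opened remote sink
+ * exchangeSource.addRemoteSink(exchangeService.newRemoteSink(task, sessionId, transportService, connection), clients);
+ * // 3. start the remote compute; its output pages are placed into the sink opened in step 1
+ * transportService.sendChildRequest(connection, CLUSTER_ACTION_NAME, clusterComputeRequest, task, options, handler);
+ * }</pre>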
+ */
+final class ClusterComputeRequest extends TransportRequest implements IndicesRequest {
+    private static final PlanNameRegistry planNameRegistry = new PlanNameRegistry();
+    private final String clusterAlias;
+    private final String sessionId;
+    private final EsqlConfiguration configuration;
+    private final PhysicalPlan plan;
+
+    private final String[] originalIndices;
+    private final String[] indices;
+
+    /**
+     * A request to start a compute on a remote cluster.
+     *
+     * @param clusterAlias the cluster alias of this remote cluster
+     * @param sessionId the sessionId in which the output pages will be placed in the exchange sink specified by this id
+     * @param configuration the configuration for this compute
+     * @param plan the physical plan to be executed
+     * @param indices the target indices
+     * @param originalIndices the original indices - needed to resolve alias filters
+     */
+    ClusterComputeRequest(
+        String clusterAlias,
+        String sessionId,
+        EsqlConfiguration configuration,
+        PhysicalPlan plan,
+        String[] indices,
+        String[] originalIndices
+    ) {
+        this.clusterAlias = clusterAlias;
+        this.sessionId = sessionId;
+        this.configuration = configuration;
+        this.plan = plan;
+        this.indices = indices;
+        this.originalIndices = originalIndices;
+    }
+
+    ClusterComputeRequest(StreamInput in) throws IOException {
+        super(in);
+        this.clusterAlias = in.readString();
+        this.sessionId = in.readString();
+        this.configuration = new EsqlConfiguration(in);
+        this.plan = new PlanStreamInput(in, planNameRegistry, in.namedWriteableRegistry(), configuration).readPhysicalPlanNode();
+        this.indices = in.readStringArray();
+        this.originalIndices = in.readStringArray();
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeString(clusterAlias);
+        out.writeString(sessionId);
+        configuration.writeTo(out);
+        new PlanStreamOutput(out, planNameRegistry).writePhysicalPlanNode(plan);
+        out.writeStringArray(indices);
+        out.writeStringArray(originalIndices);
+    }
+
+    @Override
+    public String[] indices() {
+        return indices;
+    }
+
+    @Override
+    public IndicesOptions indicesOptions() {
+        return IndicesOptions.strictSingleIndexNoExpandForbidClosed();
+    }
+
+    @Override
+    public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
+        if (parentTaskId.isSet() == false) {
+            assert false : "ClusterComputeRequest must have a parent task";
+            throw new IllegalStateException("ClusterComputeRequest must have a parent task");
+        }
+        return new CancellableTask(id, type, action, "", parentTaskId, headers) {
+            @Override
+            public String getDescription() {
+                return ClusterComputeRequest.this.getDescription();
+            }
+        };
+    }
+
+    String clusterAlias() {
+        return clusterAlias;
+    }
+
+    String sessionId() {
+        return sessionId;
+    }
+
+    EsqlConfiguration configuration() {
+        return configuration;
+    }
+
+    String[] originalIndices() {
+        return originalIndices;
+    }
+
+    PhysicalPlan plan() {
+        return plan;
+    }
+
+    @Override
+    public String getDescription() {
+        return "indices=" + Arrays.toString(indices) + " plan=" + plan;
+    }
+
+    @Override
+    public String toString() {
+        return "ClusterComputeRequest{" + getDescription() + "}";
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        ClusterComputeRequest request = (ClusterComputeRequest) o;
+        return clusterAlias.equals(request.clusterAlias)
+            && sessionId.equals(request.sessionId)
+            && configuration.equals(request.configuration)
+            &&
Arrays.equals(indices, request.indices) + && Arrays.equals(originalIndices, request.originalIndices) + && plan.equals(request.plan) + && getParentTask().equals(request.getParentTask()); + } + + @Override + public int hashCode() { + return Objects.hash(sessionId, configuration, Arrays.hashCode(indices), Arrays.hashCode(originalIndices), plan); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeResponse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeResponse.java new file mode 100644 index 0000000000000..44796ca78aa91 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeResponse.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.plugin; + +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.compute.operator.DriverProfile; +import org.elasticsearch.transport.TransportResponse; + +import java.io.IOException; +import java.util.List; + +/** + * The compute result of {@link DataNodeRequest} or {@link ClusterComputeRequest} + */ +final class ComputeResponse extends TransportResponse { + private final List profiles; + + ComputeResponse(List profiles) { + this.profiles = profiles; + } + + ComputeResponse(StreamInput in) throws IOException { + super(in); + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE)) { + if (in.readBoolean()) { + profiles = in.readCollectionAsImmutableList(DriverProfile::new); + } else { + profiles = null; + } + } else { + profiles = null; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE)) { + if (profiles == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeCollection(profiles); + } + } + } + + public List getProfiles() { + return profiles; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index b7b31868d65e2..aa1eafbf90265 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -7,33 +7,29 @@ package org.elasticsearch.xpack.esql.plugin; -import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchShardsGroup; import org.elasticsearch.action.search.SearchShardsRequest; import org.elasticsearch.action.search.SearchShardsResponse; import org.elasticsearch.action.search.TransportSearchShardsAction; +import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.RefCountingRunnable; -import 
org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.compute.OwningChannelActionListener; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.Driver; import org.elasticsearch.compute.operator.DriverProfile; import org.elasticsearch.compute.operator.DriverTaskRunner; import org.elasticsearch.compute.operator.ResponseHeadersCollector; -import org.elasticsearch.compute.operator.exchange.ExchangeResponse; import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.compute.operator.exchange.ExchangeSinkHandler; import org.elasticsearch.compute.operator.exchange.ExchangeSourceHandler; @@ -57,13 +53,16 @@ import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.esql.action.EsqlQueryAction; import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; +import org.elasticsearch.xpack.esql.plan.physical.ExchangeSinkExec; +import org.elasticsearch.xpack.esql.plan.physical.ExchangeSourceExec; import org.elasticsearch.xpack.esql.plan.physical.OutputExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.planner.EsPhysicalOperationProviders; @@ -71,9 +70,7 @@ import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; -import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -120,6 +117,12 @@ public ComputeService( this.blockFactory = blockFactory; this.esqlExecutor = threadPool.executor(ESQL_THREAD_POOL_NAME); transportService.registerRequestHandler(DATA_ACTION_NAME, this.esqlExecutor, DataNodeRequest::new, new DataNodeRequestHandler()); + transportService.registerRequestHandler( + CLUSTER_ACTION_NAME, + this.esqlExecutor, + ClusterComputeRequest::new, + new ClusterRequestHandler() + ); this.driverRunner = new DriverTaskRunner(transportService, this.esqlExecutor); this.exchangeService = exchangeService; this.enrichLookupService = enrichLookupService; @@ -144,12 +147,14 @@ public void execute( }); PhysicalPlan coordinatorPlan = new OutputExec(coordinatorAndDataNodePlan.v1(), collectedPages::add); PhysicalPlan dataNodePlan = coordinatorAndDataNodePlan.v2(); - - var concreteIndices = PlannerUtils.planConcreteIndices(physicalPlan); - + if (dataNodePlan != null && dataNodePlan instanceof ExchangeSinkExec == false) { + listener.onFailure(new IllegalStateException("expect data node plan starts with an ExchangeSink; got " + dataNodePlan)); + return; + } + 
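+        // Group the target indices by cluster alias; the local cluster is keyed by
+        // RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY and is handled in-process below, while every
+        // remote cluster with matching indices gets its own ClusterComputeRequest.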
Map clusterToConcreteIndices = transportService.getRemoteClusterService() + .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planConcreteIndices(physicalPlan).toArray(String[]::new)); QueryPragmas queryPragmas = configuration.pragmas(); - - if (concreteIndices.isEmpty()) { + if (dataNodePlan == null || clusterToConcreteIndices.values().stream().allMatch(v -> v.indices().length == 0)) { var computeContext = new ComputeContext(sessionId, List.of(), configuration, null, null); runCompute( rootTask, @@ -159,108 +164,186 @@ public void execute( ); return; } - QueryBuilder requestFilter = PlannerUtils.requestFilter(dataNodePlan); - - LOGGER.debug("Sending data node plan\n{}\n with filter [{}]", dataNodePlan, requestFilter); - + Map clusterToOriginalIndices = transportService.getRemoteClusterService() + .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planOriginalIndices(physicalPlan)); + var localOriginalIndices = clusterToOriginalIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + var localConcreteIndices = clusterToConcreteIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + if (clusterToOriginalIndices.isEmpty() == false && PlannerUtils.hasEnrich(physicalPlan)) { + listener.onFailure(new IllegalArgumentException("cross clusters query doesn't support enrich yet")); + return; + } final var responseHeadersCollector = new ResponseHeadersCollector(transportService.getThreadPool().getThreadContext()); listener = ActionListener.runBefore(listener, responseHeadersCollector::finish); - String[] originalIndices = PlannerUtils.planOriginalIndices(physicalPlan); - computeTargetNodes( - rootTask, - requestFilter, - concreteIndices, - originalIndices, - listener.delegateFailureAndWrap((delegate, targetNodes) -> { - final ExchangeSourceHandler exchangeSource = exchangeService.createSourceHandler( + final AtomicBoolean cancelled = new AtomicBoolean(); + final List collectedProfiles = configuration.profile() ? 
Collections.synchronizedList(new ArrayList<>()) : List.of(); + final var exchangeSource = new ExchangeSourceHandler( + queryPragmas.exchangeBufferSize(), + transportService.getThreadPool().executor(ESQL_THREAD_POOL_NAME) + ); + try ( + Releasable ignored = exchangeSource.addEmptySink(); + RefCountingListener refs = new RefCountingListener(listener.map(unused -> new Result(collectedPages, collectedProfiles))) + ) { + // run compute on the coordinator + runCompute( + rootTask, + new ComputeContext(sessionId, List.of(), configuration, exchangeSource, null), + coordinatorPlan, + cancelOnFailure(rootTask, cancelled, refs.acquire()).map(driverProfiles -> { + responseHeadersCollector.collect(); + if (configuration.profile()) { + collectedProfiles.addAll(driverProfiles); + } + return null; + }) + ); + // starts computes on data nodes on the main cluster + if (localConcreteIndices != null && localConcreteIndices.indices().length > 0) { + startComputeOnDataNodes( sessionId, - queryPragmas.exchangeBufferSize(), - ESQL_THREAD_POOL_NAME + RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, + rootTask, + configuration, + dataNodePlan, + Set.of(localConcreteIndices.indices()), + localOriginalIndices.indices(), + exchangeSource, + ActionListener.releaseAfter(refs.acquire(), exchangeSource.addEmptySink()), + () -> cancelOnFailure(rootTask, cancelled, refs.acquire()).map(response -> { + responseHeadersCollector.collect(); + if (configuration.profile()) { + collectedProfiles.addAll(response.getProfiles()); + } + return null; + }) ); - final List collectedProfiles = configuration.profile() - ? Collections.synchronizedList(new ArrayList<>()) - : null; - try ( - Releasable ignored = exchangeSource::decRef; - RefCountingListener requestRefs = new RefCountingListener( - delegate.map(unused -> new Result(collectedPages, collectedProfiles)) - ) - ) { - final AtomicBoolean cancelled = new AtomicBoolean(); - // wait until the source handler is completed - exchangeSource.addCompletionListener(requestRefs.acquire()); - // run compute on the coordinator - var computeContext = new ComputeContext(sessionId, List.of(), configuration, exchangeSource, null); - runCompute( - rootTask, - computeContext, - coordinatorPlan, - cancelOnFailure(rootTask, cancelled, requestRefs.acquire()).map(driverProfiles -> { - responseHeadersCollector.collect(); - if (configuration.profile()) { - collectedProfiles.addAll(driverProfiles); - } - return null; - }) - ); - // run compute on remote nodes - runComputeOnRemoteNodes( + } + // starts computes on remote cluster + startComputeOnRemoteClusters( + sessionId, + rootTask, + configuration, + dataNodePlan, + exchangeSource, + getRemoteClusters(clusterToConcreteIndices, clusterToOriginalIndices), + () -> cancelOnFailure(rootTask, cancelled, refs.acquire()).map(response -> { + responseHeadersCollector.collect(); + if (configuration.profile()) { + collectedProfiles.addAll(response.getProfiles()); + } + return null; + }) + ); + } + } + + private List getRemoteClusters( + Map clusterToConcreteIndices, + Map clusterToOriginalIndices + ) { + List remoteClusters = new ArrayList<>(clusterToConcreteIndices.size()); + RemoteClusterService remoteClusterService = transportService.getRemoteClusterService(); + for (Map.Entry e : clusterToConcreteIndices.entrySet()) { + String clusterAlias = e.getKey(); + OriginalIndices concreteIndices = clusterToConcreteIndices.get(clusterAlias); + OriginalIndices originalIndices = clusterToOriginalIndices.get(clusterAlias); + if (originalIndices == null) { + assert false : "can't 
find original indices for cluster " + clusterAlias; + throw new IllegalStateException("can't find original indices for cluster " + clusterAlias); + } + if (concreteIndices.indices().length > 0) { + Transport.Connection connection = remoteClusterService.getConnection(clusterAlias); + remoteClusters.add(new RemoteCluster(clusterAlias, connection, concreteIndices.indices(), originalIndices.indices())); + } + } + return remoteClusters; + } + + private void startComputeOnDataNodes( + String sessionId, + String clusterAlias, + CancellableTask parentTask, + EsqlConfiguration configuration, + PhysicalPlan dataNodePlan, + Set concreteIndices, + String[] originalIndices, + ExchangeSourceHandler exchangeSource, + ActionListener parentListener, + Supplier> dataNodeListenerSupplier + ) { + // The lambda is to say if a TEXT field has an identical exact subfield + // We cannot use SearchContext because we don't have it yet. + // Since it's used only for @timestamp, it is relatively safe to assume it's not needed + // but it would be better to have a proper impl. + QueryBuilder requestFilter = PlannerUtils.requestFilter(dataNodePlan, x -> true); + lookupDataNodes(parentTask, clusterAlias, requestFilter, concreteIndices, originalIndices, ActionListener.wrap(dataNodes -> { + try (RefCountingRunnable refs = new RefCountingRunnable(() -> parentListener.onResponse(null))) { + // For each target node, first open a remote exchange on the remote node, then link the exchange source to + // the new remote exchange sink, and initialize the computation on the target node via data-node-request. + for (DataNode node : dataNodes) { + var dataNodeListener = ActionListener.releaseAfter(dataNodeListenerSupplier.get(), refs.acquire()); + var queryPragmas = configuration.pragmas(); + ExchangeService.openExchange( + transportService, + node.connection, sessionId, - rootTask, - configuration, - dataNodePlan, - exchangeSource, - targetNodes, - () -> cancelOnFailure(rootTask, cancelled, requestRefs.acquire()).map(response -> { - responseHeadersCollector.collect(); - if (configuration.profile()) { - collectedProfiles.addAll(response.profiles); - } - return null; + queryPragmas.exchangeBufferSize(), + esqlExecutor, + dataNodeListener.delegateFailureAndWrap((delegate, unused) -> { + var remoteSink = exchangeService.newRemoteSink(parentTask, sessionId, transportService, node.connection); + exchangeSource.addRemoteSink(remoteSink, queryPragmas.concurrentExchangeClients()); + transportService.sendChildRequest( + node.connection, + DATA_ACTION_NAME, + new DataNodeRequest(sessionId, configuration, clusterAlias, node.shardIds, node.aliasFilters, dataNodePlan), + parentTask, + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>(delegate, ComputeResponse::new, esqlExecutor) + ); }) ); } - }) - ); + } + }, parentListener::onFailure)); } - private void runComputeOnRemoteNodes( + private void startComputeOnRemoteClusters( String sessionId, CancellableTask rootTask, EsqlConfiguration configuration, - PhysicalPlan dataNodePlan, + PhysicalPlan plan, ExchangeSourceHandler exchangeSource, - List targetNodes, - Supplier> listener + List clusters, + Supplier> listener ) { - // Do not complete the exchange sources until we have linked all remote sinks - final SubscribableListener blockingSinkFuture = new SubscribableListener<>(); - exchangeSource.addRemoteSink( - (sourceFinished, l) -> blockingSinkFuture.addListener(l.map(ignored -> new ExchangeResponse(null, true))), - 1 - ); - try (RefCountingRunnable exchangeRefs = new 
RefCountingRunnable(() -> blockingSinkFuture.onResponse(null))) { - // For each target node, first open a remote exchange on the remote node, then link the exchange source to - // the new remote exchange sink, and initialize the computation on the target node via data-node-request. - for (TargetNode targetNode : targetNodes) { - var targetNodeListener = ActionListener.releaseAfter(listener.get(), exchangeRefs.acquire()); + try (RefCountingRunnable refs = new RefCountingRunnable(exchangeSource.addEmptySink()::close)) { + for (RemoteCluster cluster : clusters) { + var targetNodeListener = ActionListener.releaseAfter(listener.get(), refs.acquire()); var queryPragmas = configuration.pragmas(); ExchangeService.openExchange( transportService, - targetNode.node(), + cluster.connection, sessionId, queryPragmas.exchangeBufferSize(), esqlExecutor, - targetNodeListener.delegateFailureAndWrap((delegate, unused) -> { - var remoteSink = exchangeService.newRemoteSink(rootTask, sessionId, transportService, targetNode.node); + targetNodeListener.delegateFailureAndWrap((l, unused) -> { + var remoteSink = exchangeService.newRemoteSink(rootTask, sessionId, transportService, cluster.connection); exchangeSource.addRemoteSink(remoteSink, queryPragmas.concurrentExchangeClients()); + var clusterRequest = new ClusterComputeRequest( + cluster.clusterAlias, + sessionId, + configuration, + plan, + cluster.concreteIndices, + cluster.originalIndices + ); transportService.sendChildRequest( - targetNode.node, - DATA_ACTION_NAME, - new DataNodeRequest(sessionId, configuration, targetNode.shardIds, targetNode.aliasFilters, dataNodePlan), + cluster.connection, + CLUSTER_ACTION_NAME, + clusterRequest, rootTask, TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>(delegate, DataNodeResponse::new, esqlExecutor) + new ActionListenerResponseHandler<>(l, ComputeResponse::new, esqlExecutor) ); }) ); @@ -327,7 +410,9 @@ void runCompute(CancellableTask task, ComputeContext context, PhysicalPlan plan, } private void acquireSearchContexts( + String clusterAlias, List shardIds, + EsqlConfiguration configuration, Map aliasFilters, ActionListener> listener ) { @@ -351,11 +436,13 @@ private void acquireSearchContexts( try { for (IndexShard shard : targetShards) { var aliasFilter = aliasFilters.getOrDefault(shard.shardId().getIndex(), AliasFilter.EMPTY); - ShardSearchRequest shardSearchLocalRequest = new ShardSearchRequest(shard.shardId(), 0, aliasFilter); - SearchContext context = searchService.createSearchContext( - shardSearchLocalRequest, - SearchService.NO_TIMEOUT + var shardRequest = new ShardSearchRequest( + shard.shardId(), + configuration.absoluteStartedTimeInMillis(), + aliasFilter, + clusterAlias ); + SearchContext context = searchService.createSearchContext(shardRequest, SearchService.NO_TIMEOUT); searchContexts.add(context); } for (SearchContext searchContext : searchContexts) { @@ -377,27 +464,28 @@ private void acquireSearchContexts( } } - record TargetNode(DiscoveryNode node, List shardIds, Map aliasFilters) { + record DataNode(Transport.Connection connection, List shardIds, Map aliasFilters) { } - private void computeTargetNodes( + record RemoteCluster(String clusterAlias, Transport.Connection connection, String[] concreteIndices, String[] originalIndices) { + + } + + /** + * Performs can_match and find the target nodes for the given target indices and filter. + *
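+     * <p>
+     * The result is one {@code DataNode} per target node, pairing that node's connection with the
+     * shard ids and alias filters it should read; for example, an index with two shards spread over
+     * two nodes would yield two {@code DataNode} entries, each listing the shards local to its node.
+     * <p>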

    + * Ideally, the search_shards API should be called before the field-caps API; however, this can lead + * to a situation where the column structure (i.e., matched data types) differs depending on the query. + */ + void lookupDataNodes( Task parentTask, + String clusterAlias, QueryBuilder filter, Set concreteIndices, String[] originalIndices, - ActionListener> listener + ActionListener> listener ) { - var remoteIndices = transportService.getRemoteClusterService().groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, originalIndices); - remoteIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - if (remoteIndices.isEmpty() == false) { - listener.onFailure( - new IllegalArgumentException("ES|QL does not yet support querying remote indices " + Arrays.toString(originalIndices)) - ); - return; - } - // Ideally, the search_shards API should be called before the field-caps API; however, this can lead - // to a situation where the column structure (i.e., matched data types) differs depending on the query. ThreadContext threadContext = transportService.getThreadPool().getThreadContext(); ActionListener preservingContextListener = ContextPreservingActionListener.wrapPreservingContext( listener.map(resp -> { @@ -425,13 +513,13 @@ private void computeTargetNodes( nodeToAliasFilters.computeIfAbsent(targetNode, k -> new HashMap<>()).put(shardId.getIndex(), aliasFilter); } } - List targetNodes = new ArrayList<>(nodeToShards.size()); + List dataNodes = new ArrayList<>(nodeToShards.size()); for (Map.Entry> e : nodeToShards.entrySet()) { DiscoveryNode node = nodes.get(e.getKey()); Map aliasFilters = nodeToAliasFilters.getOrDefault(e.getKey(), Map.of()); - targetNodes.add(new TargetNode(node, e.getValue(), aliasFilters)); + dataNodes.add(new DataNode(transportService.getConnection(node), e.getValue(), aliasFilters)); } - return targetNodes; + return dataNodes; }), threadContext ); @@ -444,7 +532,7 @@ private void computeTargetNodes( null, null, false, - null + clusterAlias ); transportService.sendChildRequest( transportService.getLocalNode(), @@ -457,39 +545,6 @@ private void computeTargetNodes( } } - private static class DataNodeResponse extends TransportResponse { - private final List profiles; - - DataNodeResponse(List profiles) { - this.profiles = profiles; - } - - DataNodeResponse(StreamInput in) throws IOException { - super(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE)) { - if (in.readBoolean()) { - profiles = in.readCollectionAsImmutableList(DriverProfile::new); - } else { - profiles = null; - } - } else { - profiles = null; - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE)) { - if (profiles == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeCollection(profiles); - } - } - } - } - // TODO: Use an internal action here public static final String DATA_ACTION_NAME = EsqlQueryAction.NAME + "/data"; @@ -499,29 +554,138 @@ public void messageReceived(DataNodeRequest request, TransportChannel channel, T final var parentTask = (CancellableTask) task; final var sessionId = request.sessionId(); final var exchangeSink = exchangeService.getSinkHandler(sessionId); - parentTask.addListener(() -> exchangeService.finishSinkHandler(sessionId, new TaskCancelledException("task cancelled"))); - final ActionListener listener = new OwningChannelActionListener<>(channel); - acquireSearchContexts(request.shardIds(), request.aliasFilters(), 
ActionListener.wrap(searchContexts -> { - var computeContext = new ComputeContext(sessionId, searchContexts, request.configuration(), null, exchangeSink); - runCompute(parentTask, computeContext, request.plan(), ActionListener.wrap(driverProfiles -> { - // don't return until all pages are fetched - exchangeSink.addCompletionListener( - ContextPreservingActionListener.wrapPreservingContext( - ActionListener.releaseAfter( - listener.map(nullValue -> new DataNodeResponse(driverProfiles)), - () -> exchangeService.finishSinkHandler(sessionId, null) - ), - transportService.getThreadPool().getThreadContext() - ) - ); + parentTask.addListener( + () -> exchangeService.finishSinkHandler(sessionId, new TaskCancelledException(parentTask.getReasonCancelled())) + ); + final ActionListener listener = new ChannelActionListener<>(channel); + final EsqlConfiguration configuration = request.configuration(); + acquireSearchContexts( + request.clusterAlias(), + request.shardIds(), + configuration, + request.aliasFilters(), + ActionListener.wrap(searchContexts -> { + var computeContext = new ComputeContext(sessionId, searchContexts, configuration, null, exchangeSink); + runCompute(parentTask, computeContext, request.plan(), ActionListener.wrap(driverProfiles -> { + // don't return until all pages are fetched + exchangeSink.addCompletionListener( + ContextPreservingActionListener.wrapPreservingContext( + ActionListener.releaseAfter( + listener.map(nullValue -> new ComputeResponse(driverProfiles)), + () -> exchangeService.finishSinkHandler(sessionId, null) + ), + transportService.getThreadPool().getThreadContext() + ) + ); + }, e -> { + exchangeService.finishSinkHandler(sessionId, e); + listener.onFailure(e); + })); }, e -> { exchangeService.finishSinkHandler(sessionId, e); listener.onFailure(e); - })); - }, e -> { - exchangeService.finishSinkHandler(sessionId, e); - listener.onFailure(e); - })); + }) + ); + } + } + + public static final String CLUSTER_ACTION_NAME = EsqlQueryAction.NAME + "/cluster"; + + private class ClusterRequestHandler implements TransportRequestHandler { + @Override + public void messageReceived(ClusterComputeRequest request, TransportChannel channel, Task task) { + ChannelActionListener listener = new ChannelActionListener<>(channel); + if (request.plan() instanceof ExchangeSinkExec == false) { + listener.onFailure(new IllegalStateException("expected exchange sink for a remote compute; got " + request.plan())); + return; + } + runComputeOnRemoteCluster( + request.clusterAlias(), + request.sessionId(), + (CancellableTask) task, + request.configuration(), + (ExchangeSinkExec) request.plan(), + Set.of(request.indices()), + request.originalIndices(), + listener + ); + } + } + + /** + * Performs a compute on a remote cluster. The output pages are placed in an exchange sink specified by + * {@code globalSessionId}. The coordinator on the main cluster will poll pages from there. + *
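+     * <p>
+     * Concretely, the incoming {@code ExchangeSinkExec} plan is rewritten so that this remote coordinator
+     * runs {@code ExchangeSinkExec(ExchangeSourceExec)}: it drains a local exchange source fed by this
+     * cluster's data nodes and copies the pages into the sink that the main cluster polls.
+     * <p>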

    + * Currently, the coordinator on the remote cluster simply collects pages from data nodes in the remote cluster + * and places them in the exchange sink. We can achieve this by using a single exchange buffer to minimize overhead. + * However, here we use two exchange buffers so that we can run an actual plan on this coordinator to perform partial + * reduce operations, such as limit, topN, and partial-to-partial aggregation in the future. + */ + void runComputeOnRemoteCluster( + String clusterAlias, + String globalSessionId, + CancellableTask parentTask, + EsqlConfiguration configuration, + ExchangeSinkExec plan, + Set concreteIndices, + String[] originalIndices, + ActionListener listener + ) { + final var exchangeSink = exchangeService.getSinkHandler(globalSessionId); + parentTask.addListener( + () -> exchangeService.finishSinkHandler(globalSessionId, new TaskCancelledException(parentTask.getReasonCancelled())) + ); + ThreadPool threadPool = transportService.getThreadPool(); + final var responseHeadersCollector = new ResponseHeadersCollector(threadPool.getThreadContext()); + listener = ActionListener.runBefore(listener, responseHeadersCollector::finish); + final AtomicBoolean cancelled = new AtomicBoolean(); + final List collectedProfiles = configuration.profile() ? Collections.synchronizedList(new ArrayList<>()) : List.of(); + final String localSessionId = clusterAlias + ":" + globalSessionId; + var exchangeSource = new ExchangeSourceHandler( + configuration.pragmas().exchangeBufferSize(), + transportService.getThreadPool().executor(ESQL_THREAD_POOL_NAME) + ); + try ( + Releasable ignored = exchangeSource.addEmptySink(); + RefCountingListener refs = new RefCountingListener(listener.map(unused -> new ComputeResponse(collectedProfiles))) + ) { + exchangeSink.addCompletionListener(refs.acquire()); + PhysicalPlan coordinatorPlan = new ExchangeSinkExec( + plan.source(), + plan.output(), + plan.isIntermediateAgg(), + new ExchangeSourceExec(plan.source(), plan.output(), plan.isIntermediateAgg()) + ); + runCompute( + parentTask, + new ComputeContext(localSessionId, List.of(), configuration, exchangeSource, exchangeSink), + coordinatorPlan, + cancelOnFailure(parentTask, cancelled, refs.acquire()).map(driverProfiles -> { + responseHeadersCollector.collect(); + if (configuration.profile()) { + collectedProfiles.addAll(driverProfiles); + } + return null; + }) + ); + startComputeOnDataNodes( + localSessionId, + clusterAlias, + parentTask, + configuration, + plan, + concreteIndices, + originalIndices, + exchangeSource, + ActionListener.releaseAfter(refs.acquire(), exchangeSource.addEmptySink()), + () -> cancelOnFailure(parentTask, cancelled, refs.acquire()).map(r -> { + responseHeadersCollector.collect(); + if (configuration.profile()) { + collectedProfiles.addAll(r.getProfiles()); + } + return null; + }) + ); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java index d8e5e576386e3..5067e62fa6970 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.plugin; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.io.stream.StreamInput; 
@@ -17,6 +18,7 @@ import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; @@ -33,6 +35,7 @@ final class DataNodeRequest extends TransportRequest implements IndicesRequest { private static final PlanNameRegistry planNameRegistry = new PlanNameRegistry(); private final String sessionId; private final EsqlConfiguration configuration; + private final String clusterAlias; private final List shardIds; private final Map aliasFilters; private final PhysicalPlan plan; @@ -42,12 +45,14 @@ final class DataNodeRequest extends TransportRequest implements IndicesRequest { DataNodeRequest( String sessionId, EsqlConfiguration configuration, + String clusterAlias, List shardIds, Map aliasFilters, PhysicalPlan plan ) { this.sessionId = sessionId; this.configuration = configuration; + this.clusterAlias = clusterAlias; this.shardIds = shardIds; this.aliasFilters = aliasFilters; this.plan = plan; @@ -57,6 +62,11 @@ final class DataNodeRequest extends TransportRequest implements IndicesRequest { super(in); this.sessionId = in.readString(); this.configuration = new EsqlConfiguration(in); + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_CLUSTER_ALIAS)) { + this.clusterAlias = in.readString(); + } else { + this.clusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + } this.shardIds = in.readCollectionAsList(ShardId::new); this.aliasFilters = in.readMap(Index::new, AliasFilter::readFrom); this.plan = new PlanStreamInput(in, planNameRegistry, in.namedWriteableRegistry(), configuration).readPhysicalPlanNode(); @@ -67,6 +77,9 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(sessionId); configuration.writeTo(out); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_CLUSTER_ALIAS)) { + out.writeString(clusterAlias); + } out.writeCollection(shardIds); out.writeMap(aliasFilters); new PlanStreamOutput(out, planNameRegistry).writePhysicalPlanNode(plan); @@ -111,6 +124,10 @@ QueryPragmas pragmas() { return configuration.pragmas(); } + String clusterAlias() { + return clusterAlias; + } + List shardIds() { return shardIds; } @@ -143,6 +160,7 @@ public boolean equals(Object o) { DataNodeRequest request = (DataNodeRequest) o; return sessionId.equals(request.sessionId) && configuration.equals(request.configuration) + && clusterAlias.equals(request.clusterAlias) && shardIds.equals(request.shardIds) && aliasFilters.equals(request.aliasFilters) && plan.equals(request.plan) @@ -151,6 +169,6 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(sessionId, configuration, shardIds, aliasFilters, plan); + return Objects.hash(sessionId, configuration, clusterAlias, shardIds, aliasFilters, plan); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java index e8a57e5a49808..07ca55aa665eb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java @@ -17,6 +17,8 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; 
import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; @@ -41,7 +43,11 @@ import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; import org.elasticsearch.xpack.esql.EsqlInfoTransportAction; import org.elasticsearch.xpack.esql.EsqlUsageTransportAction; +import org.elasticsearch.xpack.esql.action.EsqlAsyncGetResultAction; import org.elasticsearch.xpack.esql.action.EsqlQueryAction; +import org.elasticsearch.xpack.esql.action.RestEsqlAsyncQueryAction; +import org.elasticsearch.xpack.esql.action.RestEsqlDeleteAsyncResultAction; +import org.elasticsearch.xpack.esql.action.RestEsqlGetAsyncResultAction; import org.elasticsearch.xpack.esql.action.RestEsqlQueryAction; import org.elasticsearch.xpack.esql.execution.PlanExecutor; import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; @@ -82,7 +88,13 @@ public class EsqlPlugin extends Plugin implements ActionPlugin { public Collection createComponents(PluginServices services) { CircuitBreaker circuitBreaker = services.indicesService().getBigArrays().breakerService().getBreaker("request"); Objects.requireNonNull(circuitBreaker, "request circuit breaker wasn't set"); - BlockFactory blockFactory = new BlockFactory(circuitBreaker, services.indicesService().getBigArrays().withCircuitBreaking()); + Settings settings = services.clusterService().getSettings(); + ByteSizeValue maxPrimitiveArrayBlockSize = settings.getAsBytesSize( + BlockFactory.MAX_BLOCK_PRIMITIVE_ARRAY_SIZE_SETTING, + BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE + ); + BigArrays bigArrays = services.indicesService().getBigArrays().withCircuitBreaking(); + BlockFactory blockFactory = new BlockFactory(circuitBreaker, bigArrays, maxPrimitiveArrayBlockSize); return List.of( new PlanExecutor( new IndexResolver( @@ -116,6 +128,7 @@ public List> getSettings() { public List> getActions() { return List.of( new ActionHandler<>(EsqlQueryAction.INSTANCE, TransportEsqlQueryAction.class), + new ActionHandler<>(EsqlAsyncGetResultAction.INSTANCE, TransportEsqlAsyncGetResultsAction.class), new ActionHandler<>(EsqlStatsAction.INSTANCE, TransportEsqlStatsAction.class), new ActionHandler<>(XPackUsageFeatureAction.ESQL, EsqlUsageTransportAction.class), new ActionHandler<>(XPackInfoFeatureAction.ESQL, EsqlInfoTransportAction.class) @@ -132,7 +145,12 @@ public List getRestHandlers( IndexNameExpressionResolver indexNameExpressionResolver, Supplier nodesInCluster ) { - return List.of(new RestEsqlQueryAction()); + return List.of( + new RestEsqlQueryAction(), + new RestEsqlAsyncQueryAction(), + new RestEsqlGetAsyncResultAction(), + new RestEsqlDeleteAsyncResultAction() + ); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlAsyncGetResultsAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlAsyncGetResultsAction.java new file mode 100644 index 0000000000000..8785b8f5de887 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlAsyncGetResultsAction.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.plugin;
+
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.client.internal.Client;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.compute.data.BlockFactory;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.esql.action.EsqlAsyncGetResultAction;
+import org.elasticsearch.xpack.esql.action.EsqlQueryResponse;
+import org.elasticsearch.xpack.esql.action.EsqlQueryTask;
+import org.elasticsearch.xpack.ql.plugin.AbstractTransportQlAsyncGetResultsAction;
+
+public class TransportEsqlAsyncGetResultsAction extends AbstractTransportQlAsyncGetResultsAction<EsqlQueryResponse, EsqlQueryTask> {
+
+    private final BlockFactory blockFactory;
+
+    @Inject
+    public TransportEsqlAsyncGetResultsAction(
+        TransportService transportService,
+        ActionFilters actionFilters,
+        ClusterService clusterService,
+        NamedWriteableRegistry registry,
+        Client client,
+        ThreadPool threadPool,
+        BigArrays bigArrays,
+        BlockFactory blockFactory
+    ) {
+        super(
+            EsqlAsyncGetResultAction.NAME,
+            transportService,
+            actionFilters,
+            clusterService,
+            registry,
+            client,
+            threadPool,
+            bigArrays,
+            EsqlQueryTask.class
+        );
+        this.blockFactory = blockFactory;
+    }
+
+    @Override
+    public Writeable.Reader<EsqlQueryResponse> responseReader() {
+        return EsqlQueryResponse.reader(blockFactory);
+    }
+}
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java
index d272aba26e4e8..baaa4abe23b3d 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java
@@ -11,8 +11,11 @@
 import org.elasticsearch.action.ActionRunnable;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.compute.data.BlockFactory;
@@ -23,22 +26,32 @@
 import org.elasticsearch.tasks.TaskId;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.XPackPlugin;
+import org.elasticsearch.xpack.core.async.AsyncExecutionId;
 import org.elasticsearch.xpack.esql.action.ColumnInfo;
 import org.elasticsearch.xpack.esql.action.EsqlQueryAction;
 import org.elasticsearch.xpack.esql.action.EsqlQueryRequest;
 import org.elasticsearch.xpack.esql.action.EsqlQueryResponse;
+import org.elasticsearch.xpack.esql.action.EsqlQueryTask;
 import org.elasticsearch.xpack.esql.enrich.EnrichLookupService;
 import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolver;
 import org.elasticsearch.xpack.esql.execution.PlanExecutor;
 import org.elasticsearch.xpack.esql.session.EsqlConfiguration;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
+import org.elasticsearch.xpack.ql.async.AsyncTaskManagementService;
 
+import java.io.IOException;
 import java.time.ZoneOffset;
 import java.util.List;
 import java.util.Locale;
+import java.util.Map;
 import java.util.concurrent.Executor;
 
-public class TransportEsqlQueryAction extends HandledTransportAction<EsqlQueryRequest, EsqlQueryResponse> {
+import static org.elasticsearch.xpack.core.ClientHelper.ASYNC_SEARCH_ORIGIN;
+
+public class TransportEsqlQueryAction extends HandledTransportAction<EsqlQueryRequest, EsqlQueryResponse>
+    implements
+        AsyncTaskManagementService.AsyncOperation<EsqlQueryRequest, EsqlQueryResponse, EsqlQueryTask> {
 
     private final PlanExecutor planExecutor;
     private final ComputeService computeService;
@@ -47,8 +60,10 @@ public class TransportEsqlQueryAction extends HandledTransportAction<EsqlQueryRequest, EsqlQueryResponse>
+    private final AsyncTaskManagementService<EsqlQueryRequest, EsqlQueryResponse, EsqlQueryTask> asyncTaskManagementService;
 
     @Inject
+    @SuppressWarnings("this-escape")
     public TransportEsqlQueryAction(
         TransportService transportService,
         ActionFilters actionFilters,
@@ -58,7 +73,10 @@ public TransportEsqlQueryAction(
         ClusterService clusterService,
         ThreadPool threadPool,
         BigArrays bigArrays,
-        BlockFactory blockFactory
+        BlockFactory blockFactory,
+        Client client,
+        NamedWriteableRegistry registry
     ) {
         // TODO replace SAME when removing workaround for https://github.com/elastic/elasticsearch/issues/97916
         super(EsqlQueryAction.NAME, transportService, actionFilters, EsqlQueryRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE);
@@ -79,15 +97,53 @@ public TransportEsqlQueryAction(
             bigArrays,
             blockFactory
         );
+        this.asyncTaskManagementService = new AsyncTaskManagementService<>(
+            XPackPlugin.ASYNC_RESULTS_INDEX,
+            client,
+            ASYNC_SEARCH_ORIGIN,
+            registry,
+            taskManager,
+            EsqlQueryAction.INSTANCE.name(),
+            this,
+            EsqlQueryTask.class,
+            clusterService,
+            threadPool,
+            bigArrays
+        );
     }
 
     @Override
     protected void doExecute(Task task, EsqlQueryRequest request, ActionListener<EsqlQueryResponse> listener) {
         // workaround for https://github.com/elastic/elasticsearch/issues/97916 - TODO remove this when we can
-        requestExecutor.execute(ActionRunnable.wrap(listener, l -> doExecuteForked(task, request, l)));
+        requestExecutor.execute(
+            ActionRunnable.wrap(
+                listener.delegateFailureAndWrap(ActionListener::respondAndRelease),
+                l -> doExecuteForked(task, request, l)
+            )
+        );
     }
 
     private void doExecuteForked(Task task, EsqlQueryRequest request, ActionListener<EsqlQueryResponse> listener) {
+        assert ThreadPool.assertCurrentThreadPool(EsqlPlugin.ESQL_THREAD_POOL_NAME);
+        if (requestIsAsync(request)) {
+            asyncTaskManagementService.asyncExecute(
+                request,
+                request.waitForCompletionTimeout(),
+                request.keepAlive(),
+                request.keepOnCompletion(),
+                listener
+            );
+        } else {
+            innerExecute(task, request, listener);
+        }
+    }
+
+    @Override
+    public void execute(EsqlQueryRequest request, EsqlQueryTask task, ActionListener<EsqlQueryResponse> listener) {
+        ActionListener.run(listener, l -> innerExecute(task, request, l));
+    }
+
+    private void innerExecute(Task task, EsqlQueryRequest request, ActionListener<EsqlQueryResponse> listener) {
         EsqlConfiguration configuration = new EsqlConfiguration(
             ZoneOffset.UTC,
             request.locale() != null ? request.locale() : Locale.US,
@@ -120,7 +176,12 @@ private void doExecuteForked(Task task, EsqlQueryRequest request, ActionListener
                     EsqlQueryResponse.Profile profile = configuration.profile() ? new EsqlQueryResponse.Profile(result.profiles()) : null;
-                    return new EsqlQueryResponse(columns, result.pages(), profile, request.columnar());
+                    if (task instanceof EsqlQueryTask asyncTask && request.keepOnCompletion()) {
+                        String id = asyncTask.getExecutionId().getEncoded();
+                        return new EsqlQueryResponse(columns, result.pages(), profile, request.columnar(), id, false, request.async());
+                    } else {
+                        return new EsqlQueryResponse(columns, result.pages(), profile, request.columnar(), request.async());
+                    }
                 })
             )
         )
@@ -143,4 +204,50 @@ public ExchangeService exchangeService() {
     public EnrichLookupService enrichLookupService() {
         return enrichLookupService;
     }
+
+    @Override
+    public EsqlQueryTask createTask(
+        EsqlQueryRequest request,
+        long id,
+        String type,
+        String action,
+        TaskId parentTaskId,
+        Map<String, String> headers,
+        Map<String, String> originHeaders,
+        AsyncExecutionId asyncExecutionId
+    ) {
+        return new EsqlQueryTask(
+            id,
+            type,
+            action,
+            request.getDescription(),
+            parentTaskId,
+            headers,
+            originHeaders,
+            asyncExecutionId,
+            request.keepAlive()
+        );
+    }
+
+    @Override
+    public EsqlQueryResponse initialResponse(EsqlQueryTask task) {
+        return new EsqlQueryResponse(
+            List.of(),
+            List.of(),
+            null,
+            false,
+            task.getExecutionId().getEncoded(),
+            true, // is_running
+            true // isAsync
+        );
+    }
+
+    @Override
+    public EsqlQueryResponse readResponse(StreamInput inputStream) throws IOException {
+        throw new AssertionError("should not reach here");
+    }
+
+    private static boolean requestIsAsync(EsqlQueryRequest request) {
+        return request.async();
+    }
 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlConfiguration.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlConfiguration.java
index ac13f25c2d2a9..ccec6554cb2cb 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlConfiguration.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlConfiguration.java
@@ -112,6 +112,15 @@ public String query() {
         return query;
     }
 
+    /**
+     * Returns the current time in milliseconds from the time epoch for the execution of this request.
+     * It ensures consistency by using the same value on all nodes involved in the search request.
+     * Note: Currently, it returns {@link System#currentTimeMillis()}, but this value will be serialized between nodes.
+     */
+    public long absoluteStartedTimeInMillis() {
+        return System.currentTimeMillis();
+    }
+
     /**
      * Enable profiling, sacrificing performance to return information about
      * what operations are taking the most time.
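The two transport actions and the configuration change above complete the server-side async wiring: `TransportEsqlQueryAction` either answers inline or registers an `EsqlQueryTask`, and `TransportEsqlAsyncGetResultsAction` serves later fetches of the stored result. As a quick orientation (not part of the diff), a client drives this in two steps: submit with the async options, then poll by id. The sketch below assumes async-search-style `_query/async` REST endpoints and a local node; the endpoint paths and host are illustrative assumptions, not something this diff adds.

```java
// Hypothetical client-side walkthrough of the async flow wired up above.
// Assumes the low-level REST client and `_query/async`-style endpoints.
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class AsyncEsqlFlowSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // 1. Submit: mirrors the fields parsed by EsqlQueryRequest.fromXContentAsync.
            Request submit = new Request("POST", "/_query/async");
            submit.setJsonEntity("""
                {
                  "query": "FROM my-index | LIMIT 10",
                  "wait_for_completion_timeout": "1s",
                  "keep_on_completion": true,
                  "keep_alive": "5d"
                }""");
            Response response = client.performRequest(submit);
            System.out.println(EntityUtils.toString(response.getEntity()));
            // 2. If the body reports "is_running": true, fetch later with the returned id,
            //    which is what TransportEsqlAsyncGetResultsAction serves:
            //    GET /_query/async/<id>
        }
    }
}
```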
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/SearchStats.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/SearchStats.java
index b5d75a1528493..1106ecc344db7 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/SearchStats.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/SearchStats.java
@@ -18,12 +18,14 @@
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.index.mapper.AbstractScriptFieldType;
 import org.elasticsearch.index.mapper.ConstantFieldType;
 import org.elasticsearch.index.mapper.DocCountFieldMapper.DocCountFieldType;
 import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.NumberFieldMapper.NumberFieldType;
 import org.elasticsearch.index.mapper.SeqNoFieldMapper;
+import org.elasticsearch.index.mapper.TextFieldMapper;
 import org.elasticsearch.search.internal.SearchContext;
 import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
 import org.elasticsearch.xpack.ql.type.DataType;
@@ -43,9 +45,13 @@ public class SearchStats {
 
     private static class FieldStat {
         private Long count;
-        private Boolean exists;
         private Object min, max;
+        // TODO: use a multi-bitset instead
+        private Boolean exists;
         private Boolean singleValue;
+        private Boolean hasIdenticalDelegate;
+        private Boolean indexed;
+        private Boolean runtime;
     }
 
     private static final int CACHE_SIZE = 32;
@@ -106,10 +112,38 @@ public boolean exists(String field) {
                     break;
                 }
             }
+
+            // populate additional properties to save on the lookups
+            if (stat.exists == false) {
+                stat.indexed = false;
+                stat.singleValue = true;
+            }
         }
         return stat.exists;
     }
 
+    public boolean hasIdenticalDelegate(String field) {
+        var stat = cache.computeIfAbsent(field, s -> new FieldStat());
+        if (stat.hasIdenticalDelegate == null) {
+            stat.hasIdenticalDelegate = true;
+            for (SearchContext context : contexts) {
+                if (context.getSearchExecutionContext().isFieldMapped(field)) {
+                    MappedFieldType type = context.getSearchExecutionContext().getFieldType(field);
+                    if (type instanceof TextFieldMapper.TextFieldType t) {
+                        if (t.canUseSyntheticSourceDelegateForQuerying() == false) {
+                            stat.hasIdenticalDelegate = false;
+                            break;
+                        }
+                    } else {
+                        stat.hasIdenticalDelegate = false;
+                        break;
+                    }
+                }
+            }
+        }
+        return stat.hasIdenticalDelegate;
+    }
+
     public byte[] min(String field, DataType dataType) {
         var stat = cache.computeIfAbsent(field, s -> new FieldStat());
         if (stat.min == null) {
@@ -161,14 +195,17 @@ public boolean isSingleValue(String field) {
             if (exists(field) == false) {
                 stat.singleValue = true;
             } else {
-                var sv = new boolean[] { false };
+                var sv = new boolean[] { true };
                 for (SearchContext context : contexts) {
-                    MappedFieldType mappedType = context.getSearchExecutionContext().getFieldType(field);
-                    doWithContexts(r -> {
-                        sv[0] &= detectSingleValue(r, mappedType, field);
-                        return sv[0];
-                    }, true);
-                    break;
+                    var sec = context.getSearchExecutionContext();
+                    MappedFieldType mappedType = sec.isFieldMapped(field) ? sec.getFieldType(field) : null;
+                    if (mappedType != null) {
+                        doWithContexts(r -> {
+                            sv[0] &= detectSingleValue(r, mappedType, field);
+                            return sv[0];
+                        }, true);
+                        break;
+                    }
                 }
                 stat.singleValue = sv[0];
             }
@@ -176,6 +213,46 @@ public boolean isSingleValue(String field) {
         return stat.singleValue;
     }
 
+    public boolean isRuntimeField(String field) {
+        var stat = cache.computeIfAbsent(field, s -> new FieldStat());
+        if (stat.runtime == null) {
+            stat.runtime = false;
+            if (exists(field)) {
+                for (SearchContext context : contexts) {
+                    var sec = context.getSearchExecutionContext();
+                    if (sec.isFieldMapped(field)) {
+                        if (sec.getFieldType(field) instanceof AbstractScriptFieldType<?>) {
+                            stat.runtime = true;
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+        return stat.runtime;
+    }
+
+    public boolean isIndexed(String field) {
+        var stat = cache.computeIfAbsent(field, s -> new FieldStat());
+        if (stat.indexed == null) {
+            stat.indexed = false;
+            if (exists(field)) {
+                boolean indexed = true;
+                for (SearchContext context : contexts) {
+                    var sec = context.getSearchExecutionContext();
+                    if (sec.isFieldMapped(field)) {
+                        if (sec.getFieldType(field).isIndexed() == false) {
+                            indexed = false;
+                            break;
+                        }
+                    }
+                }
+                stat.indexed = indexed;
+            }
+        }
+        return stat.indexed;
+    }
+
     private boolean detectSingleValue(IndexReader r, MappedFieldType fieldType, String name) throws IOException {
         // types that are always single value (and are accessible through instanceof)
         if (fieldType instanceof ConstantFieldType || fieldType instanceof DocCountFieldType || fieldType instanceof TimestampFieldType) {
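The `FieldStat` additions above all follow one pattern: every derived per-field property is computed lazily on first request, cached, and, where one answer implies another (a non-existent field is neither indexed nor multi-valued), the related flags are back-filled to skip future lookups. A stripped-down sketch of that pattern, with hypothetical names standing in for the mapper lookups:

```java
// Minimal illustration (hypothetical types) of SearchStats' lazy, self-back-filling
// per-field cache. Boxed Booleans distinguish "not computed yet" (null) from false.
import java.util.HashMap;
import java.util.Map;
import java.util.function.Predicate;

class FieldStatCache {
    private static class Stat {
        Boolean exists;   // null until computed
        Boolean indexed;  // may be back-filled by exists()
    }

    private final Map<String, Stat> cache = new HashMap<>();

    boolean exists(String field, Predicate<String> mappedLookup) {
        Stat stat = cache.computeIfAbsent(field, f -> new Stat());
        if (stat.exists == null) {
            stat.exists = mappedLookup.test(field);
            if (stat.exists == false) {
                stat.indexed = false; // a field that doesn't exist can't be indexed
            }
        }
        return stat.exists;
    }
}
```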
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java
index 192d5e43f9366..eba80ff238a45 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java
@@ -10,6 +10,7 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xpack.esql.parser.ParsingException;
+import org.elasticsearch.xpack.ql.InvalidArgumentException;
 import org.elasticsearch.xpack.ql.QlIllegalArgumentException;
 import org.elasticsearch.xpack.ql.tree.Source;
 import org.elasticsearch.xpack.ql.type.Converter;
@@ -130,8 +131,8 @@ public static DataType commonType(DataType left, DataType right) {
         return DataTypeConverter.commonType(left, right);
     }
 
-    public static TemporalAmount parseTemporalAmout(Number value, String qualifier, Source source) throws QlIllegalArgumentException,
-        ArithmeticException {
+    public static TemporalAmount parseTemporalAmout(Number value, String qualifier, Source source) throws InvalidArgumentException,
+        ArithmeticException, ParsingException {
         return switch (qualifier) {
             case "millisecond", "milliseconds" -> Duration.ofMillis(safeToLong(value));
             case "second", "seconds" -> Duration.ofSeconds(safeToLong(value));
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java
index 3b5aa5dbecc3d..2910a690bf8a0 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java
@@ -16,7 +16,11 @@
 import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.DATE_PERIOD;
 import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.TIME_DURATION;
-import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount;
+import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isDateTimeOrTemporal;
+import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isNullOrDatePeriod;
+import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isNullOrTemporalAmount;
+import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isNullOrTimeDuration;
+import static org.elasticsearch.xpack.ql.type.DataTypes.DATETIME;
 import static org.elasticsearch.xpack.ql.type.DataTypes.isDateTime;
 
 public class EsqlDataTypeRegistry implements DataTypeRegistry {
@@ -61,14 +65,16 @@ public Object convert(Object value, DataType type) {
 
     @Override
     public DataType commonType(DataType left, DataType right) {
-        if (isDateTime(left) && isTemporalAmount(right) || isTemporalAmount(left) && isDateTime(right)) {
-            return DataTypes.DATETIME;
-        }
-        if (left == TIME_DURATION && right == TIME_DURATION) {
-            return TIME_DURATION;
-        }
-        if (left == DATE_PERIOD && right == DATE_PERIOD) {
-            return DATE_PERIOD;
+        if (isDateTimeOrTemporal(left) || isDateTimeOrTemporal(right)) {
+            if ((isDateTime(left) && isNullOrTemporalAmount(right)) || (isNullOrTemporalAmount(left) && isDateTime(right))) {
+                return DATETIME;
+            }
+            if (isNullOrTimeDuration(left) && isNullOrTimeDuration(right)) {
+                return TIME_DURATION;
+            }
+            if (isNullOrDatePeriod(left) && isNullOrDatePeriod(right)) {
+                return DATE_PERIOD;
+            }
         }
         return EsqlDataTypeConverter.commonType(left, right);
     }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java
index 03e2d40c8cb48..eae808abb5037 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java
@@ -39,6 +39,7 @@
 import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG;
 import static org.elasticsearch.xpack.ql.type.DataTypes.UNSUPPORTED;
 import static org.elasticsearch.xpack.ql.type.DataTypes.VERSION;
+import static org.elasticsearch.xpack.ql.type.DataTypes.isNull;
 
 public final class EsqlDataTypes {
 
@@ -153,6 +154,18 @@ public static boolean isTemporalAmount(DataType t) {
         return t == DATE_PERIOD || t == TIME_DURATION;
     }
 
+    public static boolean isNullOrTemporalAmount(DataType t) {
+        return isTemporalAmount(t) || isNull(t);
+    }
+
+    public static boolean isNullOrDatePeriod(DataType t) {
+        return t == DATE_PERIOD || isNull(t);
+    }
+
+    public static boolean isNullOrTimeDuration(DataType t) {
+        return t == TIME_DURATION || isNull(t);
+    }
+
     public static boolean isSpatial(DataType t) {
         return t == GEO_POINT || t == CARTESIAN_POINT;
     }
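The net effect of the registry and `EsqlDataTypes` changes is that NULL now participates in temporal type promotion instead of falling through to the generic rules. A standalone restatement of the decision table (simplified enum, not the actual `DataType` machinery):

```java
// Hedged sketch of the widened commonType rules: NULL is acceptable wherever a
// temporal amount (or a specific period/duration) is, as long as at least one
// side really is datetime/temporal.
enum Ty { DATETIME, DATE_PERIOD, TIME_DURATION, NULL, OTHER }

final class TemporalCommonType {
    static Ty common(Ty left, Ty right) {
        if (isDateTimeOrTemporal(left) || isDateTimeOrTemporal(right)) {
            if ((left == Ty.DATETIME && isNullOrTemporal(right)) || (isNullOrTemporal(left) && right == Ty.DATETIME)) {
                return Ty.DATETIME;
            }
            if (isNullOr(left, Ty.TIME_DURATION) && isNullOr(right, Ty.TIME_DURATION)) {
                return Ty.TIME_DURATION;
            }
            if (isNullOr(left, Ty.DATE_PERIOD) && isNullOr(right, Ty.DATE_PERIOD)) {
                return Ty.DATE_PERIOD;
            }
        }
        return Ty.OTHER; // delegate to the non-temporal rules
    }

    private static boolean isDateTimeOrTemporal(Ty t) {
        return t == Ty.DATETIME || t == Ty.DATE_PERIOD || t == Ty.TIME_DURATION;
    }

    private static boolean isNullOrTemporal(Ty t) {
        return t == Ty.NULL || t == Ty.DATE_PERIOD || t == Ty.TIME_DURATION;
    }

    private static boolean isNullOr(Ty t, Ty other) {
        return t == Ty.NULL || t == other;
    }
}
```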
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java
index a5e3033b6e1e3..39a7eee2e616d 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java
@@ -331,11 +331,16 @@ private ActualResults executePlan(BigArrays bigArrays) throws Exception {
         ExchangeSinkHandler exchangeSink = new ExchangeSinkHandler(between(1, 64), threadPool::relativeTimeInMillis);
 
         Settings.Builder settings = Settings.builder();
+        BlockFactory blockFactory = new BlockFactory(
+            bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST),
+            bigArrays,
+            ByteSizeValue.ofBytes(randomLongBetween(1, BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE.getBytes() * 2))
+        );
         LocalExecutionPlanner executionPlanner = new LocalExecutionPlanner(
             sessionId,
             new CancellableTask(1, "transport", "esql", null, TaskId.EMPTY_TASK_ID, Map.of()),
             bigArrays,
-            new BlockFactory(bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST), bigArrays),
+            blockFactory,
             randomNodeSettings(),
             configuration,
             exchangeSource,
@@ -407,7 +412,7 @@ protected void start(Driver driver, ActionListener<Void> driverListener) {
             }));
             return future.actionGet(TimeValue.timeValueSeconds(30));
         } finally {
-            Releasables.close(() -> Releasables.close(drivers), exchangeSource::decRef);
+            Releasables.close(() -> Releasables.close(drivers));
         }
     }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java
index c8da2792c7565..4be95b95afe54 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java
@@ -14,6 +14,7 @@
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.index.query.BoolQueryBuilder;
+import org.elasticsearch.index.query.ExistsQueryBuilder;
 import org.elasticsearch.index.query.MatchAllQueryBuilder;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.RangeQueryBuilder;
@@ -29,6 +30,7 @@
 import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery;
 import org.elasticsearch.xpack.esql.session.EsqlConfiguration;
 import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan;
 
 import java.io.IOException;
 import java.io.UncheckedIOException;
@@ -43,6 +45,11 @@ public static void assertSerialization(PhysicalPlan plan) {
         EqualsHashCodeTestUtils.checkEqualsAndHashCode(plan, unused -> deserPlan);
     }
 
+    public static void assertSerialization(LogicalPlan plan) {
+        var deserPlan = serializeDeserialize(plan, PlanStreamOutput::writeLogicalPlanNode, PlanStreamInput::readLogicalPlanNode);
+        EqualsHashCodeTestUtils.checkEqualsAndHashCode(plan, unused -> deserPlan);
+    }
+
     public static void assertSerialization(Expression expression) {
         Expression deserExpression = serializeDeserialize(expression, PlanStreamOutput::writeExpression, PlanStreamInput::readExpression);
         EqualsHashCodeTestUtils.checkEqualsAndHashCode(expression, unused -> deserExpression);
@@ -85,6 +92,7 @@ public static NamedWriteableRegistry writableRegistry() {
             new NamedWriteableRegistry.Entry(QueryBuilder.class, BoolQueryBuilder.NAME, BoolQueryBuilder::new),
             new NamedWriteableRegistry.Entry(QueryBuilder.class, WildcardQueryBuilder.NAME, WildcardQueryBuilder::new),
             new NamedWriteableRegistry.Entry(QueryBuilder.class, RegexpQueryBuilder.NAME, RegexpQueryBuilder::new),
+            new NamedWriteableRegistry.Entry(QueryBuilder.class, ExistsQueryBuilder.NAME, ExistsQueryBuilder::new),
             SingleValueQuery.ENTRY
         )
     );
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/TestBlockFactory.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/TestBlockFactory.java
new file mode 100644
index 0000000000000..99cf8be307054
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/TestBlockFactory.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql;
+
+import org.elasticsearch.common.breaker.NoopCircuitBreaker;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.compute.data.BlockFactory;
+
+public class TestBlockFactory {
+
+    private static final BlockFactory NON_BREAKING = BlockFactory.getInstance(
+        new NoopCircuitBreaker("noop-esql-breaker"),
+        BigArrays.NON_RECYCLING_INSTANCE
+    );
+
+    /**
+     * Returns the Non-Breaking block factory.
+     */
+    public static BlockFactory getNonBreakingInstance() {
+        return NON_BREAKING;
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java
index b1b492b28076e..dd8118c8074fb 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java
@@ -31,6 +31,7 @@
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
+import java.util.function.Function;
 import java.util.function.Supplier;
 
 import static org.hamcrest.Matchers.containsString;
@@ -55,7 +56,7 @@ public void testParseFields() throws IOException {
             "filter": %s
             %s""", query, columnar, locale.toLanguageTag(), filter, paramsString);
 
-        EsqlQueryRequest request = parseEsqlQueryRequest(json);
+        EsqlQueryRequest request = parseEsqlQueryRequestSync(json);
 
         assertEquals(query, request.query());
         assertEquals(columnar, request.columnar());
@@ -69,6 +70,71 @@ public void testParseFields() throws IOException {
         }
     }
 
+    public void testParseFieldsForAsync() throws IOException {
+        String query = randomAlphaOfLengthBetween(1, 100);
+        boolean columnar = randomBoolean();
+        Locale locale = randomLocale(random());
+        QueryBuilder filter = randomQueryBuilder();
+
+        List<TypedParamValue> params = randomParameters();
+        boolean hasParams = params.isEmpty() == false;
+        StringBuilder paramsString = paramsString(params, hasParams);
+        boolean keepOnCompletion = randomBoolean();
+        TimeValue waitForCompletion = TimeValue.parseTimeValue(randomTimeValue(), "test");
+        TimeValue keepAlive = TimeValue.parseTimeValue(randomTimeValue(), "test");
+        String json = String.format(
+            Locale.ROOT,
+            """
+                {
+                    "query": "%s",
+                    "columnar": %s,
+                    "locale": "%s",
+                    "filter": %s,
+                    "keep_on_completion": %s,
+                    "wait_for_completion_timeout": "%s",
+                    "keep_alive": "%s"
+                %s""",
+            query,
+            columnar,
+            locale.toLanguageTag(),
+            filter,
+            keepOnCompletion,
+            waitForCompletion.getStringRep(),
+            keepAlive.getStringRep(),
+            paramsString
+        );
+
+        EsqlQueryRequest request = parseEsqlQueryRequestAsync(json);
+
+        assertEquals(query, request.query());
+        assertEquals(columnar, request.columnar());
+        assertEquals(locale.toLanguageTag(), request.locale().toLanguageTag());
+        assertEquals(locale, request.locale());
+        assertEquals(filter, request.filter());
+        assertEquals(keepOnCompletion, request.keepOnCompletion());
+        assertEquals(waitForCompletion, request.waitForCompletionTimeout());
+        assertEquals(keepAlive, request.keepAlive());
+
+        assertEquals(params.size(), request.params().size());
+        for (int i = 0; i < params.size(); i++) {
+            assertEquals(params.get(i), request.params().get(i));
+        }
+    }
+
+    public void testDefaultValueForOptionalAsyncParams() throws IOException {
+        String query = randomAlphaOfLengthBetween(1, 100);
+        String json = String.format(Locale.ROOT, """
+            {
+                "query": "%s"
+            }
+            """, query);
+        EsqlQueryRequest request = parseEsqlQueryRequestAsync(json);
+        assertEquals(query, request.query());
+        assertFalse(request.keepOnCompletion());
+        assertEquals(TimeValue.timeValueSeconds(1), request.waitForCompletionTimeout());
+        assertEquals(TimeValue.timeValueDays(5), request.keepAlive());
+    }
+
     public void testRejectUnknownFields() {
         assertParserErrorMessage("""
             {
@@ -84,10 +150,15 @@ public void testRejectUnknownFields() {
     }
 
     public void testMissingQueryIsNotValidation() throws IOException {
-        EsqlQueryRequest request = parseEsqlQueryRequest("""
+        String json = """
             {
                 "columnar": true
-            }""");
+            }""";
+        EsqlQueryRequest request = parseEsqlQueryRequestSync(json);
+        assertNotNull(request.validate());
+        assertThat(request.validate().getMessage(), containsString("[query] is required"));
+
+        request = parseEsqlQueryRequestAsync(json);
         assertNotNull(request.validate());
         assertThat(request.validate().getMessage(), containsString("[query] is required"));
     }
@@ -96,10 +167,12 @@ public void testTask() throws IOException {
         String query = randomAlphaOfLength(10);
         int id = randomInt();
 
-        EsqlQueryRequest request = parseEsqlQueryRequest("""
+        String requestJson = """
             {
                 "query": "QUERY"
-            }""".replace("QUERY", query));
+            }""".replace("QUERY", query);
+
+        EsqlQueryRequest request = parseEsqlQueryRequestSync(requestJson);
 
         Task task = request.createTask(id, "transport", EsqlQueryAction.NAME, TaskId.EMPTY_TASK_ID, Map.of());
         assertThat(task.getDescription(), equalTo(query));
@@ -180,17 +253,33 @@ private StringBuilder paramsString(List<TypedParamValue> params, boolean hasPara
     }
 
     private static void assertParserErrorMessage(String json, String message) {
-        Exception e = expectThrows(IllegalArgumentException.class, () -> parseEsqlQueryRequest(json));
+        Exception e = expectThrows(IllegalArgumentException.class, () -> parseEsqlQueryRequestSync(json));
+        assertThat(e.getMessage(), containsString(message));
+
+        e = expectThrows(IllegalArgumentException.class, () -> parseEsqlQueryRequestAsync(json));
         assertThat(e.getMessage(), containsString(message));
     }
 
-    private static EsqlQueryRequest parseEsqlQueryRequest(String json) throws IOException {
+    static EsqlQueryRequest parseEsqlQueryRequestSync(String json) throws IOException {
+        var request = parseEsqlQueryRequest(json, EsqlQueryRequest::fromXContentSync);
+        assertFalse(request.async());
+        return request;
+    }
+
+    static EsqlQueryRequest parseEsqlQueryRequestAsync(String json) throws IOException {
+        var request = parseEsqlQueryRequest(json, EsqlQueryRequest::fromXContentAsync);
+        assertTrue(request.async());
+        return request;
+    }
+
+    static EsqlQueryRequest parseEsqlQueryRequest(String json, Function<XContentParser, EsqlQueryRequest> fromXContentFunc)
+        throws IOException {
         SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList());
         XContentParserConfiguration config = XContentParserConfiguration.EMPTY.withRegistry(
             new NamedXContentRegistry(searchModule.getNamedXContents())
         );
         try (XContentParser parser = XContentType.JSON.xContent().createParser(config, json)) {
-            return EsqlQueryRequest.fromXContent(parser);
+            return fromXContentFunc.apply(parser);
         }
     }
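Spelled out, these tests pin down the three async-only request fields and their defaults (from `testDefaultValueForOptionalAsyncParams`). An illustrative request body; only `query` is required, and only the async parser declares the optional fields:

```json
{
  "query": "FROM my-index | LIMIT 1",
  "keep_on_completion": false,
  "wait_for_completion_timeout": "1s",
  "keep_alive": "5d"
}
```

That split is why every shared JSON fixture above is routed through both `parseEsqlQueryRequestSync` and `parseEsqlQueryRequestAsync`.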
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java
index 25083268a3761..fa5334fb33ef7 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java
@@ -24,7 +24,6 @@
 import org.elasticsearch.compute.data.BooleanBlock;
 import org.elasticsearch.compute.data.BytesRefBlock;
 import org.elasticsearch.compute.data.DoubleBlock;
-import org.elasticsearch.compute.data.IntArrayVector;
 import org.elasticsearch.compute.data.IntBlock;
 import org.elasticsearch.compute.data.LongBlock;
 import org.elasticsearch.compute.data.Page;
@@ -32,11 +31,19 @@
 import org.elasticsearch.compute.operator.AbstractPageMappingOperator;
 import org.elasticsearch.compute.operator.DriverProfile;
 import org.elasticsearch.compute.operator.DriverStatus;
+import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.Releasables;
+import org.elasticsearch.geo.GeometryTestUtils;
+import org.elasticsearch.geo.ShapeTestUtils;
 import org.elasticsearch.test.AbstractChunkedSerializingTestCase;
+import org.elasticsearch.xcontent.InstantiatingObjectParser;
+import org.elasticsearch.xcontent.ObjectParser;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.ParserConstructor;
 import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.xcontent.XContentType;
 import org.elasticsearch.xcontent.json.JsonXContent;
+import org.elasticsearch.xpack.esql.TestBlockFactory;
 import org.elasticsearch.xpack.esql.planner.PlannerUtils;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
 import org.elasticsearch.xpack.ql.type.DataType;
@@ -51,6 +58,9 @@
 import java.util.List;
 import java.util.stream.Stream;
 
+import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
+import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg;
+import static org.elasticsearch.xpack.esql.action.ResponseValueUtils.valuesToPage;
 import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN;
 import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO;
 import static org.hamcrest.Matchers.equalTo;
@@ -89,11 +99,21 @@ protected EsqlQueryResponse createTestInstance() {
     }
 
     EsqlQueryResponse randomResponse(boolean columnar, EsqlQueryResponse.Profile profile) {
+        return randomResponseAsync(columnar, profile, false);
+    }
+
+    EsqlQueryResponse randomResponseAsync(boolean columnar, EsqlQueryResponse.Profile profile, boolean async) {
         int noCols = randomIntBetween(1, 10);
         List<ColumnInfo> columns = randomList(noCols, noCols, this::randomColumnInfo);
         int noPages = randomIntBetween(1, 20);
         List<Page> values = randomList(noPages, noPages, () -> randomPage(columns));
-        return new EsqlQueryResponse(columns, values, profile, columnar);
+        String id = null;
+        boolean isRunning = false;
+        if (async) {
+            id = randomAlphaOfLengthBetween(1, 16);
+            isRunning = randomBoolean();
+        }
+        return new EsqlQueryResponse(columns, values, profile, columnar, id, isRunning, async);
     }
 
     private ColumnInfo randomColumnInfo() {
@@ -130,8 +150,10 @@ private Page randomPage(List<ColumnInfo> columns) {
                     new BytesRef(UnsupportedValueSource.UNSUPPORTED_OUTPUT)
                 );
                 case "version" -> ((BytesRefBlock.Builder) builder).appendBytesRef(new Version(randomIdentifier()).toBytesRef());
-                case "geo_point" -> ((LongBlock.Builder) builder).appendLong(GEO.pointAsLong(randomGeoPoint()));
-                case "cartesian_point" -> ((LongBlock.Builder) builder).appendLong(CARTESIAN.pointAsLong(randomCartesianPoint()));
+                case "geo_point" -> ((BytesRefBlock.Builder) builder).appendBytesRef(GEO.pointAsWKB(GeometryTestUtils.randomPoint()));
+                case "cartesian_point" -> ((BytesRefBlock.Builder) builder).appendBytesRef(
+                    CARTESIAN.pointAsWKB(ShapeTestUtils.randomPoint())
+                );
                 case "null" -> builder.appendNull();
                 case "_source" -> {
                     try {
@@ -167,19 +189,21 @@ protected EsqlQueryResponse mutateInstance(EsqlQueryResponse instance) {
                 List<ColumnInfo> cols = new ArrayList<>(instance.columns());
                 // keep the type the same so the values are still valid but change the name
                 cols.set(mutCol, new ColumnInfo(cols.get(mutCol).name() + "mut", cols.get(mutCol).type()));
-                yield new EsqlQueryResponse(cols, deepCopyOfPages(instance), instance.profile(), instance.columnar());
+                yield new EsqlQueryResponse(cols, deepCopyOfPages(instance), instance.profile(), instance.columnar(), instance.isAsync());
             }
             case 1 -> new EsqlQueryResponse(
                 instance.columns(),
                 deepCopyOfPages(instance),
                 instance.profile(),
-                false == instance.columnar()
+                false == instance.columnar(),
+                instance.isAsync()
             );
             case 2 -> new EsqlQueryResponse(
                 instance.columns(),
                 deepCopyOfPages(instance),
                 randomValueOtherThan(instance.profile(), this::randomProfile),
-                instance.columnar()
+                instance.columnar(),
+                instance.isAsync()
             );
             case 3 -> {
                 int noPages = instance.pages().size();
@@ -188,7 +212,13 @@ protected EsqlQueryResponse mutateInstance(EsqlQueryResponse instance) {
                     differentPages.forEach(p -> Releasables.closeExpectNoException(p::releaseBlocks));
                     differentPages = randomList(noPages, noPages, () -> randomPage(instance.columns()));
                 } while (differentPages.equals(instance.pages()));
-                yield new EsqlQueryResponse(instance.columns(), differentPages, instance.profile(), instance.columnar());
+                yield new EsqlQueryResponse(
+                    instance.columns(),
+                    differentPages,
+                    instance.profile(),
+                    instance.columnar(),
+                    instance.isAsync()
+                );
             }
             default -> throw new IllegalArgumentException();
         };
@@ -214,7 +244,58 @@ protected Writeable.Reader<EsqlQueryResponse> instanceReader() {
 
     @Override
     protected EsqlQueryResponse doParseInstance(XContentParser parser) {
-        return EsqlQueryResponse.fromXContent(parser);
+        return ResponseBuilder.fromXContent(parser);
     }
 
+    public static class ResponseBuilder {
+        private static final ParseField ID = new ParseField("id");
+        private static final ParseField IS_RUNNING = new ParseField("is_running");
+        private static final InstantiatingObjectParser<ResponseBuilder, Void> PARSER;
+
+        static {
+            InstantiatingObjectParser.Builder<ResponseBuilder, Void> parser = InstantiatingObjectParser.builder(
+                "esql/query_response",
+                true,
+                ResponseBuilder.class
+            );
+            parser.declareString(optionalConstructorArg(), ID);
+            parser.declareField(
+                optionalConstructorArg(),
+                p -> p.currentToken() == XContentParser.Token.VALUE_NULL ? false : p.booleanValue(),
+                IS_RUNNING,
+                ObjectParser.ValueType.BOOLEAN_OR_NULL
+            );
+            parser.declareObjectArray(constructorArg(), (p, c) -> ColumnInfo.fromXContent(p), new ParseField("columns"));
+            parser.declareField(constructorArg(), (p, c) -> p.list(), new ParseField("values"), ObjectParser.ValueType.OBJECT_ARRAY);
+            PARSER = parser.build();
+        }
+
+        // Used for XContent reconstruction
+        private final EsqlQueryResponse response;
+
+        @ParserConstructor
+        public ResponseBuilder(
+            @Nullable String asyncExecutionId,
+            Boolean isRunning,
+            List<ColumnInfo> columns,
+            List<List<Object>> values
+        ) {
+            this.response = new EsqlQueryResponse(
+                columns,
+                List.of(valuesToPage(TestBlockFactory.getNonBreakingInstance(), columns, values)),
+                null,
+                false,
+                asyncExecutionId,
+                isRunning != null,
+                isAsync(asyncExecutionId, isRunning)
+            );
+        }
+
+        static boolean isAsync(@Nullable String asyncExecutionId, Boolean isRunning) {
+            if (asyncExecutionId != null || isRunning != null) {
+                return true;
+            }
+            return false;
+        }
+
+        static EsqlQueryResponse fromXContent(XContentParser parser) {
+            return PARSER.apply(parser, null).response;
+        }
+    }
 
     public void testChunkResponseSizeColumnar() {
@@ -223,6 +304,12 @@ public void testChunkResponseSizeColumnar() {
             int bodySize = resp.pages().stream().mapToInt(p -> p.getPositionCount() * p.getBlockCount()).sum() + columnCount * 2;
             assertChunkCount(resp, r -> 5 + bodySize);
         }
+
+        try (EsqlQueryResponse resp = randomResponseAsync(true, null, true)) {
+            int columnCount = resp.pages().get(0).getBlockCount();
+            int bodySize = resp.pages().stream().mapToInt(p -> p.getPositionCount() * p.getBlockCount()).sum() + columnCount * 2;
+            assertChunkCount(resp, r -> 6 + bodySize); // is_running
+        }
     }
 
     public void testChunkResponseSizeRows() {
@@ -230,6 +317,10 @@ public void testChunkResponseSizeRows() {
             int bodySize = resp.pages().stream().mapToInt(p -> p.getPositionCount()).sum();
             assertChunkCount(resp, r -> 5 + bodySize);
         }
+        try (EsqlQueryResponse resp = randomResponseAsync(false, null, true)) {
+            int bodySize = resp.pages().stream().mapToInt(p -> p.getPositionCount()).sum();
+            assertChunkCount(resp, r -> 6 + bodySize);
+        }
     }
 
     public void testSimpleXContentColumnar() {
@@ -239,6 +330,13 @@ public void testSimpleXContentColumnar() {
         }
     }
 
+    public void testSimpleXContentColumnarAsync() {
+        try (EsqlQueryResponse response = simple(true, true)) {
+            assertThat(Strings.toString(response), equalTo("""
+                {"is_running":false,"columns":[{"name":"foo","type":"integer"}],"values":[[40,80]]}"""));
+        }
+    }
+
     public void testSimpleXContentRows() {
         try (EsqlQueryResponse response = simple(false)) {
             assertThat(Strings.toString(response), equalTo("""
@@ -246,12 +344,41 @@ public void testSimpleXContentRows() {
         }
     }
 
+    public void testSimpleXContentRowsAsync() {
+        try (EsqlQueryResponse response = simple(false, true)) {
+            assertThat(Strings.toString(response), equalTo("""
+                {"is_running":false,"columns":[{"name":"foo","type":"integer"}],"values":[[40],[80]]}"""));
+        }
+    }
+
+    public void testBasicXContentIdAndRunning() {
+        try (
+            EsqlQueryResponse response = new EsqlQueryResponse(
+                List.of(new ColumnInfo("foo", "integer")),
+                List.of(new Page(blockFactory.newIntArrayVector(new int[] { 40, 80 }, 2).asBlock())),
+                null,
+                false,
+                "id-123",
+                true,
+                true
+            )
+        ) {
+            assertThat(Strings.toString(response), equalTo("""
+                {"id":"id-123","is_running":true,"columns":[{"name":"foo","type":"integer"}],"values":[[40],[80]]}"""));
+        }
+    }
+
     private EsqlQueryResponse simple(boolean columnar) {
+        return simple(columnar, false);
+    }
+
+    private EsqlQueryResponse simple(boolean columnar, boolean async) {
         return new EsqlQueryResponse(
             List.of(new ColumnInfo("foo", "integer")),
-            List.of(new Page(new IntArrayVector(new int[] { 40, 80 }, 2).asBlock())),
+            List.of(new Page(blockFactory.newIntArrayVector(new int[] { 40, 80 }, 2).asBlock())),
             null,
-            columnar
+            columnar,
+            async
         );
     }
 
@@ -259,10 +386,11 @@ public void testProfileXContent() {
         try (
             EsqlQueryResponse response = new EsqlQueryResponse(
                 List.of(new ColumnInfo("foo", "integer")),
-                List.of(new Page(new IntArrayVector(new int[] { 40, 80 }, 2).asBlock())),
+                List.of(new Page(blockFactory.newIntArrayVector(new int[] { 40, 80 }, 2).asBlock())),
                 new EsqlQueryResponse.Profile(
                     List.of(new DriverProfile(List.of(new DriverStatus.OperatorStatus("asdf", new AbstractPageMappingOperator.Status(10)))))
                 ),
+                false,
                 false
             );
         ) {
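For quick reference, this is the wire shape asserted by `testBasicXContentIdAndRunning` above, reformatted: `id` appears only when the task has an execution id to report (e.g. with `keep_on_completion`), and `is_running` marks a result that is still being computed:

```json
{
  "id": "id-123",
  "is_running": true,
  "columns": [{ "name": "foo", "type": "integer" }],
  "values": [[40], [80]]
}
```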
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java
index 500a7a1b14195..de9f5e1aedabf 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java
@@ -28,10 +28,13 @@
 import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator;
 import org.elasticsearch.core.PathUtils;
 import org.elasticsearch.core.Releasables;
+import org.elasticsearch.geo.GeometryTestUtils;
+import org.elasticsearch.geo.ShapeTestUtils;
 import org.elasticsearch.indices.CrankyCircuitBreakerService;
 import org.elasticsearch.logging.LogManager;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xcontent.json.JsonXContent;
+import org.elasticsearch.xpack.esql.TestBlockFactory;
 import org.elasticsearch.xpack.esql.evaluator.EvalMapper;
 import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Greatest;
 import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce;
@@ -40,6 +43,7 @@
 import org.elasticsearch.xpack.esql.planner.Layout;
 import org.elasticsearch.xpack.esql.planner.PlannerUtils;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
+import org.elasticsearch.xpack.ql.expression.Attribute;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.expression.FieldAttribute;
 import org.elasticsearch.xpack.ql.expression.Literal;
@@ -123,8 +127,8 @@ public static Literal randomLiteral(DataType type) {
             case "time_duration" -> Duration.ofMillis(randomLongBetween(-604800000L, 604800000L)); // plus/minus 7 days
             case "text" -> new BytesRef(randomAlphaOfLength(50));
             case "version" -> randomVersion().toBytesRef();
-            case "geo_point" -> GEO.pointAsLong(randomGeoPoint());
-            case "cartesian_point" -> CARTESIAN.pointAsLong(randomCartesianPoint());
+            case "geo_point" -> GEO.pointAsWKB(GeometryTestUtils.randomPoint());
+            case "cartesian_point" -> CARTESIAN.pointAsWKB(ShapeTestUtils.randomPoint());
             case "null" -> null;
             case "_source" -> {
                 try {
@@ -150,11 +154,17 @@ protected static Iterable<Object[]> parameterSuppliersFromTypedData(List<TestCaseSupplier> cases) {
     public static Page row(List<Object> values) {
-        return new Page(BlockUtils.fromListRow(BlockFactory.getNonBreakingInstance(), values));
+        return new Page(BlockUtils.fromListRow(TestBlockFactory.getNonBreakingInstance(), values));
     }
 
     /**
      * Hack together a layout by scanning for Fields.
      * Those will show up in the layout in whatever order a depth first traversal finds them.
      */
-    protected void buildLayout(Layout.Builder builder, Expression e) {
+    protected static void buildLayout(Layout.Builder builder, Expression e) {
         if (e instanceof FieldAttribute f) {
             builder.append(f);
             return;
@@ -434,13 +448,14 @@ public final void testSimpleWithNulls() { // TODO replace this with nulls insert
         assumeTrue("All test data types must be representable in order to build fields", testCase.allTypesAreRepresentable());
         List<Object> simpleData = testCase.getDataValues();
         try (EvalOperator.ExpressionEvaluator eval = evaluator(buildFieldExpression(testCase)).get(driverContext())) {
-            Block[] orig = BlockUtils.fromListRow(BlockFactory.getNonBreakingInstance(), simpleData);
+            BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance();
+            Block[] orig = BlockUtils.fromListRow(blockFactory, simpleData);
             for (int i = 0; i < orig.length; i++) {
                 List<Object> data = new ArrayList<>();
                 Block[] blocks = new Block[orig.length];
                 for (int b = 0; b < blocks.length; b++) {
                     if (b == i) {
-                        blocks[b] = orig[b].elementType().newBlockBuilder(1).appendNull().build();
+                        blocks[b] = orig[b].elementType().newBlockBuilder(1, blockFactory).appendNull().build();
                         data.add(null);
                     } else {
                         blocks[b] = orig[b];
@@ -664,13 +679,9 @@ protected static List<TestCaseSupplier> anyNullIsNull(boolean entirelyNullPreservesType, List<TestCaseSupplier> suppliers) {
             if (newSignature) {
                 suppliers.add(new TestCaseSupplier(typesWithNull, () -> {
                     TestCaseSupplier.TestCase oc = original.get();
-                    List<TestCaseSupplier.TypedData> data = IntStream.range(0, oc.getData().size()).mapToObj(i -> {
-                        TestCaseSupplier.TypedData od = oc.getData().get(i);
-                        if (i == finalNullPosition) {
-                            return new TestCaseSupplier.TypedData(null, DataTypes.NULL, od.name());
-                        }
-                        return od;
-                    }).toList();
+                    List<TestCaseSupplier.TypedData> data = IntStream.range(0, oc.getData().size())
+                        .mapToObj(i -> i == finalNullPosition ? TestCaseSupplier.TypedData.NULL : oc.getData().get(i))
+                        .toList();
                     return new TestCaseSupplier.TestCase(
                         data,
                         "LiteralsEvaluator[lit=null]",
@@ -891,6 +902,14 @@ private static String typeErrorMessage(boolean includeOrdinal, List<DataType> validPerPosition, List<DataType> types) {
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java
@@ ... @@ public static List<TestCaseSupplier> forBinaryCastingToDouble(
                 + "]",
             warnings,
             suppliers,
-            DataTypes.DOUBLE
+            DataTypes.DOUBLE,
+            false
         );
         return suppliers;
     }
@@ -200,40 +201,55 @@ private static void casesCrossProduct(
         BiFunction<DataType, DataType, String> evaluatorToString,
         List<String> warnings,
         List<TestCaseSupplier> suppliers,
-        DataType expectedType
+        DataType expectedType,
+        boolean symmetric
     ) {
         for (TypedDataSupplier lhsSupplier : lhsSuppliers) {
             for (TypedDataSupplier rhsSupplier : rhsSuppliers) {
-                String caseName = lhsSupplier.name() + ", " + rhsSupplier.name();
-                suppliers.add(new TestCaseSupplier(caseName, List.of(lhsSupplier.type(), rhsSupplier.type()), () -> {
-                    Object lhs = lhsSupplier.supplier().get();
-                    Object rhs = rhsSupplier.supplier().get();
-                    TypedData lhsTyped = new TypedData(
-                        // TODO there has to be a better way to handle unsigned long
-                        lhs instanceof BigInteger b ? NumericUtils.asLongUnsigned(b) : lhs,
-                        lhsSupplier.type(),
-                        "lhs"
-                    );
-                    TypedData rhsTyped = new TypedData(
-                        rhs instanceof BigInteger b ? NumericUtils.asLongUnsigned(b) : rhs,
-                        rhsSupplier.type(),
-                        "rhs"
-                    );
-                    TestCase testCase = new TestCase(
-                        List.of(lhsTyped, rhsTyped),
-                        evaluatorToString.apply(lhsSupplier.type(), rhsSupplier.type()),
-                        expectedType,
-                        equalTo(expected.apply(lhs, rhs))
-                    );
-                    for (String warning : warnings) {
-                        testCase = testCase.withWarning(warning);
-                    }
-                    return testCase;
-                }));
+                suppliers.add(testCaseSupplier(lhsSupplier, rhsSupplier, evaluatorToString, expectedType, expected, warnings));
+                if (symmetric) {
+                    suppliers.add(testCaseSupplier(rhsSupplier, lhsSupplier, evaluatorToString, expectedType, expected, warnings));
+                }
             }
         }
     }
 
+    private static TestCaseSupplier testCaseSupplier(
+        TypedDataSupplier lhsSupplier,
+        TypedDataSupplier rhsSupplier,
+        BiFunction<DataType, DataType, String> evaluatorToString,
+        DataType expectedType,
+        BinaryOperator<Object> expectedValue,
+        List<String> warnings
+    ) {
+        String caseName = lhsSupplier.name() + ", " + rhsSupplier.name();
+        return new TestCaseSupplier(caseName, List.of(lhsSupplier.type(), rhsSupplier.type()), () -> {
+            Object lhs = lhsSupplier.supplier().get();
+            Object rhs = rhsSupplier.supplier().get();
+            TypedData lhsTyped = new TypedData(
+                // TODO there has to be a better way to handle unsigned long
+                lhs instanceof BigInteger b ? NumericUtils.asLongUnsigned(b) : lhs,
+                lhsSupplier.type(),
+                "lhs"
+            );
+            TypedData rhsTyped = new TypedData(
+                rhs instanceof BigInteger b ? NumericUtils.asLongUnsigned(b) : rhs,
+                rhsSupplier.type(),
+                "rhs"
+            );
+            TestCase testCase = new TestCase(
+                List.of(lhsTyped, rhsTyped),
+                evaluatorToString.apply(lhsSupplier.type(), rhsSupplier.type()),
+                expectedType,
+                equalTo(expectedValue.apply(lhs, rhs))
+            );
+            for (String warning : warnings) {
+                testCase = testCase.withWarning(warning);
+            }
+            return testCase;
+        });
+    }
+
     public static List<TypedDataSupplier> castToDoubleSuppliersFromRange(Double Min, Double Max) {
         List<TypedDataSupplier> suppliers = new ArrayList<>();
         suppliers.addAll(intCases(Min.intValue(), Max.intValue()));
@@ -243,30 +259,6 @@ public static List<TypedDataSupplier> castToDoubleSuppliersFromRange(Double Min, Double Max) {
         return suppliers;
     }
 
-    public static List<TestCaseSupplier> forBinaryNumericNotCasting(
-        String name,
-        String lhsName,
-        String rhsName,
-        BinaryOperator<Number> expected,
-        DataType expectedType,
-        List<TypedDataSupplier> lhsSuppliers,
-        List<TypedDataSupplier> rhsSuppliers,
-        List<String> warnings,
-        boolean symetric
-    ) {
-        return forBinaryNotCasting(
-            name,
-            lhsName,
-            rhsName,
-            (lhs, rhs) -> expected.apply((Number) lhs, (Number) rhs),
-            expectedType,
-            lhsSuppliers,
-            rhsSuppliers,
-            warnings,
-            symetric
-        );
-    }
-
     public record NumericTypeTestConfig(Number min, Number max, BinaryOperator<Number> expected, String evaluatorName) {}
 
     public record NumericTypeTestConfigs(
@@ -333,25 +325,25 @@ public static List<TestCaseSupplier> forBinaryWithWidening(
         for (DataType rhsType : numericTypes) {
             DataType expected = widen(lhsType, rhsType);
             NumericTypeTestConfig expectedTypeStuff = typeStuff.get(expected);
-            String evaluator = expectedTypeStuff.evaluatorName()
+            BiFunction<DataType, DataType, String> evaluatorToString = (lhs, rhs) -> expectedTypeStuff.evaluatorName()
                 + "["
                 + lhsName
                 + "="
-                + getCastEvaluator("Attribute[channel=0]", lhsType, expected)
+                + getCastEvaluator("Attribute[channel=0]", lhs, expected)
                 + ", "
                 + rhsName
                 + "="
-                + getCastEvaluator("Attribute[channel=1]", rhsType, expected)
+                + getCastEvaluator("Attribute[channel=1]", rhs, expected)
                 + "]";
             casesCrossProduct(
                 (l, r) -> expectedTypeStuff.expected().apply((Number) l, (Number) r),
                 getSuppliersForNumericType(lhsType, expectedTypeStuff.min(), expectedTypeStuff.max()),
                 getSuppliersForNumericType(rhsType, expectedTypeStuff.min(), expectedTypeStuff.max()),
-                // TODO: This doesn't really need to be a function
-                (lt, rt) -> evaluator,
+                evaluatorToString,
                 warnings,
                 suppliers,
-                expected
+                expected,
+                true
             );
         }
     }
@@ -367,8 +359,7 @@ public static List<TestCaseSupplier> forBinaryNotCasting(
         DataType expectedType,
         List<TypedDataSupplier> lhsSuppliers,
         List<TypedDataSupplier> rhsSuppliers,
-        List<String> warnings,
-        boolean symetric
+        List<String> warnings
     ) {
         List<TestCaseSupplier> suppliers = new ArrayList<>();
         casesCrossProduct(
@@ -378,7 +369,8 @@ public static List<TestCaseSupplier> forBinaryNotCasting(
             (lhsType, rhsType) -> name + "[" + lhsName + "=Attribute[channel=0], " + rhsName + "=Attribute[channel=1]]",
             warnings,
             suppliers,
-            expectedType
+            expectedType,
+            true
         );
         return suppliers;
     }
@@ -559,17 +551,10 @@ public static void forUnaryGeoPoint(
         List<TestCaseSupplier> suppliers,
         String expectedEvaluatorToString,
         DataType expectedType,
-        Function<Long, Object> expectedValue,
+        Function<BytesRef, Object> expectedValue,
         List<String> warnings
     ) {
-        unaryNumeric(
-            suppliers,
-            expectedEvaluatorToString,
-            geoPointCases(),
-            expectedType,
-            n -> expectedValue.apply(n.longValue()),
-            warnings
-        );
+        unary(suppliers, expectedEvaluatorToString, geoPointCases(), expectedType, n -> expectedValue.apply((BytesRef) n), warnings);
     }
 
     /**
@@ -579,17 +564,10 @@ public static void forUnaryCartesianPoint(
         List<TestCaseSupplier> suppliers,
         String expectedEvaluatorToString,
         DataType expectedType,
-        Function<Long, Object> expectedValue,
+        Function<BytesRef, Object> expectedValue,
         List<String> warnings
    ) {
-        unaryNumeric(
-            suppliers,
-            expectedEvaluatorToString,
-            cartesianPointCases(),
-            expectedType,
-            n -> expectedValue.apply(n.longValue()),
-            warnings
-        );
+        unary(suppliers, expectedEvaluatorToString, cartesianPointCases(), expectedType, n -> expectedValue.apply((BytesRef) n), warnings);
     }
 
     /**
@@ -935,12 +913,18 @@ public static List<TypedDataSupplier> timeDurationCases() {
     }
 
     private static List<TypedDataSupplier> geoPointCases() {
-        return List.of(new TypedDataSupplier("<geo_point>", () -> GEO.pointAsLong(randomGeoPoint()), EsqlDataTypes.GEO_POINT));
+        return List.of(
+            new TypedDataSupplier("<geo_point>", () -> GEO.pointAsWKB(GeometryTestUtils.randomPoint()), EsqlDataTypes.GEO_POINT)
+        );
     }
 
     private static List<TypedDataSupplier> cartesianPointCases() {
         return List.of(
-            new TypedDataSupplier("<cartesian point>", () -> CARTESIAN.pointAsLong(randomCartesianPoint()), EsqlDataTypes.CARTESIAN_POINT)
+            new TypedDataSupplier(
+                "<cartesian point>",
+                () -> CARTESIAN.pointAsWKB(ShapeTestUtils.randomPoint()),
+                EsqlDataTypes.CARTESIAN_POINT
+            )
        );
    }
@@ -1184,14 +1168,22 @@ public TestCase withWarning(String warning) {
      * exists because we can't generate random values from the test parameter generation functions, and instead need to return
      * suppliers which generate the random values at test execution time.
      */
-    public record TypedDataSupplier(String name, Supplier<Object> supplier, DataType type) {}
+    public record TypedDataSupplier(String name, Supplier<Object> supplier, DataType type) {
+        public TypedData get() {
+            return new TypedData(supplier.get(), type, name);
+        }
+    }
 
     /**
      * Holds a data value and the intended parse type of that value
     * @param data - value to test against
     * @param type - type of the value, for building expressions
+     * @param name - a name for the value, used for generating test case names
     */
    public record TypedData(Object data, DataType type, String name) {
+
+        public static final TypedData NULL = new TypedData(null, DataTypes.NULL, "<null>");
+
        public TypedData(Object data, String name) {
            this(data, EsqlDataTypes.fromJava(data), name);
        }
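The `TypedDataSupplier.get()` convenience added above collapses the recurring `new TypedData(supplier.get(), type, name)` dance into one call. A self-contained toy version (simplified types, not the real records) showing the shape of the API:

```java
// Toy reproduction of the TypedDataSupplier.get() convenience: a named, typed
// value generator that materializes one TypedData per test execution.
import java.util.function.Supplier;

record TypedData(Object data, String type, String name) {}

record TypedDataSupplier(String name, Supplier<Object> supplier, String type) {
    TypedData get() {
        return new TypedData(supplier.get(), type, name);
    }
}

class TypedDataSupplierDemo {
    public static void main(String[] args) {
        var ints = new TypedDataSupplier("<int>", () -> 42, "integer");
        System.out.println(ints.get()); // TypedData[data=42, type=integer, name=<int>]
    }
}
```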
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java
index 838044c8b90f6..90692d5b19df1 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java
@@ -12,8 +12,8 @@
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.compute.data.Block;
-import org.elasticsearch.compute.data.IntBlock;
 import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.compute.operator.DriverContext;
 import org.elasticsearch.compute.operator.EvalOperator;
 import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
 import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
@@ -88,11 +88,15 @@ protected Expression build(Source source, List<Expression> args) {
 
     public void testEvalCase() {
         testCase(caseExpr -> {
+            DriverContext driverContext = driverContext();
+            Page page = new Page(driverContext.blockFactory().newConstantIntBlockWith(0, 1));
             try (
-                EvalOperator.ExpressionEvaluator eval = caseExpr.toEvaluator(child -> evaluator(child)).get(driverContext());
-                Block block = eval.eval(new Page(IntBlock.newConstantBlockWith(0, 1)))
+                EvalOperator.ExpressionEvaluator eval = caseExpr.toEvaluator(child -> evaluator(child)).get(driverContext);
+                Block block = eval.eval(page)
             ) {
                 return toJavaObject(block, 0);
+            } finally {
+                page.releaseBlocks();
             }
         });
     }
@@ -148,7 +152,8 @@ public void testCaseWithIncompatibleTypes() {
 
     public void testCaseIsLazy() {
         Case caseExpr = caseExpr(true, 1, true, 2);
-        try (Block block = caseExpr.toEvaluator(child -> {
+        DriverContext driveContext = driverContext();
+        EvalOperator.ExpressionEvaluator evaluator = caseExpr.toEvaluator(child -> {
             Object value = child.fold();
             if (value != null && value.equals(2)) {
                 return dvrCtx -> new EvalOperator.ExpressionEvaluator() {
@@ -163,8 +168,12 @@ public void close() {}
                 };
             }
             return evaluator(child);
-        }).get(driverContext()).eval(new Page(IntBlock.newConstantBlockWith(0, 1)))) {
+        }).get(driveContext);
+        Page page = new Page(driveContext.blockFactory().newConstantIntBlockWith(0, 1));
+        try (Block block = evaluator.eval(page)) {
             assertEquals(1, toJavaObject(block, 0));
+        } finally {
+            page.releaseBlocks();
+        }
     }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java
new file mode 100644
index 0000000000000..399ce11ab3d4c
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java
@@ -0,0 +1,96 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.convert;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.geo.ShapeTestUtils;
+import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
+import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.tree.Source;
+import org.elasticsearch.xpack.ql.type.DataTypes;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN;
+
+public class ToCartesianPointTests extends AbstractFunctionTestCase {
+    public ToCartesianPointTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
+        this.testCase = testCaseSupplier.get();
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() {
+        // TODO multivalue fields
+        final String attribute = "Attribute[channel=0]";
+        final Function<String, String> evaluatorName = s -> "ToCartesianPoint" + s + "Evaluator[field=" + attribute + "]";
+        final List<TestCaseSupplier> suppliers = new ArrayList<>();
+
+        TestCaseSupplier.forUnaryCartesianPoint(suppliers, attribute, EsqlDataTypes.CARTESIAN_POINT, v -> v, List.of());
+        TestCaseSupplier.forUnaryLong(suppliers, evaluatorName.apply("FromLong"), EsqlDataTypes.CARTESIAN_POINT, l -> {
+            try {
+                return CARTESIAN.longAsWKB(l);
+            } catch (IllegalArgumentException e) {
+                return null;
+            }
+        }, Long.MIN_VALUE, Long.MAX_VALUE, l -> {
+            try {
+                CARTESIAN.longAsWKB(l.longValue());
+                return List.of();
+            } catch (IllegalArgumentException exception) {
+                return List.of(
+                    "Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.",
+                    "Line -1:-1: " + exception
+                );
+            }
+        });
+        // random strings that don't look like a cartesian point
+        TestCaseSupplier.forUnaryStrings(
+            suppliers,
+            evaluatorName.apply("FromString"),
+            EsqlDataTypes.CARTESIAN_POINT,
+            bytesRef -> null,
+            bytesRef -> {
+                var exception = expectThrows(Exception.class, () -> CARTESIAN.stringAsWKB(bytesRef.utf8ToString()));
+                return List.of(
+                    "Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.",
+                    "Line -1:-1: " + exception
+                );
+            }
+        );
+        // strings that are cartesian point representations
+        TestCaseSupplier.unary(
+            suppliers,
+            evaluatorName.apply("FromString"),
+            List.of(
+                new TestCaseSupplier.TypedDataSupplier(
+                    "<cartesian point as string>",
+                    () -> new BytesRef(CARTESIAN.pointAsString(ShapeTestUtils.randomPoint())),
+                    DataTypes.KEYWORD
+                )
+            ),
+            EsqlDataTypes.CARTESIAN_POINT,
+            bytesRef -> CARTESIAN.stringAsWKB(((BytesRef) bytesRef).utf8ToString()),
+            List.of()
+        );
+
+        return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers)));
+    }
+
+    @Override
+    protected Expression build(Source source, List<Expression> args) {
+        return new ToCartesianPoint(source, args.get(0));
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java
index ebcaf367b1226..0309bcce85581 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java
@@ -54,17 +54,13 @@ public static Iterable<Object[]> parameters() {
             List.of()
         );
         // random strings that don't look like a double
-        TestCaseSupplier.forUnaryStrings(
-            suppliers,
-            evaluatorName.apply("String"),
-            DataTypes.DOUBLE,
-            bytesRef -> null,
-            bytesRef -> List.of(
+        TestCaseSupplier.forUnaryStrings(suppliers, evaluatorName.apply("String"), DataTypes.DOUBLE, bytesRef -> null, bytesRef -> {
+            var exception = expectThrows(NumberFormatException.class, () -> Double.parseDouble(bytesRef.utf8ToString()));
+            return List.of(
                 "Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.",
-                "Line -1:-1: java.lang.NumberFormatException: "
-                    + (bytesRef.utf8ToString().isEmpty() ? "empty String" : ("For input string: \"" + bytesRef.utf8ToString() + "\""))
-            )
-        );
+                "Line -1:-1: " + exception
+            );
+        });
         TestCaseSupplier.forUnaryUnsignedLong(
             suppliers,
             evaluatorName.apply("UnsignedLong"),
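These conversion tests all ride on the same representation change: geo and cartesian points now travel through blocks as WKB `BytesRef`s rather than encoded longs. The sketch below strings together only the `SpatialCoordinateTypes` conversions that appear in this diff (`stringAsWKB`, `wkbAsString`, `wkbAsLong`, `longAsWKB`); treat it as orientation, not a claim about exact round-trips, since the long encoding quantizes coordinates.

```java
// Orientation sketch for the WKB-based point representation used above.
import org.apache.lucene.util.BytesRef;

import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO;

public class WkbConversionSketch {
    public static void main(String[] args) {
        BytesRef wkb = GEO.stringAsWKB("POINT (12.0 55.0)"); // WKT text -> WKB bytes
        System.out.println(GEO.wkbAsString(wkb));            // back to WKT
        long encoded = GEO.wkbAsLong(wkb);                   // legacy long encoding
        System.out.println(GEO.longAsWKB(encoded));          // long -> WKB (quantized)
    }
}
```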
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataTypes; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Function; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; + +public class ToGeoPointTests extends AbstractFunctionTestCase { + public ToGeoPointTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + // TODO multivalue fields + final String attribute = "Attribute[channel=0]"; + final Function evaluatorName = s -> "ToGeoPoint" + s + "Evaluator[field=" + attribute + "]"; + final List suppliers = new ArrayList<>(); + + TestCaseSupplier.forUnaryGeoPoint(suppliers, attribute, EsqlDataTypes.GEO_POINT, v -> v, List.of()); + TestCaseSupplier.forUnaryLong( + suppliers, + evaluatorName.apply("FromLong"), + EsqlDataTypes.GEO_POINT, + GEO::longAsWKB, + Long.MIN_VALUE, + Long.MAX_VALUE, + List.of() + ); + // random strings that don't look like a geo point + TestCaseSupplier.forUnaryStrings( + suppliers, + evaluatorName.apply("FromString"), + EsqlDataTypes.GEO_POINT, + bytesRef -> null, + bytesRef -> { + var exception = expectThrows(Exception.class, () -> GEO.stringAsWKB(bytesRef.utf8ToString())); + return List.of( + "Line -1:-1: evaluation of [] failed, treating result as null. 
Only first 20 failures recorded.", + "Line -1:-1: " + exception + ); + } + ); + // strings that are geo point representations + TestCaseSupplier.unary( + suppliers, + evaluatorName.apply("FromString"), + List.of( + new TestCaseSupplier.TypedDataSupplier( + "", + () -> new BytesRef(GEO.pointAsString(GeometryTestUtils.randomPoint())), + DataTypes.KEYWORD + ) + ), + EsqlDataTypes.GEO_POINT, + bytesRef -> GEO.stringAsWKB(((BytesRef) bytesRef).utf8ToString()), + List.of() + ); + + return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + } + + @Override + protected Expression build(Source source, List args) { + return new ToGeoPoint(source, args.get(0)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java index b153fa8489dee..1c2488c8e9cb5 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java @@ -24,6 +24,9 @@ import java.util.function.Function; import java.util.function.Supplier; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; + public class ToLongTests extends AbstractFunctionTestCase { public ToLongTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); @@ -41,8 +44,14 @@ public static Iterable parameters() { TestCaseSupplier.forUnaryBoolean(suppliers, evaluatorName.apply("Boolean"), DataTypes.LONG, b -> b ? 
1L : 0L, List.of()); // geo types - TestCaseSupplier.forUnaryGeoPoint(suppliers, read, DataTypes.LONG, i -> i, List.of()); - TestCaseSupplier.forUnaryCartesianPoint(suppliers, read, DataTypes.LONG, i -> i, List.of()); + TestCaseSupplier.forUnaryGeoPoint(suppliers, evaluatorName.apply("GeoPoint"), DataTypes.LONG, GEO::wkbAsLong, List.of()); + TestCaseSupplier.forUnaryCartesianPoint( + suppliers, + evaluatorName.apply("CartesianPoint"), + DataTypes.LONG, + CARTESIAN::wkbAsLong, + List.of() + ); // datetimes TestCaseSupplier.forUnaryDatetime(suppliers, read, DataTypes.LONG, Instant::toEpochMilli, List.of()); // random strings that don't look like a long diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java index 088b9b438898b..46721c190c7b6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java @@ -91,14 +91,14 @@ public static Iterable parameters() { suppliers, "ToStringFromGeoPointEvaluator[field=" + read + "]", DataTypes.KEYWORD, - i -> new BytesRef(GEO.pointAsString(GEO.longAsPoint(i))), + wkb -> new BytesRef(GEO.wkbAsString(wkb)), List.of() ); TestCaseSupplier.forUnaryCartesianPoint( suppliers, "ToStringFromCartesianPointEvaluator[field=" + read + "]", DataTypes.KEYWORD, - i -> new BytesRef(CARTESIAN.pointAsString(CARTESIAN.longAsPoint(i))), + wkb -> new BytesRef(CARTESIAN.wkbAsString(wkb)), List.of() ); TestCaseSupplier.forUnaryIp( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffTests.java new file mode 100644 index 0000000000000..15d0cca454407 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateDiffTests.java @@ -0,0 +1,192 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
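The ToLongTests and ToStringTests hunks above move spatial test data from packed longs to WKB-encoded bytes, with expected values computed by the GEO and CARTESIAN helpers. A hedged sketch of that round trip, assuming the ESQL test classpath (the class name and coordinates are mine; all helper calls appear in the diff):

```java
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.geometry.Point;

import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO;

// Sketch of the conversions the updated tests exercise.
public class WkbRoundTripSketch {
    public static void main(String[] args) {
        BytesRef wkb = GEO.pointAsWKB(new Point(12, 56)); // point -> WKB bytes
        long packed = GEO.wkbAsLong(wkb);                 // to_long() path
        String wkt = GEO.wkbAsString(wkb);                // to_string() path, e.g. "POINT (12.0 56.0)"
        BytesRef roundTripped = GEO.stringAsWKB(wkt);     // string-to-geo-point path
        System.out.println(wkt + " / " + packed + " / " + roundTripped);
    }
}
```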
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.date; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataTypes; + +import java.time.ZonedDateTime; +import java.util.List; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class DateDiffTests extends AbstractFunctionTestCase { + public DateDiffTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + ZonedDateTime zdtStart = ZonedDateTime.parse("2023-12-04T10:15:30Z"); + ZonedDateTime zdtEnd = ZonedDateTime.parse("2023-12-05T10:45:00Z"); + + return parameterSuppliersFromTypedData( + List.of( + new TestCaseSupplier( + "Date Diff In Seconds - OK", + List.of(DataTypes.KEYWORD, DataTypes.DATETIME, DataTypes.DATETIME), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("seconds"), DataTypes.KEYWORD, "unit"), + new TestCaseSupplier.TypedData(zdtStart.toInstant().toEpochMilli(), DataTypes.DATETIME, "startTimestamp"), + new TestCaseSupplier.TypedData(zdtEnd.toInstant().toEpochMilli(), DataTypes.DATETIME, "endTimestamp") + ), + "DateDiffEvaluator[unit=Attribute[channel=0], startTimestamp=Attribute[channel=1], " + + "endTimestamp=Attribute[channel=2]]", + DataTypes.INTEGER, + equalTo(88170) + ) + ), + new TestCaseSupplier( + "Date Diff In Seconds with text- OK", + List.of(DataTypes.TEXT, DataTypes.DATETIME, DataTypes.DATETIME), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("seconds"), DataTypes.TEXT, "unit"), + new TestCaseSupplier.TypedData(zdtStart.toInstant().toEpochMilli(), DataTypes.DATETIME, "startTimestamp"), + new TestCaseSupplier.TypedData(zdtEnd.toInstant().toEpochMilli(), DataTypes.DATETIME, "endTimestamp") + ), + "DateDiffEvaluator[unit=Attribute[channel=0], startTimestamp=Attribute[channel=1], " + + "endTimestamp=Attribute[channel=2]]", + DataTypes.INTEGER, + equalTo(88170) + ) + ), + new TestCaseSupplier( + "Date Diff Error Type unit", + List.of(DataTypes.INTEGER, DataTypes.DATETIME, DataTypes.DATETIME), + () -> TestCaseSupplier.TestCase.typeError( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("seconds"), DataTypes.INTEGER, "unit"), + new TestCaseSupplier.TypedData(zdtStart.toInstant().toEpochMilli(), DataTypes.DATETIME, "startTimestamp"), + new TestCaseSupplier.TypedData(zdtEnd.toInstant().toEpochMilli(), DataTypes.DATETIME, "endTimestamp") + ), + "first argument of [] must be [string], found value [unit] type [integer]" + ) + ), + new TestCaseSupplier( + "Date Diff Error Type startTimestamp", + List.of(DataTypes.TEXT, DataTypes.INTEGER, DataTypes.DATETIME), + () -> TestCaseSupplier.TestCase.typeError( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("minutes"), DataTypes.TEXT, "unit"), + new TestCaseSupplier.TypedData(zdtStart.toInstant().toEpochMilli(), DataTypes.INTEGER, "startTimestamp"), + new 
TestCaseSupplier.TypedData(zdtEnd.toInstant().toEpochMilli(), DataTypes.DATETIME, "endTimestamp") + ), + "second argument of [] must be [datetime], found value [startTimestamp] type [integer]" + ) + ), + new TestCaseSupplier( + "Date Diff Error Type endTimestamp", + List.of(DataTypes.TEXT, DataTypes.DATETIME, DataTypes.INTEGER), + () -> TestCaseSupplier.TestCase.typeError( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("minutes"), DataTypes.TEXT, "unit"), + new TestCaseSupplier.TypedData(zdtStart.toInstant().toEpochMilli(), DataTypes.DATETIME, "startTimestamp"), + new TestCaseSupplier.TypedData(zdtEnd.toInstant().toEpochMilli(), DataTypes.INTEGER, "endTimestamp") + ), + "third argument of [] must be [datetime], found value [endTimestamp] type [integer]" + ) + ) + ) + ); + } + + public void testDateDiffFunction() { + ZonedDateTime zdtStart = ZonedDateTime.parse("2023-12-04T10:15:00Z"); + ZonedDateTime zdtEnd = ZonedDateTime.parse("2023-12-04T10:15:01Z"); + long startTimestamp = zdtStart.toInstant().toEpochMilli(); + long endTimestamp = zdtEnd.toInstant().toEpochMilli(); + + assertEquals(1000000000, DateDiff.process(new BytesRef("nanoseconds"), startTimestamp, endTimestamp)); + assertEquals(1000000000, DateDiff.process(new BytesRef("ns"), startTimestamp, endTimestamp)); + assertEquals(1000000, DateDiff.process(new BytesRef("microseconds"), startTimestamp, endTimestamp)); + assertEquals(1000000, DateDiff.process(new BytesRef("mcs"), startTimestamp, endTimestamp)); + assertEquals(1000, DateDiff.process(new BytesRef("milliseconds"), startTimestamp, endTimestamp)); + assertEquals(1000, DateDiff.process(new BytesRef("ms"), startTimestamp, endTimestamp)); + assertEquals(1, DateDiff.process(new BytesRef("seconds"), startTimestamp, endTimestamp)); + assertEquals(1, DateDiff.process(new BytesRef("ss"), startTimestamp, endTimestamp)); + assertEquals(1, DateDiff.process(new BytesRef("s"), startTimestamp, endTimestamp)); + + zdtEnd = zdtEnd.plusYears(1); + endTimestamp = zdtEnd.toInstant().toEpochMilli(); + + assertEquals(527040, DateDiff.process(new BytesRef("minutes"), startTimestamp, endTimestamp)); + assertEquals(527040, DateDiff.process(new BytesRef("mi"), startTimestamp, endTimestamp)); + assertEquals(527040, DateDiff.process(new BytesRef("n"), startTimestamp, endTimestamp)); + assertEquals(8784, DateDiff.process(new BytesRef("hours"), startTimestamp, endTimestamp)); + assertEquals(8784, DateDiff.process(new BytesRef("hh"), startTimestamp, endTimestamp)); + assertEquals(366, DateDiff.process(new BytesRef("weekdays"), startTimestamp, endTimestamp)); + assertEquals(366, DateDiff.process(new BytesRef("dw"), startTimestamp, endTimestamp)); + assertEquals(52, DateDiff.process(new BytesRef("weeks"), startTimestamp, endTimestamp)); + assertEquals(52, DateDiff.process(new BytesRef("wk"), startTimestamp, endTimestamp)); + assertEquals(52, DateDiff.process(new BytesRef("ww"), startTimestamp, endTimestamp)); + assertEquals(366, DateDiff.process(new BytesRef("days"), startTimestamp, endTimestamp)); + assertEquals(366, DateDiff.process(new BytesRef("dd"), startTimestamp, endTimestamp)); + assertEquals(366, DateDiff.process(new BytesRef("d"), startTimestamp, endTimestamp)); + assertEquals(366, DateDiff.process(new BytesRef("dy"), startTimestamp, endTimestamp)); + assertEquals(366, DateDiff.process(new BytesRef("y"), startTimestamp, endTimestamp)); + assertEquals(12, DateDiff.process(new BytesRef("months"), startTimestamp, endTimestamp)); + assertEquals(12, DateDiff.process(new BytesRef("mm"), 
startTimestamp, endTimestamp)); + assertEquals(12, DateDiff.process(new BytesRef("m"), startTimestamp, endTimestamp)); + assertEquals(4, DateDiff.process(new BytesRef("quarters"), startTimestamp, endTimestamp)); + assertEquals(4, DateDiff.process(new BytesRef("qq"), startTimestamp, endTimestamp)); + assertEquals(4, DateDiff.process(new BytesRef("q"), startTimestamp, endTimestamp)); + assertEquals(1, DateDiff.process(new BytesRef("years"), startTimestamp, endTimestamp)); + assertEquals(1, DateDiff.process(new BytesRef("yyyy"), startTimestamp, endTimestamp)); + assertEquals(1, DateDiff.process(new BytesRef("yy"), startTimestamp, endTimestamp)); + } + + public void testDateDiffFunctionErrorTooLarge() { + ZonedDateTime zdtStart = ZonedDateTime.parse("2023-12-04T10:15:00Z"); + ZonedDateTime zdtEnd = ZonedDateTime.parse("2023-12-04T10:20:00Z"); + long startTimestamp = zdtStart.toInstant().toEpochMilli(); + long endTimestamp = zdtEnd.toInstant().toEpochMilli(); + + InvalidArgumentException e = expectThrows( + InvalidArgumentException.class, + () -> DateDiff.process(new BytesRef("nanoseconds"), startTimestamp, endTimestamp) + ); + assertThat(e.getMessage(), containsString("[300000000000] out of [integer] range")); + } + + public void testDateDiffFunctionErrorUnitNotValid() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> DateDiff.process(new BytesRef("sseconds"), 0, 0)); + assertThat( + e.getMessage(), + containsString( + "Received value [sseconds] is not valid date part to add; " + + "did you mean [seconds, second, nanoseconds, milliseconds, microseconds, nanosecond]?" + ) + ); + + e = expectThrows(IllegalArgumentException.class, () -> DateDiff.process(new BytesRef("not-valid-unit"), 0, 0)); + assertThat( + e.getMessage(), + containsString( + "A value of [YEAR, QUARTER, MONTH, DAYOFYEAR, DAY, WEEK, WEEKDAY, HOUR, MINUTE, SECOND, MILLISECOND, MICROSECOND, " + + "NANOSECOND] or their aliases is required; received [not-valid-unit]" + ) + ); + } + + @Override + protected Expression build(Source source, List args) { + return new DateDiff(source, args.get(0), args.get(1), args.get(2)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java index 96c35905e3dc0..1446fc54c99fa 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java @@ -11,9 +11,11 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Literal; import org.elasticsearch.xpack.ql.tree.Source; @@ -68,6 +70,23 @@ public void testAllChronoFields() { } } + public void testInvalidChrono() { + String chrono = randomAlphaOfLength(10); + DriverContext driverContext = driverContext(); + InvalidArgumentException e = expectThrows( + 
InvalidArgumentException.class, + () -> evaluator( + new DateExtract( + Source.EMPTY, + new Literal(Source.EMPTY, new BytesRef(chrono), DataTypes.KEYWORD), + field("str", DataTypes.DATETIME), + null + ) + ).get(driverContext) + ); + assertThat(e.getMessage(), equalTo("invalid date field for []: " + chrono)); + } + @Override protected Expression build(Source source, List args) { return new DateExtract(source, args.get(0), args.get(1), EsqlTestUtils.TEST_CFG); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java index f6ead24ff34a6..5c5af560aec08 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java @@ -11,9 +11,12 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.Literal; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; @@ -22,6 +25,7 @@ import java.util.function.Supplier; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.startsWith; public class DateParseTests extends AbstractScalarFunctionTestCase { public DateParseTests(@Name("TestCase") Supplier testCaseSupplier) { @@ -60,6 +64,22 @@ public static Iterable parameters() { ); } + public void testInvalidPattern() { + String pattern = randomAlphaOfLength(10); + DriverContext driverContext = driverContext(); + InvalidArgumentException e = expectThrows( + InvalidArgumentException.class, + () -> evaluator( + new DateParse( + Source.EMPTY, + new Literal(Source.EMPTY, new BytesRef(pattern), DataTypes.KEYWORD), + field("str", DataTypes.KEYWORD) + ) + ).get(driverContext) + ); + assertThat(e.getMessage(), startsWith("invalid date pattern for []: Invalid format: [" + pattern + "]")); + } + @Override protected Expression build(Source source, List args) { return new DateParse(source, args.get(0), args.size() > 1 ? 
args.get(1) : null); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java index fe04a659651de..6f0a2edafaf04 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java @@ -8,8 +8,10 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.geo.SpatialPoint; import org.elasticsearch.compute.data.Block; +import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.geo.ShapeTestUtils; +import org.elasticsearch.geometry.Point; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; @@ -390,18 +392,20 @@ protected static void dateTimes( /** * Build many test cases with {@code geo_point} values. + * This assumes that the function consumes {@code geo_point} values and produces {@code geo_point} values. */ protected static void geoPoints( List cases, String name, String evaluatorName, - BiFunction> matcher + BiFunction, Matcher> matcher ) { geoPoints(cases, name, evaluatorName, EsqlDataTypes.GEO_POINT, matcher); } /** * Build many test cases with {@code geo_point} values that are converted to another type. + * This assumes that the function consumes {@code geo_point} values and produces another type. * For example, mv_count() can consume points and produce an integer count. */ protected static void geoPoints( @@ -409,25 +413,27 @@ protected static void geoPoints( String name, String evaluatorName, DataType expectedDataType, - BiFunction> matcher + BiFunction, Matcher> matcher ) { - points(cases, name, evaluatorName, EsqlDataTypes.GEO_POINT, expectedDataType, GEO, ESTestCase::randomGeoPoint, matcher); + points(cases, name, evaluatorName, EsqlDataTypes.GEO_POINT, expectedDataType, GEO, GeometryTestUtils::randomPoint, matcher); } /** * Build many test cases with {@code cartesian_point} values. + * This assumes that the function consumes {@code cartesian_point} values and produces {@code cartesian_point} values. */ protected static void cartesianPoints( List cases, String name, String evaluatorName, - BiFunction> matcher + BiFunction, Matcher> matcher ) { cartesianPoints(cases, name, evaluatorName, EsqlDataTypes.CARTESIAN_POINT, matcher); } /** * Build many test cases with {@code cartesian_point} values that are converted to another type. + * This assumes that the function consumes {@code cartesian_point} values and produces another type. * For example, mv_count() can consume points and produce an integer count. 
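* (With this change the {@code matcher} receives the value count plus a {@code Stream} of WKB-encoded {@code BytesRef} values, where it previously received a {@code LongStream} of packed points.)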
*/ protected static void cartesianPoints( @@ -435,7 +441,7 @@ protected static void cartesianPoints( String name, String evaluatorName, DataType expectedDataType, - BiFunction> matcher + BiFunction, Matcher> matcher ) { points( cases, @@ -444,7 +450,7 @@ protected static void cartesianPoints( EsqlDataTypes.CARTESIAN_POINT, expectedDataType, CARTESIAN, - ESTestCase::randomCartesianPoint, + ShapeTestUtils::randomPoint, matcher ); } @@ -458,29 +464,28 @@ protected static void points( String evaluatorName, DataType dataType, DataType expectedDataType, - SpatialCoordinateTypes coordType, - Supplier randomPoint, - BiFunction> matcher + SpatialCoordinateTypes spatial, + Supplier randomPoint, + BiFunction, Matcher> matcher ) { cases.add(new TestCaseSupplier(name + "(" + dataType.typeName() + ")", List.of(dataType), () -> { - SpatialPoint point = randomPoint.get(); - long data = coordType.pointAsLong(point); + BytesRef wkb = spatial.pointAsWKB(randomPoint.get()); return new TestCaseSupplier.TestCase( - List.of(new TestCaseSupplier.TypedData(List.of(data), dataType, "field")), + List.of(new TestCaseSupplier.TypedData(List.of(wkb), dataType, "field")), evaluatorName + "[field=Attribute[channel=0]]", expectedDataType, - matcher.apply(1, LongStream.of(data)) + matcher.apply(1, Stream.of(wkb)) ); })); for (Block.MvOrdering ordering : Block.MvOrdering.values()) { cases.add(new TestCaseSupplier(name + "(<" + dataType.typeName() + "s>) " + ordering, List.of(dataType), () -> { - List mvData = randomList(1, 100, () -> coordType.pointAsLong(randomPoint.get())); + List mvData = randomList(1, 100, () -> spatial.pointAsWKB(randomPoint.get())); putInOrder(mvData, ordering); return new TestCaseSupplier.TestCase( List.of(new TestCaseSupplier.TypedData(mvData, dataType, "field")), evaluatorName + "[field=Attribute[channel=0]]", expectedDataType, - matcher.apply(mvData.size(), mvData.stream().mapToLong(Long::longValue)) + matcher.apply(mvData.size(), mvData.stream()) ); })); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstTests.java new file mode 100644 index 0000000000000..91c30b7c1f566 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstTests.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
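The points() refactor above also swaps the random data source: geometries now come from the geo test utilities and are WKB-encoded up front, so multivalued rows hold BytesRefs rather than longs. A short sketch under the same classpath assumption (class name is mine):

```java
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.geo.GeometryTestUtils;
import org.elasticsearch.geo.ShapeTestUtils;
import org.elasticsearch.geometry.Point;

import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN;
import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO;

// Random Points, encoded to WKB up front, as the refactored points() does.
public class RandomPointDataSketch {
    public static void main(String[] args) {
        Point geo = GeometryTestUtils.randomPoint();    // constrained to valid lon/lat
        Point cartesian = ShapeTestUtils.randomPoint(); // unconstrained x/y
        BytesRef geoWkb = GEO.pointAsWKB(geo);
        BytesRef cartesianWkb = CARTESIAN.pointAsWKB(cartesian);
        System.out.println(GEO.wkbAsString(geoWkb) + " | " + CARTESIAN.wkbAsString(cartesianWkb));
    }
}
```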
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypes; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Function; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; + +public class MvFirstTests extends AbstractMultivalueFunctionTestCase { + public MvFirstTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + List cases = new ArrayList<>(); + booleans(cases, "mv_first", "MvFirst", DataTypes.BOOLEAN, (size, values) -> equalTo(values.findFirst().get())); + bytesRefs(cases, "mv_first", "MvFirst", Function.identity(), (size, values) -> equalTo(values.findFirst().get())); + doubles(cases, "mv_first", "MvFirst", DataTypes.DOUBLE, (size, values) -> equalTo(values.findFirst().getAsDouble())); + ints(cases, "mv_first", "MvFirst", DataTypes.INTEGER, (size, values) -> equalTo(values.findFirst().getAsInt())); + longs(cases, "mv_first", "MvFirst", DataTypes.LONG, (size, values) -> equalTo(values.findFirst().getAsLong())); + unsignedLongs(cases, "mv_first", "MvFirst", DataTypes.UNSIGNED_LONG, (size, values) -> equalTo(values.findFirst().get())); + dateTimes(cases, "mv_first", "MvFirst", DataTypes.DATETIME, (size, values) -> equalTo(values.findFirst().getAsLong())); + geoPoints(cases, "mv_first", "MvFirst", EsqlDataTypes.GEO_POINT, (size, values) -> equalTo(values.findFirst().get())); + cartesianPoints(cases, "mv_first", "MvFirst", EsqlDataTypes.CARTESIAN_POINT, (size, values) -> equalTo(values.findFirst().get())); + return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, cases))); + } + + @Override + protected Expression build(Source source, Expression field) { + return new MvFirst(source, field); + } + + @Override + protected DataType[] supportedTypes() { + return representableTypes(); + } + + @Override + protected DataType expectedType(List argTypes) { + return argTypes.get(0); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastTests.java new file mode 100644 index 0000000000000..7577cbf7dd0a8 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastTests.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
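The (size, values) matchers in MvFirstTests above, and in MvLastTests below, reduce a Stream of the field's values in multivalue order. A plain-Java illustration of the two reductions (class name is mine):

```java
import java.util.stream.Stream;

// mv_first keeps the first element of the value stream; mv_last keeps the
// survivor of a reduce that always picks its right-hand argument.
public class MvFirstLastSketch {
    public static void main(String[] args) {
        long first = Stream.of(1L, 2L, 3L).findFirst().get();        // 1
        long last = Stream.of(1L, 2L, 3L).reduce((f, s) -> s).get(); // 3
        System.out.println(first + " " + last);
    }
}
```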
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypes; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Function; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; + +public class MvLastTests extends AbstractMultivalueFunctionTestCase { + public MvLastTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + List cases = new ArrayList<>(); + booleans(cases, "mv_last", "MvLast", DataTypes.BOOLEAN, (size, values) -> equalTo(values.reduce((f, s) -> s).get())); + bytesRefs(cases, "mv_last", "MvLast", Function.identity(), (size, values) -> equalTo(values.reduce((f, s) -> s).get())); + doubles(cases, "mv_last", "MvLast", DataTypes.DOUBLE, (size, values) -> equalTo(values.reduce((f, s) -> s).getAsDouble())); + ints(cases, "mv_last", "MvLast", DataTypes.INTEGER, (size, values) -> equalTo(values.reduce((f, s) -> s).getAsInt())); + longs(cases, "mv_last", "MvLast", DataTypes.LONG, (size, values) -> equalTo(values.reduce((f, s) -> s).getAsLong())); + unsignedLongs(cases, "mv_last", "MvLast", DataTypes.UNSIGNED_LONG, (size, values) -> equalTo(values.reduce((f, s) -> s).get())); + dateTimes(cases, "mv_last", "MvLast", DataTypes.DATETIME, (size, values) -> equalTo(values.reduce((f, s) -> s).getAsLong())); + geoPoints(cases, "mv_last", "MvLast", EsqlDataTypes.GEO_POINT, (size, values) -> equalTo(values.reduce((f, s) -> s).get())); + cartesianPoints( + cases, + "mv_last", + "MvLast", + EsqlDataTypes.CARTESIAN_POINT, + (size, values) -> equalTo(values.reduce((f, s) -> s).get()) + ); + return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, cases))); + } + + @Override + protected Expression build(Source source, Expression field) { + return new MvLast(source, field); + } + + @Override + protected DataType[] supportedTypes() { + return representableTypes(); + } + + @Override + protected DataType expectedType(List argTypes) { + return argTypes.get(0); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java index abaa382637882..e0611c7125e6e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java @@ -12,22 +12,21 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import 
org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Literal; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; -import org.hamcrest.Matcher; -import java.util.Arrays; import java.util.List; import java.util.function.Supplier; -import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -66,13 +65,6 @@ protected DataType expectedType(List argTypes) { return DataTypes.KEYWORD; } - private Matcher resultsMatcher(List typedData) { - String str = ((BytesRef) typedData.get(0).data()).utf8ToString(); - String delim = ((BytesRef) typedData.get(1).data()).utf8ToString(); - List split = Arrays.stream(str.split(Pattern.quote(delim))).map(BytesRef::new).toList(); - return equalTo(split.size() == 1 ? split.get(0) : split); - } - @Override protected List argSpec() { return List.of(required(strings()), required(strings())); @@ -84,10 +76,11 @@ protected Expression build(Source source, List args) { } public void testConstantDelimiter() { + DriverContext driverContext = driverContext(); try ( EvalOperator.ExpressionEvaluator eval = evaluator( new Split(Source.EMPTY, field("str", DataTypes.KEYWORD), new Literal(Source.EMPTY, new BytesRef(":"), DataTypes.KEYWORD)) - ).get(driverContext()) + ).get(driverContext) ) { /* * 58 is ascii for : and appears in the toString below. We don't convert the delimiter to a @@ -96,9 +89,29 @@ public void testConstantDelimiter() { */ assert ':' == 58; assertThat(eval.toString(), equalTo("SplitSingleByteEvaluator[str=Attribute[channel=0], delim=58]")); - try (Block block = eval.eval(new Page(BytesRefBlock.newConstantBlockWith(new BytesRef("foo:bar"), 1)))) { + BlockFactory blockFactory = driverContext.blockFactory(); + Page page = new Page(blockFactory.newConstantBytesRefBlockWith(new BytesRef("foo:bar"), 1)); + try (Block block = eval.eval(page)) { assertThat(toJavaObject(block, 0), equalTo(List.of(new BytesRef("foo"), new BytesRef("bar")))); + } finally { + page.releaseBlocks(); } } } + + public void testTooLongConstantDelimiter() { + String delimiter = randomAlphaOfLength(2); + DriverContext driverContext = driverContext(); + InvalidArgumentException e = expectThrows( + InvalidArgumentException.class, + () -> evaluator( + new Split( + Source.EMPTY, + field("str", DataTypes.KEYWORD), + new Literal(Source.EMPTY, new BytesRef(delimiter), DataTypes.KEYWORD) + ) + ).get(driverContext) + ); + assertThat(e.getMessage(), equalTo("delimiter must be single byte for now")); + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java index cc677787c50c6..22c3bb6e515df 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.ql.type.DataTypes; import org.hamcrest.Matcher; +import java.util.Arrays; import java.util.List; import java.util.Locale; @@ -30,6 +31,7 @@ import static 
org.elasticsearch.xpack.ql.type.DataTypeConverter.commonType; import static org.elasticsearch.xpack.ql.type.DataTypes.isNull; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -70,9 +72,6 @@ protected Expression build(Source source, List args) { * @return True if the type combination is supported by the respective function. */ protected boolean supportsTypes(DataType lhsType, DataType rhsType) { - if (isNull(lhsType) || isNull(rhsType)) { - return false; - } if ((lhsType == DataTypes.UNSIGNED_LONG || rhsType == DataTypes.UNSIGNED_LONG) && lhsType != rhsType) { // UL can only be operated on together with another UL, so skip non-UL&UL combinations return false; @@ -94,14 +93,16 @@ public final void testApplyToAllTypes() { Source src = new Source(Location.EMPTY, lhsType.typeName() + " " + rhsType.typeName()); if (isRepresentable(lhsType) && isRepresentable(rhsType)) { op = build(src, field("lhs", lhsType), field("rhs", rhsType)); - try (Block block = evaluator(op).get(driverContext()).eval(row(List.of(lhs.value(), rhs.value())))) { + try (Block block = evaluator(op).get(driverContext()).eval(row(Arrays.asList(lhs.value(), rhs.value())))) { result = toJavaObject(block, 0); } } else { op = build(src, lhs, rhs); result = op.fold(); } - if (result == null) { + if (isNull(lhsType) || isNull(rhsType)) { + assertThat(op.toString(), result, is(nullValue())); + } else if (result == null) { assertCriticalWarnings( "Line -1:-1: evaluation of [" + op + "] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: java.lang.ArithmeticException: " + commonType(lhsType, rhsType).typeName() + " overflow" diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/BreakerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/BreakerTests.java new file mode 100644 index 0000000000000..a09cb68c893e0 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/BreakerTests.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
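One subtle fix in the AbstractBinaryOperatorTestCase hunk above: the row(...) call now wraps its arguments with Arrays.asList instead of List.of, because these tests now feed null literals through the evaluator and List.of rejects null elements. A tiny standalone demonstration (class name is mine):

```java
import java.util.Arrays;
import java.util.List;

// List.of rejects null elements at construction time; Arrays.asList does not.
public class NullRowSketch {
    public static void main(String[] args) {
        List<Object> tolerant = Arrays.asList("lhs", null); // fine: [lhs, null]
        System.out.println(tolerant);
        try {
            List.of("lhs", null);                           // throws immediately
        } catch (NullPointerException expected) {
            System.out.println("List.of rejects null elements");
        }
    }
}
```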
+ */ + +package org.elasticsearch.xpack.esql.expression.predicate.operator; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.Literal; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataTypes; +import org.junit.After; + +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class BreakerTests extends ESTestCase { + @ParametersFactory + public static Iterable<Object[]> parameters() { + List<Object[]> params = new ArrayList<>(); + + Expression expression = new Div( + Source.synthetic("[1] / (long) 2"), + AbstractFunctionTestCase.field("f", DataTypes.LONG), + new Literal(Source.EMPTY, 2, DataTypes.INTEGER) + ); + for (int b = 0; b < 136; b++) { + params.add(new Object[] { ByteSizeValue.ofBytes(b), expression }); + } + return params; + } + + private final List<CircuitBreaker> breakers = new ArrayList<>(); + + private final ByteSizeValue limit; + private final Expression expression; + + public BreakerTests(ByteSizeValue limit, Expression expression) { + this.limit = limit; + this.expression = expression; + } + + public void testBreaker() { + DriverContext unlimited = driverContext(ByteSizeValue.ofGb(1)); + DriverContext context = driverContext(limit); + EvalOperator.ExpressionEvaluator eval = AbstractFunctionTestCase.evaluator(expression).get(context); + try (Block b = unlimited.blockFactory().newConstantNullBlock(1)) { + Exception e = expectThrows(CircuitBreakingException.class, () -> eval.eval(new Page(b))); + assertThat(e.getMessage(), equalTo("over test limit")); + } + } + + /** + * A {@link DriverContext} backed by a breaking {@link BigArrays} limited to {@code limit} bytes; small limits are expected to trip during evaluation.
+ */ + private DriverContext driverContext(ByteSizeValue limit) { + BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, limit).withCircuitBreaking(); + CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); + breakers.add(breaker); + return new DriverContext(bigArrays, new BlockFactory(breaker, bigArrays)); + } + + @After + public void allBreakersEmpty() throws Exception { + // first check that all big arrays are released, which can affect breakers + MockBigArrays.ensureAllArraysAreReleased(); + + for (CircuitBreaker breaker : breakers) { + assertThat("Unexpected used in breaker: " + breaker, breaker.getUsed(), equalTo(0L)); + } + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractDateTimeArithmeticTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractDateTimeArithmeticTestCase.java index a620a95ea3c0f..bb462dc00463c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractDateTimeArithmeticTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractDateTimeArithmeticTestCase.java @@ -13,15 +13,17 @@ import org.elasticsearch.xpack.ql.type.DataTypes; import org.hamcrest.Matcher; +import java.time.Duration; +import java.time.Period; import java.time.temporal.TemporalAmount; import java.util.List; import java.util.Locale; -import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isDateTimeOrTemporal; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isNullOrTemporalAmount; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount; import static org.elasticsearch.xpack.ql.type.DataTypes.isDateTime; +import static org.elasticsearch.xpack.ql.type.DataTypes.isNull; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.oneOf; public abstract class AbstractDateTimeArithmeticTestCase extends AbstractArithmeticTestCase { @@ -30,15 +32,30 @@ protected Matcher resultMatcher(List data, DataType dataType) { Object lhs = data.get(0); Object rhs = data.get(1); if (lhs instanceof TemporalAmount || rhs instanceof TemporalAmount) { - TemporalAmount temporal = lhs instanceof TemporalAmount leftTemporal ? leftTemporal : (TemporalAmount) rhs; - long datetime = temporal == lhs ? 
(Long) rhs : (Long) lhs; - return equalTo(expectedValue(datetime, temporal)); + Object expectedValue; + if (lhs instanceof TemporalAmount && rhs instanceof TemporalAmount) { + assertThat("temporal amounts of different kinds", lhs.getClass(), equalTo(rhs.getClass())); + if (lhs instanceof Period) { + expectedValue = expectedValue((Period) lhs, (Period) rhs); + } else { + expectedValue = expectedValue((Duration) lhs, (Duration) rhs); + } + } else if (lhs instanceof TemporalAmount lhsTemporal) { + expectedValue = expectedValue((long) rhs, lhsTemporal); + } else { // rhs instanceof TemporalAmount + expectedValue = expectedValue((long) lhs, (TemporalAmount) rhs); + } + return equalTo(expectedValue); } return super.resultMatcher(data, dataType); } protected abstract long expectedValue(long datetime, TemporalAmount temporalAmount); + protected abstract Period expectedValue(Period lhs, Period rhs); + + protected abstract Duration expectedValue(Duration lhs, Duration rhs); + @Override protected final boolean supportsType(DataType type) { return EsqlDataTypes.isDateTimeOrTemporal(type) || super.supportsType(type); @@ -46,28 +63,61 @@ protected final boolean supportsType(DataType type) { @Override protected void validateType(BinaryOperator op, DataType lhsType, DataType rhsType) { - if (isDateTime(lhsType) && isTemporalAmount(rhsType) || isTemporalAmount(lhsType) && isDateTime(rhsType)) { - assertTrue(op.toString(), op.typeResolved().resolved()); - assertTrue(op.toString(), isTemporalAmount(lhsType) || isTemporalAmount(rhsType)); - assertFalse(op.toString(), isTemporalAmount(lhsType) && isTemporalAmount(rhsType)); - assertThat(op.toString(), op.dataType(), equalTo(expectedType(lhsType, rhsType))); - assertThat(op.toString(), op.getClass(), oneOf(Add.class, Sub.class)); - } else if (isDateTimeOrTemporal(lhsType) || isDateTimeOrTemporal(rhsType)) { - assertFalse(op.toString(), op.typeResolved().resolved()); - assertThat( - op.toString(), - op.typeResolved().message(), - equalTo( - String.format(Locale.ROOT, "[%s] has arguments with incompatible types [%s] and [%s]", op.symbol(), lhsType, rhsType) - ) - ); + if (isDateTime(lhsType) || isDateTime(rhsType)) { + String failureMessage = null; + if (isDateTime(lhsType) && isDateTime(rhsType) + || isNullOrTemporalAmount(lhsType) == false && isNullOrTemporalAmount(rhsType) == false) { + failureMessage = String.format( + Locale.ROOT, + "[%s] has arguments with incompatible types [%s] and [%s]", + op.symbol(), + lhsType, + rhsType + ); + } else if (op instanceof Sub && isDateTime(rhsType)) { + failureMessage = String.format( + Locale.ROOT, + "[%s] arguments are in unsupported order: cannot subtract a [DATETIME] value [%s] from a [%s] amount [%s]", + op.symbol(), + op.right().sourceText(), + lhsType, + op.left().sourceText() + ); + } + assertTypeResolution(failureMessage, op, lhsType, rhsType); + } else if (isTemporalAmount(lhsType) || isTemporalAmount(rhsType)) { + String failureMessage = isNull(lhsType) || isNull(rhsType) || lhsType == rhsType + ? 
null + : String.format(Locale.ROOT, "[%s] has arguments with incompatible types [%s] and [%s]", op.symbol(), lhsType, rhsType); + assertTypeResolution(failureMessage, op, lhsType, rhsType); } else { super.validateType(op, lhsType, rhsType); } } + private void assertTypeResolution(String failureMessage, BinaryOperator op, DataType lhsType, DataType rhsType) { + if (failureMessage != null) { + assertFalse(op.toString(), op.typeResolved().resolved()); + assertThat(op.toString(), op.typeResolved().message(), equalTo(failureMessage)); + } else { + assertTrue(op.toString(), op.typeResolved().resolved()); + assertThat(op.toString(), op.dataType(), equalTo(expectedType(lhsType, rhsType))); + } + } + @Override protected DataType expectedType(DataType lhsType, DataType rhsType) { - return isDateTimeOrTemporal(lhsType) ? DataTypes.DATETIME : super.expectedType(lhsType, rhsType); + if (isDateTime(lhsType) || isDateTime(rhsType)) { + return DataTypes.DATETIME; + } else if (isNullOrTemporalAmount(lhsType) || isNullOrTemporalAmount(rhsType)) { + if (isNull(lhsType)) { + return rhsType; + } else if (isNull(rhsType)) { + return lhsType; + } else if (lhsType == rhsType) { + return lhsType; + } // else: UnsupportedOperationException + } + return super.expectedType(lhsType, rhsType); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java index 91f5a80076626..2280ad9a2b1fe 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java @@ -28,6 +28,7 @@ import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isDateTimeOrTemporal; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount; import static org.elasticsearch.xpack.ql.type.DataTypes.isDateTime; +import static org.elasticsearch.xpack.ql.type.DataTypes.isNull; import static org.elasticsearch.xpack.ql.type.DateUtils.asDateTime; import static org.elasticsearch.xpack.ql.type.DateUtils.asMillis; import static org.elasticsearch.xpack.ql.util.NumericUtils.asLongUnsigned; @@ -83,16 +84,41 @@ public static Iterable parameters() { DataTypes.UNSIGNED_LONG, TestCaseSupplier.ulongCases(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE)), TestCaseSupplier.ulongCases(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE)), - List.of(), - false + List.of() + ) + ); + + // Datetime, Period/Duration Cases + + suppliers.addAll( + TestCaseSupplier.forBinaryNotCasting( + "No evaluator, the tests only trigger the folding code since Period is not representable", + "lhs", + "rhs", + (lhs, rhs) -> ((Period) lhs).plus((Period) rhs), + EsqlDataTypes.DATE_PERIOD, + TestCaseSupplier.datePeriodCases(), + TestCaseSupplier.datePeriodCases(), + List.of() + ) + ); + suppliers.addAll( + TestCaseSupplier.forBinaryNotCasting( + "No evaluator, the tests only trigger the folding code since Duration is not representable", + "lhs", + "rhs", + (lhs, rhs) -> ((Duration) lhs).plus((Duration) rhs), + EsqlDataTypes.TIME_DURATION, + TestCaseSupplier.timeDurationCases(), + TestCaseSupplier.timeDurationCases(), + List.of() ) ); - // AwaitsFix https://github.com/elastic/elasticsearch/issues/103085 - // After fixing that issue, please move this line to below where the date cases are generated + // 
Datetime tests are split in two, depending on their permissiveness of null-injection, which cannot happen "automatically" for + // Datetime + Period/Duration, since the expression will take the non-null arg's type. suppliers = anyNullIsNull(true, suppliers); - // Datetime Cases suppliers.addAll( TestCaseSupplier.forBinaryNotCasting( // TODO: There is an evaluator for Datetime + Period, so it should be tested. Similarly below. @@ -115,26 +141,12 @@ public static Iterable parameters() { DataTypes.DATETIME, TestCaseSupplier.dateCases(), TestCaseSupplier.datePeriodCases(), - List.of(), - true - ) - ); - suppliers.addAll( - TestCaseSupplier.forBinaryNotCasting( - "No evaluator, the tests only trigger the folding code since Period is not representable", - "lhs", - "rhs", - (lhs, rhs) -> ((Period) lhs).plus((Period) rhs), - EsqlDataTypes.DATE_PERIOD, - TestCaseSupplier.datePeriodCases(), - TestCaseSupplier.datePeriodCases(), - List.of(), - false + List.of() ) ); suppliers.addAll( TestCaseSupplier.forBinaryNotCasting( - // TODO: There is an evaluator for Datetime + Duration, so it should be tested. Similarly below. + // TODO: There is an evaluator for Datetime + Duration, so it should be tested. Similarly above. "No evaluator, the tests only trigger the folding code since Duration is not representable", "lhs", "rhs", @@ -154,23 +166,33 @@ public static Iterable parameters() { DataTypes.DATETIME, TestCaseSupplier.dateCases(), TestCaseSupplier.timeDurationCases(), - List.of(), - true - ) - ); - suppliers.addAll( - TestCaseSupplier.forBinaryNotCasting( - "No evaluator, the tests only trigger the folding code since Duration is not representable", - "lhs", - "rhs", - (lhs, rhs) -> ((Duration) lhs).plus((Duration) rhs), - EsqlDataTypes.TIME_DURATION, - TestCaseSupplier.timeDurationCases(), - TestCaseSupplier.timeDurationCases(), - List.of(), - false + List.of() ) ); + suppliers.addAll(TestCaseSupplier.dateCases().stream().mapMulti((tds, consumer) -> { + consumer.accept( + new TestCaseSupplier( + List.of(DataTypes.DATETIME, DataTypes.NULL), + () -> new TestCaseSupplier.TestCase( + List.of(tds.get(), TestCaseSupplier.TypedData.NULL), + "LiteralsEvaluator[lit=null]", + DataTypes.DATETIME, + nullValue() + ) + ) + ); + consumer.accept( + new TestCaseSupplier( + List.of(DataTypes.NULL, DataTypes.DATETIME), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.NULL, tds.get()), + "LiteralsEvaluator[lit=null]", + DataTypes.DATETIME, + nullValue() + ) + ) + ); + }).toList()); // Cases that should generate warnings suppliers.addAll(List.of(new TestCaseSupplier("MV", () -> { @@ -196,7 +218,11 @@ public static Iterable parameters() { @Override protected boolean supportsTypes(DataType lhsType, DataType rhsType) { if (isDateTimeOrTemporal(lhsType) || isDateTimeOrTemporal(rhsType)) { - return isDateTime(lhsType) && isTemporalAmount(rhsType) || isTemporalAmount(lhsType) && isDateTime(rhsType); + return isNull(lhsType) + || isNull(rhsType) + || isDateTime(lhsType) && isTemporalAmount(rhsType) + || isTemporalAmount(lhsType) && isDateTime(rhsType) + || isTemporalAmount(lhsType) && isTemporalAmount(rhsType) && lhsType == rhsType; } return super.supportsTypes(lhsType, rhsType); } @@ -232,4 +258,14 @@ protected long expectedUnsignedLongValue(long lhs, long rhs) { protected long expectedValue(long datetime, TemporalAmount temporalAmount) { return asMillis(asDateTime(datetime).plus(temporalAmount)); } + + @Override + protected Period expectedValue(Period lhs, Period rhs) { + return lhs.plus(rhs); + } + + 
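The Period and Duration expectedValue overrides here, and their Sub counterparts below, lean on plain java.time arithmetic: temporal amounts are not representable in blocks, so these cases fold at plan time. A standalone sketch (class name and sample amounts are mine):

```java
import java.time.Duration;
import java.time.Period;

// Temporal literals fold with java.time arithmetic, which is exactly what
// the expectedValue overrides compute for Add and Sub.
public class TemporalFoldSketch {
    public static void main(String[] args) {
        Period datePeriodSum = Period.ofMonths(2).plus(Period.ofDays(1));             // P2M1D
        Duration timeDurationDiff = Duration.ofHours(3).minus(Duration.ofMinutes(4)); // PT2H56M
        System.out.println(datePeriodSum + " " + timeDurationDiff);
    }
}
```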
@Override + protected Duration expectedValue(Duration lhs, Duration rhs) { + return lhs.plus(rhs); + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java index db924d0d68c53..b2f54e4d2400c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java @@ -27,6 +27,7 @@ import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isDateTimeOrTemporal; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount; import static org.elasticsearch.xpack.ql.type.DataTypes.isDateTime; +import static org.elasticsearch.xpack.ql.type.DataTypes.isNull; import static org.elasticsearch.xpack.ql.type.DateUtils.asDateTime; import static org.elasticsearch.xpack.ql.type.DateUtils.asMillis; import static org.elasticsearch.xpack.ql.util.NumericUtils.asLongUnsigned; @@ -162,9 +163,13 @@ public static Iterable parameters() { @Override protected boolean supportsTypes(DataType lhsType, DataType rhsType) { - return isDateTimeOrTemporal(lhsType) || isDateTimeOrTemporal(rhsType) - ? isDateTime(lhsType) && isTemporalAmount(rhsType) - : super.supportsTypes(lhsType, rhsType); + if (isDateTimeOrTemporal(lhsType) || isDateTimeOrTemporal(rhsType)) { + return isNull(lhsType) + || isNull(rhsType) + || isDateTime(lhsType) && isTemporalAmount(rhsType) + || isTemporalAmount(lhsType) && isTemporalAmount(rhsType) && lhsType == rhsType; + } + return super.supportsTypes(lhsType, rhsType); } @Override @@ -198,4 +203,14 @@ protected long expectedUnsignedLongValue(long lhs, long rhs) { protected long expectedValue(long datetime, TemporalAmount temporalAmount) { return asMillis(asDateTime(datetime).minus(temporalAmount)); } + + @Override + protected Period expectedValue(Period lhs, Period rhs) { + return lhs.minus(rhs); + } + + @Override + protected Duration expectedValue(Duration lhs, Duration rhs) { + return lhs.minus(rhs); + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractBinaryComparisonTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractBinaryComparisonTestCase.java index 2348c32f58687..37ab820146bf4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractBinaryComparisonTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractBinaryComparisonTestCase.java @@ -21,6 +21,7 @@ import java.util.List; import java.util.Locale; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isSpatial; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -68,7 +69,8 @@ protected Matcher resultsMatcher(List typedD @Override protected final boolean supportsType(DataType type) { - if (type == DataTypes.BOOLEAN) { + // Boolean and Spatial types do not support inequality operators + if (type == DataTypes.BOOLEAN || isSpatial(type)) { return isEquality(); } return EsqlDataTypes.isRepresentable(type); diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java index 9430e984039fe..bbe32350a0465 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java @@ -8,14 +8,16 @@ package org.elasticsearch.xpack.esql.formatter; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.compute.data.BytesRefBlock; -import org.elasticsearch.compute.data.IntArrayVector; -import org.elasticsearch.compute.data.LongArrayVector; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BytesRefArray; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.geometry.Point; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; import org.elasticsearch.xpack.ql.util.StringUtils; @@ -35,10 +37,13 @@ import static org.elasticsearch.xpack.esql.formatter.TextFormat.CSV; import static org.elasticsearch.xpack.esql.formatter.TextFormat.PLAIN_TEXT; import static org.elasticsearch.xpack.esql.formatter.TextFormat.TSV; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; public class TextFormatTests extends ESTestCase { + static final BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); + public void testCsvContentType() { assertEquals("text/csv; charset=utf-8; header=present", CSV.contentType(req())); } @@ -118,17 +123,17 @@ public void testTsvFormatWithEmptyData() { public void testCsvFormatWithRegularData() { String text = format(CSV, req(), regularData()); assertEquals(""" - string,number,location\r - Along The River Bank,708,POINT (12.0000000 56.0000000)\r - Mind Train,280,POINT (-97.0000000 26.0000000)\r + string,number,location,location2\r + Along The River Bank,708,POINT (12.0 56.0),POINT (1234.0 5678.0)\r + Mind Train,280,POINT (-97.0 26.0),POINT (-9753.0 2611.0)\r """, text); } public void testCsvFormatNoHeaderWithRegularData() { String text = format(CSV, reqWithParam("header", "absent"), regularData()); assertEquals(""" - Along The River Bank,708,POINT (12.0000000 56.0000000)\r - Mind Train,280,POINT (-97.0000000 26.0000000)\r + Along The River Bank,708,POINT (12.0 56.0),POINT (1234.0 5678.0)\r + Mind Train,280,POINT (-97.0 26.0),POINT (-9753.0 2611.0)\r """, text); } @@ -140,12 +145,15 @@ public void testCsvFormatWithCustomDelimiterRegularData() { "string", "number", "location", + "location2", "Along The River Bank", "708", - "POINT (12.0000000 56.0000000)", + "POINT (12.0 56.0)", + "POINT (1234.0 5678.0)", "Mind Train", "280", - "POINT (-97.0000000 26.0000000)" + "POINT (-97.0 26.0)", + "POINT (-9753.0 2611.0)" ); List expectedTerms = terms.stream() .map(x -> x.contains(String.valueOf(delim)) ? 
'"' + x + '"' : x) @@ -157,6 +165,8 @@ public void testCsvFormatWithCustomDelimiterRegularData() { sb.append(expectedTerms.remove(0)); sb.append(delim); sb.append(expectedTerms.remove(0)); + sb.append(delim); + sb.append(expectedTerms.remove(0)); sb.append("\r\n"); } while (expectedTerms.size() > 0); assertEquals(sb.toString(), text); @@ -165,9 +175,9 @@ public void testCsvFormatWithCustomDelimiterRegularData() { public void testTsvFormatWithRegularData() { String text = format(TSV, req(), regularData()); assertEquals(""" - string\tnumber\tlocation - Along The River Bank\t708\tPOINT (12.0000000 56.0000000) - Mind Train\t280\tPOINT (-97.0000000 26.0000000) + string\tnumber\tlocation\tlocation2 + Along The River Bank\t708\tPOINT (12.0 56.0)\tPOINT (1234.0 5678.0) + Mind Train\t280\tPOINT (-97.0 26.0)\tPOINT (-9753.0 2611.0) """, text); } @@ -231,35 +241,44 @@ public void testPlainTextEmptyCursorWithColumns() { public void testPlainTextEmptyCursorWithoutColumns() { assertEquals( StringUtils.EMPTY, - getTextBodyContent(PLAIN_TEXT.format(req(), new EsqlQueryResponse(emptyList(), emptyList(), null, false))) + getTextBodyContent(PLAIN_TEXT.format(req(), new EsqlQueryResponse(emptyList(), emptyList(), null, false, false))) ); } private static EsqlQueryResponse emptyData() { - return new EsqlQueryResponse(singletonList(new ColumnInfo("name", "keyword")), emptyList(), null, false); + return new EsqlQueryResponse(singletonList(new ColumnInfo("name", "keyword")), emptyList(), null, false, false); } private static EsqlQueryResponse regularData() { + BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); // headers List headers = asList( new ColumnInfo("string", "keyword"), new ColumnInfo("number", "integer"), - new ColumnInfo("location", "geo_point") + new ColumnInfo("location", "geo_point"), + new ColumnInfo("location2", "cartesian_point") ); + BytesRefArray geoPoints = new BytesRefArray(2, BigArrays.NON_RECYCLING_INSTANCE); + geoPoints.append(GEO.pointAsWKB(new Point(12, 56))); + geoPoints.append(GEO.pointAsWKB(new Point(-97, 26))); // values List values = List.of( new Page( - BytesRefBlock.newBlockBuilder(2) + blockFactory.newBytesRefBlockBuilder(2) .appendBytesRef(new BytesRef("Along The River Bank")) .appendBytesRef(new BytesRef("Mind Train")) .build(), - new IntArrayVector(new int[] { 11 * 60 + 48, 4 * 60 + 40 }, 2).asBlock(), - new LongArrayVector(new long[] { GEO.pointAsLong(12, 56), GEO.pointAsLong(-97, 26) }, 2).asBlock() + blockFactory.newIntArrayVector(new int[] { 11 * 60 + 48, 4 * 60 + 40 }, 2).asBlock(), + blockFactory.newBytesRefArrayVector(geoPoints, 2).asBlock(), + blockFactory.newBytesRefBlockBuilder(2) + .appendBytesRef(CARTESIAN.pointAsWKB(new Point(1234, 5678))) + .appendBytesRef(CARTESIAN.pointAsWKB(new Point(-9753, 2611))) + .build() ) ); - return new EsqlQueryResponse(headers, values, null, false); + return new EsqlQueryResponse(headers, values, null, false, false); } private static EsqlQueryResponse escapedData() { @@ -269,15 +288,18 @@ private static EsqlQueryResponse escapedData() { // values List values = List.of( new Page( - BytesRefBlock.newBlockBuilder(2).appendBytesRef(new BytesRef("normal")).appendBytesRef(new BytesRef("commas")).build(), - BytesRefBlock.newBlockBuilder(2) + blockFactory.newBytesRefBlockBuilder(2) + .appendBytesRef(new BytesRef("normal")) + .appendBytesRef(new BytesRef("commas")) + .build(), + blockFactory.newBytesRefBlockBuilder(2) .appendBytesRef(new BytesRef("\"quo\"ted\",\n")) .appendBytesRef(new BytesRef("a,b,c,\n,d,e,\t\n")) 
.build() ) ); - return new EsqlQueryResponse(headers, values, null, false); + return new EsqlQueryResponse(headers, values, null, false, false); } private static RestRequest req() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java index 22e532341d30b..b8800713eca89 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java @@ -9,12 +9,13 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Strings; -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BytesRefBlock; -import org.elasticsearch.compute.data.DoubleArrayVector; -import org.elasticsearch.compute.data.LongArrayVector; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BytesRefArray; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.geometry.Point; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; @@ -23,10 +24,14 @@ import static org.elasticsearch.rest.RestResponseUtils.getTextBodyContent; import static org.elasticsearch.xpack.ql.util.DateUtils.UTC_DATE_TIME_FORMATTER; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; import static org.hamcrest.Matchers.arrayWithSize; public class TextFormatterTests extends ESTestCase { + + static BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); + private final List columns = Arrays.asList( new ColumnInfo("foo", "keyword"), new ColumnInfo("bar", "long"), @@ -36,32 +41,45 @@ public class TextFormatterTests extends ESTestCase { new ColumnInfo("baz", "keyword"), new ColumnInfo("date", "date"), new ColumnInfo("location", "geo_point"), + new ColumnInfo("location2", "cartesian_point"), new ColumnInfo("null_field2", "keyword") ); + + private static final BytesRefArray geoPoints = new BytesRefArray(2, BigArrays.NON_RECYCLING_INSTANCE); + static { + geoPoints.append(GEO.pointAsWKB(new Point(12, 56))); + geoPoints.append(GEO.pointAsWKB(new Point(-97, 26))); + } + EsqlQueryResponse esqlResponse = new EsqlQueryResponse( columns, List.of( new Page( - BytesRefBlock.newBlockBuilder(2) + blockFactory.newBytesRefBlockBuilder(2) .appendBytesRef(new BytesRef("15charwidedata!")) .appendBytesRef(new BytesRef("dog")) .build(), - new LongArrayVector(new long[] { 1, 2 }, 2).asBlock(), - new DoubleArrayVector(new double[] { 6.888, 123124.888 }, 2).asBlock(), - Block.constantNullBlock(2), - new DoubleArrayVector(new double[] { 12, 9912 }, 2).asBlock(), - BytesRefBlock.newBlockBuilder(2).appendBytesRef(new BytesRef("rabbit")).appendBytesRef(new BytesRef("goat")).build(), - new LongArrayVector( + blockFactory.newLongArrayVector(new long[] { 1, 2 }, 2).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 6.888, 123124.888 }, 2).asBlock(), + blockFactory.newConstantNullBlock(2), + blockFactory.newDoubleArrayVector(new double[] { 12, 9912 }, 2).asBlock(), + blockFactory.newBytesRefBlockBuilder(2).appendBytesRef(new BytesRef("rabbit")).appendBytesRef(new 
BytesRef("goat")).build(), + blockFactory.newLongArrayVector( new long[] { UTC_DATE_TIME_FORMATTER.parseMillis("1953-09-02T00:00:00.000Z"), UTC_DATE_TIME_FORMATTER.parseMillis("2000-03-15T21:34:37.443Z") }, 2 ).asBlock(), - new LongArrayVector(new long[] { GEO.pointAsLong(12, 56), GEO.pointAsLong(-97, 26) }, 2).asBlock(), - Block.constantNullBlock(2) + blockFactory.newBytesRefArrayVector(geoPoints, 2).asBlock(), + blockFactory.newBytesRefBlockBuilder(2) + .appendBytesRef(CARTESIAN.pointAsWKB(new Point(1234, 5678))) + .appendBytesRef(CARTESIAN.pointAsWKB(new Point(-9753, 2611))) + .build(), + blockFactory.newConstantNullBlock(2) ) ), null, + randomBoolean(), randomBoolean() ); @@ -79,22 +97,22 @@ public void testFormatWithHeader() { assertThat(result, arrayWithSize(4)); assertEquals( " foo | bar |15charwidename!| null_field1 |superduperwidename!!!| baz |" - + " date | location | null_field2 ", + + " date | location | location2 | null_field2 ", result[0] ); assertEquals( - "---------------+---------------+---------------+---------------+---------------------+---------------+" - + "------------------------+------------------------------+---------------", + "---------------+---------------+---------------+---------------+---------------------+---------------+-------" + + "-----------------+------------------+----------------------+---------------", result[1] ); assertEquals( "15charwidedata!|1 |6.888 |null |12.0 |rabbit |" - + "1953-09-02T00:00:00.000Z|POINT (12.0000000 56.0000000) |null ", + + "1953-09-02T00:00:00.000Z|POINT (12.0 56.0) |POINT (1234.0 5678.0) |null ", result[2] ); assertEquals( "dog |2 |123124.888 |null |9912.0 |goat |" - + "2000-03-15T21:34:37.443Z|POINT (-97.0000000 26.0000000)|null ", + + "2000-03-15T21:34:37.443Z|POINT (-97.0 26.0)|POINT (-9753.0 2611.0)|null ", result[3] ); } @@ -108,23 +126,34 @@ public void testFormatWithoutHeader() { columns, List.of( new Page( - BytesRefBlock.newBlockBuilder(2).appendBytesRef(new BytesRef("doggie")).appendBytesRef(new BytesRef("dog")).build(), - new LongArrayVector(new long[] { 4, 2 }, 2).asBlock(), - new DoubleArrayVector(new double[] { 1, 123124.888 }, 2).asBlock(), - Block.constantNullBlock(2), - new DoubleArrayVector(new double[] { 77.0, 9912.0 }, 2).asBlock(), - BytesRefBlock.newBlockBuilder(2).appendBytesRef(new BytesRef("wombat")).appendBytesRef(new BytesRef("goat")).build(), - new LongArrayVector( + blockFactory.newBytesRefBlockBuilder(2) + .appendBytesRef(new BytesRef("doggie")) + .appendBytesRef(new BytesRef("dog")) + .build(), + blockFactory.newLongArrayVector(new long[] { 4, 2 }, 2).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 1, 123124.888 }, 2).asBlock(), + blockFactory.newConstantNullBlock(2), + blockFactory.newDoubleArrayVector(new double[] { 77.0, 9912.0 }, 2).asBlock(), + blockFactory.newBytesRefBlockBuilder(2) + .appendBytesRef(new BytesRef("wombat")) + .appendBytesRef(new BytesRef("goat")) + .build(), + blockFactory.newLongArrayVector( new long[] { UTC_DATE_TIME_FORMATTER.parseMillis("1955-01-21T01:02:03.342Z"), UTC_DATE_TIME_FORMATTER.parseMillis("2231-12-31T23:59:59.999Z") }, 2 ).asBlock(), - new LongArrayVector(new long[] { GEO.pointAsLong(12, 56), GEO.pointAsLong(-97, 26) }, 2).asBlock(), - Block.constantNullBlock(2) + blockFactory.newBytesRefArrayVector(geoPoints, 2).asBlock(), + blockFactory.newBytesRefBlockBuilder(2) + .appendBytesRef(CARTESIAN.pointAsWKB(new Point(1234, 5678))) + .appendBytesRef(CARTESIAN.pointAsWKB(new Point(-9753, 2611))) + .build(), + 
blockFactory.newConstantNullBlock(2) ) ), null, + randomBoolean(), randomBoolean() ); @@ -132,12 +161,12 @@ public void testFormatWithoutHeader() { assertThat(result, arrayWithSize(2)); assertEquals( "doggie |4 |1.0 |null |77.0 |wombat |" - + "1955-01-21T01:02:03.342Z|POINT (12.0000000 56.0000000) |null ", + + "1955-01-21T01:02:03.342Z|POINT (12.0 56.0) |POINT (1234.0 5678.0) |null ", result[0] ); assertEquals( "dog |2 |123124.888 |null |9912.0 |goat |" - + "2231-12-31T23:59:59.999Z|POINT (-97.0000000 26.0000000)|null ", + + "2231-12-31T23:59:59.999Z|POINT (-97.0 26.0)|POINT (-9753.0 2611.0)|null ", result[1] ); } @@ -157,13 +186,14 @@ public void testVeryLongPadding() { List.of(new ColumnInfo("foo", "keyword")), List.of( new Page( - BytesRefBlock.newBlockBuilder(2) + blockFactory.newBytesRefBlockBuilder(2) .appendBytesRef(new BytesRef(smallFieldContent)) .appendBytesRef(new BytesRef(largeFieldContent)) .build() ) ), null, + randomBoolean(), randomBoolean() ) ).format(false) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java new file mode 100644 index 0000000000000..7f683e8f8003b --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.io.stream; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.TransportVersionUtils; + +import static org.hamcrest.Matchers.equalTo; + +public class PlanStreamOutputTests extends ESTestCase { + + public void testTransportVersion() { + BytesStreamOutput out = new BytesStreamOutput(); + TransportVersion v1 = TransportVersionUtils.randomCompatibleVersion(random()); + out.setTransportVersion(v1); + PlanStreamOutput planOut = new PlanStreamOutput(out, PlanNameRegistry.INSTANCE); + assertThat(planOut.getTransportVersion(), equalTo(v1)); + TransportVersion v2 = TransportVersionUtils.randomCompatibleVersion(random()); + planOut.setTransportVersion(v2); + assertThat(planOut.getTransportVersion(), equalTo(v2)); + assertThat(out.getTransportVersion(), equalTo(v2)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java index 12b8185cbec5d..5887d61c652bb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java @@ -258,9 +258,11 @@ public void testCountOneFieldWithFilter() { assertThat(esStatsQuery.limit(), is(nullValue())); assertThat(Expressions.names(esStatsQuery.output()), contains("count", "seen")); var stat = as(esStatsQuery.stats().get(0), Stat.class); - assertThat(stat.query(), is(QueryBuilders.existsQuery("salary"))); - var source = ((SingleValueQuery.Builder) esStatsQuery.query()).source(); - var expected = 
wrapWithSingleQuery(QueryBuilders.rangeQuery("salary").gt(1000), "salary", source); + Source source = new Source(2, 8, "salary > 1000"); + var exists = QueryBuilders.existsQuery("salary"); + assertThat(stat.query(), is(exists)); + var range = wrapWithSingleQuery(QueryBuilders.rangeQuery("salary").gt(1000), "salary", source); + var expected = QueryBuilders.boolQuery().must(range).must(exists); assertThat(expected.toString(), is(esStatsQuery.query().toString())); } @@ -381,6 +383,28 @@ public boolean exists(String field) { assertThat(Expressions.names(localSource.output()), contains("count", "seen")); } + public void testIsNotNullPushdownFilter() { + var plan = plan("from test | where emp_no is not null"); + + var limit = as(plan, LimitExec.class); + var exchange = as(limit.child(), ExchangeExec.class); + var query = as(exchange.child(), EsQueryExec.class); + assertThat(query.limit().fold(), is(500)); + var expected = QueryBuilders.existsQuery("emp_no"); + assertThat(query.query().toString(), is(expected.toString())); + } + + public void testIsNullPushdownFilter() { + var plan = plan("from test | where emp_no is null"); + + var limit = as(plan, LimitExec.class); + var exchange = as(limit.child(), ExchangeExec.class); + var query = as(exchange.child(), EsQueryExec.class); + assertThat(query.limit().fold(), is(500)); + var expected = QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("emp_no")); + assertThat(query.query().toString(), is(expected.toString())); + } + private QueryBuilder wrapWithSingleQuery(QueryBuilder inner, String fieldName, Source source) { return FilterTests.singleValueQuery(inner, fieldName, source); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 352dccc046588..6320294d7ee54 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -9,9 +9,9 @@ import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.compute.aggregation.QuantileStates; -import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.EsqlTestUtils; +import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.analysis.Analyzer; import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils; @@ -261,6 +261,62 @@ public void testCombineProjectionWithPruning() { var from = as(agg.child(), EsRelation.class); } + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[f{r}#7],[SUM(emp_no{f}#15) AS s, COUNT(first_name{f}#16) AS c, first_name{f}#16 AS f]] + * \_EsRelation[test][_meta_field{f}#21, emp_no{f}#15, first_name{f}#16, ..] 
+ */ + public void testCombineProjectionWithAggregationFirstAndAliasedGroupingUsedInAgg() { + var plan = plan(""" + from test + | rename emp_no as e, first_name as f + | stats s = sum(e), c = count(f) by f + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + var aggs = agg.aggregates(); + assertThat(Expressions.names(aggs), contains("s", "c", "f")); + Alias as = as(aggs.get(0), Alias.class); + var sum = as(as.child(), Sum.class); + assertThat(Expressions.name(sum.field()), is("emp_no")); + as = as(aggs.get(1), Alias.class); + var count = as(as.child(), Count.class); + assertThat(Expressions.name(count.field()), is("first_name")); + + as = as(aggs.get(2), Alias.class); + assertThat(Expressions.name(as.child()), is("first_name")); + + assertThat(Expressions.names(agg.groupings()), contains("f")); + } + + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[f{r}#7],[SUM(emp_no{f}#15) AS s, first_name{f}#16 AS f]] + * \_EsRelation[test][_meta_field{f}#21, emp_no{f}#15, first_name{f}#16, ..] + */ + public void testCombineProjectionWithAggregationFirstAndAliasedGroupingUnused() { + var plan = plan(""" + from test + | rename emp_no as e, first_name as f, last_name as l + | stats s = sum(e) by f + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + var aggs = agg.aggregates(); + assertThat(Expressions.names(aggs), contains("s", "f")); + Alias as = as(aggs.get(0), Alias.class); + var aggFunc = as(as.child(), AggregateFunction.class); + assertThat(Expressions.name(aggFunc.field()), is("emp_no")); + as = as(aggs.get(1), Alias.class); + assertThat(Expressions.name(as.child()), is("first_name")); + + assertThat(Expressions.names(agg.groupings()), contains("f")); + } + /** * Expects * EsqlProject[[x{r}#3, y{r}#6]] @@ -301,7 +357,7 @@ public void testMultipleCombineLimits() { var limitWithMinimum = randomIntBetween(0, numberOfLimits - 1); var fa = getFieldAttribute("a", INTEGER); - var relation = localSource(BlockFactory.getNonBreakingInstance(), singletonList(fa), singletonList(1)); + var relation = localSource(TestBlockFactory.getNonBreakingInstance(), singletonList(fa), singletonList(1)); LogicalPlan plan = relation; for (int i = 0; i < numberOfLimits; i++) { @@ -2534,6 +2590,184 @@ private void aggFieldName(Expression exp, Class assertThat(name, is(fieldName)); } + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[],[SUM(emp_no{f}#4) AS sum(emp_no)]] + * \_EsRelation[test][_meta_field{f}#10, emp_no{f}#4, first_name{f}#5, ge..] 
+ */ + public void testIsNotNullConstraintForStatsWithoutGrouping() { + var plan = optimizedPlan(""" + from test + | stats sum(emp_no) + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(Expressions.names(agg.groupings()), is(empty())); + assertThat(Expressions.names(agg.aggregates()), contains("sum(emp_no)")); + var from = as(agg.child(), EsRelation.class); + } + + public void testIsNotNullConstraintForStatsWithGrouping() { + var plan = optimizedPlan(""" + from test + | stats sum(emp_no) by salary + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(Expressions.names(agg.groupings()), contains("salary")); + assertThat(Expressions.names(agg.aggregates()), contains("sum(emp_no)", "salary")); + var from = as(agg.child(), EsRelation.class); + } + + /** + * Expected + * Limit[500[INTEGER]] + * \_Aggregate[[salary{f}#1185],[SUM(salary{f}#1185) AS sum(salary), salary{f}#1185]] + * \_EsRelation[test][_meta_field{f}#1186, emp_no{f}#1180, first_name{f}#..] + */ + public void testIsNotNullConstraintForStatsWithAndOnGrouping() { + var plan = optimizedPlan(""" + from test + | stats sum(salary) by salary + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(Expressions.names(agg.groupings()), contains("salary")); + assertThat(Expressions.names(agg.aggregates()), contains("sum(salary)", "salary")); + var from = as(agg.child(), EsRelation.class); + } + + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[x{r}#4],[SUM(salary{f}#13) AS sum(salary), salary{f}#13 AS x]] + * \_EsRelation[test][_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, ge..] + */ + public void testIsNotNullConstraintForStatsWithAndOnGroupingAlias() { + var plan = optimizedPlan(""" + from test + | eval x = salary + | stats sum(salary) by x + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(Expressions.names(agg.groupings()), contains("x")); + assertThat(Expressions.names(agg.aggregates()), contains("sum(salary)", "x")); + var from = as(agg.child(), EsRelation.class); + } + + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[salary{f}#13],[SUM(emp_no{f}#8) AS sum(x), salary{f}#13]] + * \_EsRelation[test][_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, ge..] + */ + public void testIsNotNullConstraintSkippedForStatsWithAlias() { + var plan = optimizedPlan(""" + from test + | eval x = emp_no + | stats sum(x) by salary + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(Expressions.names(agg.groupings()), contains("salary")); + assertThat(Expressions.names(agg.aggregates()), contains("sum(x)", "salary")); + + // non null filter for stats + var from = as(agg.child(), EsRelation.class); + } + + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[],[SUM(emp_no{f}#8) AS a, MIN(salary{f}#13) AS b]] + * \_EsRelation[test][_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, ge..] 
+ */ + public void testIsNotNullConstraintForStatsWithMultiAggWithoutGrouping() { + var plan = optimizedPlan(""" + from test + | stats a = sum(emp_no), b = min(salary) + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(Expressions.names(agg.aggregates()), contains("a", "b")); + + var from = as(agg.child(), EsRelation.class); + } + + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[gender{f}#11],[SUM(emp_no{f}#9) AS a, MIN(salary{f}#14) AS b, gender{f}#11]] + * \_EsRelation[test][_meta_field{f}#15, emp_no{f}#9, first_name{f}#10, g..] + */ + public void testIsNotNullConstraintForStatsWithMultiAggWithGrouping() { + var plan = optimizedPlan(""" + from test + | stats a = sum(emp_no), b = min(salary) by gender + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(Expressions.names(agg.aggregates()), contains("a", "b", "gender")); + + var from = as(agg.child(), EsRelation.class); + } + + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[emp_no{f}#9],[SUM(emp_no{f}#9) AS a, MIN(salary{f}#14) AS b, emp_no{f}#9]] + * \_EsRelation[test][_meta_field{f}#15, emp_no{f}#9, first_name{f}#10, g..] + */ + public void testIsNotNullConstraintForStatsWithMultiAggWithAndOnGrouping() { + var plan = optimizedPlan(""" + from test + | stats a = sum(emp_no), b = min(salary) by emp_no + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(Expressions.names(agg.aggregates()), contains("a", "b", "emp_no")); + + var from = as(agg.child(), EsRelation.class); + } + + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[w{r}#14, g{r}#16],[COUNT(b{r}#24) AS c, w{r}#14, gender{f}#32 AS g]] + * \_Eval[[emp_no{f}#30 / 10[INTEGER] AS x, x{r}#4 + salary{f}#35 AS y, y{r}#8 / 4[INTEGER] AS z, z{r}#11 * 2[INTEGER] + + * 3[INTEGER] AS w, salary{f}#35 + 4[INTEGER] / 2[INTEGER] AS a, a{r}#21 + 3[INTEGER] AS b]] + * \_EsRelation[test][_meta_field{f}#36, emp_no{f}#30, first_name{f}#31, ..] 
+ */ + public void testIsNotNullConstraintForAliasedExpressions() { + var plan = optimizedPlan(""" + from test + | eval x = emp_no / 10 + | eval y = x + salary + | eval z = y / 4 + | eval w = z * 2 + 3 + | rename gender as g, salary as s + | eval a = (s + 4) / 2 + | eval b = a + 3 + | stats c = count(b) by w, g + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(Expressions.names(agg.aggregates()), contains("c", "w", "g")); + var eval = as(agg.child(), Eval.class); + var from = as(eval.child(), EsRelation.class); + } + private LogicalPlan optimizedPlan(String query) { return plan(query); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 1f2bde2526fab..c05e11d8d8a13 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -84,6 +84,8 @@ import static java.util.Arrays.asList; import static org.elasticsearch.core.Tuple.tuple; +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; +import static org.elasticsearch.index.query.QueryBuilders.existsQuery; import static org.elasticsearch.xpack.esql.EsqlTestUtils.TEST_VERIFIER; import static org.elasticsearch.xpack.esql.EsqlTestUtils.as; import static org.elasticsearch.xpack.esql.EsqlTestUtils.configuration; @@ -104,7 +106,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -//@TestLogging(value = "org.elasticsearch.xpack.esql:TRACE", reason = "debug") +// @TestLogging(value = "org.elasticsearch.xpack.esql:TRACE", reason = "debug") public class PhysicalPlanOptimizerTests extends ESTestCase { private static final String PARAM_FORMATTING = "%1$s"; @@ -510,6 +512,16 @@ public void testExtractGroupingFieldsIfAggdWithEval() { assertThat(source.estimatedRowSize(), equalTo(Integer.BYTES + KEYWORD_EST)); } + /** + * Expects + * EvalExec[[agg_emp{r}#4 + 7[INTEGER] AS x]] + * \_LimitExec[500[INTEGER]] + * \_AggregateExec[[],[SUM(emp_no{f}#8) AS agg_emp],FINAL,16] + * \_ExchangeExec[[sum{r}#18, seen{r}#19],true] + * \_AggregateExec[[],[SUM(emp_no{f}#8) AS agg_emp],PARTIAL,8] + * \_FieldExtractExec[emp_no{f}#8] + * \_EsQueryExec[test], query[{"exists":{"field":"emp_no","boost":1.0}}][_doc{f}#34], limit[], sort[] estimatedRowSize[8] + */ public void testQueryWithAggregation() { var plan = physicalPlan(""" from test @@ -526,8 +538,22 @@ public void testQueryWithAggregation() { var extract = as(aggregate.child(), FieldExtractExec.class); assertThat(names(extract.attributesToExtract()), contains("emp_no")); assertThat(aggregate.estimatedRowSize(), equalTo(Long.BYTES)); + + var query = source(extract.child()); + assertThat(query.estimatedRowSize(), equalTo(Integer.BYTES * 2 /* for doc id, emp_no*/)); + assertThat(query.query(), is(existsQuery("emp_no"))); } + /** + * Expects + * EvalExec[[agg_emp{r}#4 + 7[INTEGER] AS x]] + * \_LimitExec[500[INTEGER]] + * \_AggregateExec[[],[SUM(emp_no{f}#8) AS agg_emp],FINAL,16] + * \_ExchangeExec[[sum{r}#18, seen{r}#19],true] + * \_AggregateExec[[],[SUM(emp_no{f}#8) AS agg_emp],PARTIAL,8] + * \_FieldExtractExec[emp_no{f}#8] + * \_EsQueryExec[test], query[{"exists":{"field":"emp_no","boost":1.0}}][_doc{f}#34], limit[], sort[] estimatedRowSize[8] + */ public void 
testQueryWithAggAfterEval() { var plan = physicalPlan(""" from test @@ -543,10 +569,35 @@ public void testQueryWithAggAfterEval() { assertThat(agg.estimatedRowSize(), equalTo(Long.BYTES * 2)); var exchange = asRemoteExchange(agg.child()); var aggregate = as(exchange.child(), AggregateExec.class); - // sum is long a long, x isn't calculated until the agg above + // sum is long, x isn't calculated until the agg above assertThat(aggregate.estimatedRowSize(), equalTo(Long.BYTES)); var extract = as(aggregate.child(), FieldExtractExec.class); assertThat(names(extract.attributesToExtract()), contains("emp_no")); + + var query = source(extract.child()); + assertThat(query.estimatedRowSize(), equalTo(Integer.BYTES * 2 /* for doc id, emp_no*/)); + assertThat(query.query(), is(existsQuery("emp_no"))); + } + + public void testQueryForStatWithMultiAgg() { + var plan = physicalPlan(""" + from test + | stats agg_1 = sum(emp_no), agg_2 = min(salary) + """); + + var stats = statsWithIndexedFields("emp_no", "salary"); + var optimized = optimizedPlan(plan, stats); + var topLimit = as(optimized, LimitExec.class); + var agg = as(topLimit.child(), AggregateExec.class); + var exchange = asRemoteExchange(agg.child()); + var aggregate = as(exchange.child(), AggregateExec.class); + var extract = as(aggregate.child(), FieldExtractExec.class); + assertThat(names(extract.attributesToExtract()), contains("emp_no", "salary")); + + var query = source(extract.child()); + assertThat(query.estimatedRowSize(), equalTo(Integer.BYTES * 3 /* for doc id, emp_no, salary*/)); + assertThat(query.query(), is(boolQuery().should(existsQuery("emp_no")).should(existsQuery("salary")))); } public void testQueryWithNull() { @@ -1337,8 +1388,9 @@ public void testPushDownLike() { QueryBuilder query = source.query(); assertNotNull(query); - assertEquals(WildcardQueryBuilder.class, query.getClass()); - WildcardQueryBuilder wildcard = ((WildcardQueryBuilder) query); + assertEquals(SingleValueQuery.Builder.class, query.getClass()); + assertThat(((SingleValueQuery.Builder) query).next(), instanceOf(WildcardQueryBuilder.class)); + WildcardQueryBuilder wildcard = ((WildcardQueryBuilder) ((SingleValueQuery.Builder) query).next()); assertEquals("first_name", wildcard.fieldName()); assertEquals("*foo*", wildcard.value()); } @@ -1402,8 +1454,9 @@ public void testPushDownRLike() { QueryBuilder query = source.query(); assertNotNull(query); - assertEquals(RegexpQueryBuilder.class, query.getClass()); - RegexpQueryBuilder wildcard = ((RegexpQueryBuilder) query); + assertEquals(SingleValueQuery.Builder.class, query.getClass()); + assertThat(((SingleValueQuery.Builder) query).next(), instanceOf(RegexpQueryBuilder.class)); + RegexpQueryBuilder wildcard = ((RegexpQueryBuilder) ((SingleValueQuery.Builder) query).next()); assertEquals("first_name", wildcard.fieldName()); assertEquals(".*foo.*", wildcard.value()); } @@ -1424,8 +1477,9 @@ public void testPushDownNotRLike() { QueryBuilder query = source.query(); assertNotNull(query); - assertThat(query, instanceOf(BoolQueryBuilder.class)); - var boolQuery = (BoolQueryBuilder) query; + assertThat(query, instanceOf(SingleValueQuery.Builder.class)); + assertThat(((SingleValueQuery.Builder) query).next(), instanceOf(BoolQueryBuilder.class)); + var boolQuery = (BoolQueryBuilder) ((SingleValueQuery.Builder) query).next(); List mustNot = boolQuery.mustNot(); assertThat(mustNot.size(), is(1)); assertThat(mustNot.get(0), instanceOf(RegexpQueryBuilder.class)); @@ 
-1892,6 +1946,110 @@ public boolean exists(String field) { assertThat(Expressions.names(localSourceExec.output()), contains("languages", "min", "seen")); } + /** + * Expects + * intermediate plan + * LimitExec[500[INTEGER]] + * \_AggregateExec[[],[COUNT(emp_no{f}#6) AS c],FINAL,null] + * \_ExchangeExec[[count{r}#16, seen{r}#17],true] + * \_FragmentExec[filter=null, estimatedRowSize=0, fragment=[ + * Aggregate[[],[COUNT(emp_no{f}#6) AS c]] + * \_Filter[emp_no{f}#6 > 10[INTEGER]] + * \_EsRelation[test][_meta_field{f}#12, emp_no{f}#6, first_name{f}#7, ge..]]] + * + * and final plan is + * LimitExec[500[INTEGER]] + * \_AggregateExec[[],[COUNT(emp_no{f}#6) AS c],FINAL,8] + * \_ExchangeExec[[count{r}#16, seen{r}#17],true] + * \_LocalSourceExec[[count{r}#16, seen{r}#17],[LongVectorBlock[vector=ConstantLongVector[positions=1, value=0]]]] + */ + public void testPartialAggFoldingOutput() { + var plan = physicalPlan(""" + from test + | where emp_no > 10 + | stats c = count(emp_no) + """); + + var stats = statsForMissingField("emp_no"); + var optimized = optimizedPlan(plan, stats); + + var limit = as(optimized, LimitExec.class); + var agg = as(limit.child(), AggregateExec.class); + var exchange = as(agg.child(), ExchangeExec.class); + assertThat(Expressions.names(exchange.output()), contains("count", "seen")); + var source = as(exchange.child(), LocalSourceExec.class); + assertThat(Expressions.names(source.output()), contains("count", "seen")); + } + + /** + * Checks that when the folding happens on the coordinator, the intermediate agg state + * is not used anymore. + * + * Expects + * LimitExec[10000[INTEGER]] + * \_AggregateExec[[],[COUNT(emp_no{f}#5) AS c],FINAL,8] + * \_AggregateExec[[],[COUNT(emp_no{f}#5) AS c],PARTIAL,8] + * \_LimitExec[10[INTEGER]] + * \_ExchangeExec[[],false] + * \_ProjectExec[[emp_no{r}#5]] + * \_EvalExec[[null[INTEGER] AS emp_no]] + * \_EsQueryExec[test], query[][_doc{f}#26], limit[10], sort[] estimatedRowSize[8] + */ + public void testGlobalAggFoldingOutput() { + var plan = physicalPlan(""" + from test + | limit 10 + | stats c = count(emp_no) + """); + + var stats = statsForMissingField("emp_no"); + var optimized = optimizedPlan(plan, stats); + + var limit = as(optimized, LimitExec.class); + var aggFinal = as(limit.child(), AggregateExec.class); + var aggPartial = as(aggFinal.child(), AggregateExec.class); + assertThat(Expressions.names(aggPartial.output()), contains("c")); + limit = as(aggPartial.child(), LimitExec.class); + var exchange = as(limit.child(), ExchangeExec.class); + var project = as(exchange.child(), ProjectExec.class); + } + + /** + * Checks that the folded aggregation preserves the intermediate output. 
+ * + * Expects + * ProjectExec[[a{r}#5]] + * \_EvalExec[[__a_SUM@734e2841{r}#16 / __a_COUNT@12536eab{r}#17 AS a]] + * \_LimitExec[500[INTEGER]] + * \_AggregateExec[[],[SUM(emp_no{f}#6) AS __a_SUM@734e2841, COUNT(emp_no{f}#6) AS __a_COUNT@12536eab],FINAL,24] + * \_ExchangeExec[[sum{r}#18, seen{r}#19, count{r}#20, seen{r}#21],true] + * \_LocalSourceExec[[sum{r}#18, seen{r}#19, count{r}#20, seen{r}#21],[LongArrayBlock[positions=1, mvOrdering=UNORDERED, + * values=[0, + * 0]], BooleanVectorBlock[vector=ConstantBooleanVector[positions=1, value=true]], + * LongVectorBlock[vector=ConstantLongVector[positions=1, value=0]], + * BooleanVectorBlock[vector=ConstantBooleanVector[positions=1, value=true]]]] + */ + public void testPartialAggFoldingOutputForSyntheticAgg() { + var plan = physicalPlan(""" + from test + | where emp_no > 10 + | stats a = avg(emp_no) + """); + + var stats = statsForMissingField("emp_no"); + var optimized = optimizedPlan(plan, stats); + + var project = as(optimized, ProjectExec.class); + var eval = as(project.child(), EvalExec.class); + var limit = as(eval.child(), LimitExec.class); + var aggFinal = as(limit.child(), AggregateExec.class); + assertThat(aggFinal.output(), hasSize(2)); + var exchange = as(aggFinal.child(), ExchangeExec.class); + assertThat(Expressions.names(exchange.output()), contains("sum", "seen", "count", "seen")); + var source = as(exchange.child(), LocalSourceExec.class); + assertThat(Expressions.names(source.output()), contains("sum", "seen", "count", "seen")); + } + private static EsQueryExec source(PhysicalPlan plan) { if (plan instanceof ExchangeExec exchange) { plan = exchange.child(); @@ -1922,6 +2080,17 @@ private PhysicalPlan optimizedPlan(PhysicalPlan plan, SearchStats searchStats) { return l; } + static SearchStats statsWithIndexedFields(String... 
names) { + return new EsqlTestUtils.TestSearchStats() { + private final Set indexedFields = Set.of(names); + + @Override + public boolean isIndexed(String field) { + return indexedFields.contains(field); + } + }; + } + static PhysicalPlan localRelationshipAlignment(PhysicalPlan l) { // handle local reduction alignment return l.transformUp(ExchangeExec.class, exg -> { @@ -1941,6 +2110,7 @@ private PhysicalPlan physicalPlan(String query) { var logical = logicalOptimizer.optimize(analyzer.analyze(parser.createStatement(query))); // System.out.println("Logical\n" + logical); var physical = mapper.map(logical); + // System.out.println(physical); assertSerialization(physical); return physical; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java index c9b33a3f2f020..1657b371bfeda 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java @@ -37,6 +37,8 @@ import java.time.Period; import java.util.ArrayList; import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import static org.elasticsearch.xpack.esql.EsqlTestUtils.as; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.DATE_PERIOD; @@ -129,6 +131,9 @@ public void testStringLiteralsExceptions() { () -> whereExpression("\"\"\"\"\"\" foo \"\"\"\" == abc"), "line 1:23: mismatched input 'foo' expecting {," ); + + var number = "1" + IntStream.range(0, 309).mapToObj(ignored -> "0").collect(Collectors.joining()); + assertParsingException(() -> parse("row foo == " + number), "line 1:13: Number [" + number + "] is too large"); } public void testBooleanLiteralsCondition() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java index b4c9d7a9baeca..1d2b11d3deb89 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java @@ -12,12 +12,12 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.SerializationTestUtils; +import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.evaluator.EvalMapper; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; @@ -162,7 +162,7 @@ private static FieldAttribute field(String name, DataType type) { static DriverContext driverContext() { return new DriverContext( new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService()).withCircuitBreaking(), - BlockFactory.getNonBreakingInstance() + TestBlockFactory.getNonBreakingInstance() ); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/FilterTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/FilterTests.java index 926f9dd27f84f..52620a0a55af7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/FilterTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/FilterTests.java @@ -305,7 +305,7 @@ private QueryBuilder restFilterQuery(String field) { } private QueryBuilder filterQueryForTransportNodes(PhysicalPlan plan) { - return PlannerUtils.detectFilter(plan, EMP_NO); + return PlannerUtils.detectFilter(plan, EMP_NO, x -> true); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/GrokEvaluatorExtracterTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/GrokEvaluatorExtracterTests.java index 6a5c8fd3f92c2..af7a66fea9bb2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/GrokEvaluatorExtracterTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/GrokEvaluatorExtracterTests.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.DoubleBlock; @@ -18,6 +19,7 @@ import org.elasticsearch.grok.Grok; import org.elasticsearch.grok.GrokBuiltinPatterns; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.evaluator.command.GrokEvaluatorExtracter; import java.util.Map; @@ -26,6 +28,8 @@ import static org.hamcrest.Matchers.is; public class GrokEvaluatorExtracterTests extends ESTestCase { + final BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); + final Map KEY_TO_BLOCK = Map.of("a", 0, "b", 1, "c", 2, "d", 3, "e", 4, "f", 5); final Map TYPES = Map.of( "a", @@ -196,7 +200,7 @@ private void checkBooleanBlock(Block.Builder builder, int[] itemsPerRow, boolean private BytesRefBlock buildInputBlock(int[] mvSize, String... input) { int nextString = 0; - BytesRefBlock.Builder inputBuilder = BytesRefBlock.newBlockBuilder(input.length); + BytesRefBlock.Builder inputBuilder = blockFactory.newBytesRefBlockBuilder(input.length); for (int i = 0; i < mvSize.length; i++) { if (mvSize[i] == 0) { inputBuilder.appendNull(); @@ -222,12 +226,12 @@ private BytesRefBlock buildInputBlock(int[] mvSize, String... 
input) { private Block.Builder[] buidDefaultTargetBlocks(int estimatedSize) { return new Block.Builder[] { - BytesRefBlock.newBlockBuilder(estimatedSize), - IntBlock.newBlockBuilder(estimatedSize), - LongBlock.newBlockBuilder(estimatedSize), - DoubleBlock.newBlockBuilder(estimatedSize), - DoubleBlock.newBlockBuilder(estimatedSize), - BooleanBlock.newBlockBuilder(estimatedSize) }; + blockFactory.newBytesRefBlockBuilder(estimatedSize), + blockFactory.newIntBlockBuilder(estimatedSize), + blockFactory.newLongBlockBuilder(estimatedSize), + blockFactory.newDoubleBlockBuilder(estimatedSize), + blockFactory.newDoubleBlockBuilder(estimatedSize), + blockFactory.newBooleanBlockBuilder(estimatedSize) }; } private GrokEvaluatorExtracter buildExtracter(String pattern, Map keyToBlock, Map types) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java index 24fcae0f6bbb0..27a45e71a69c1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java @@ -19,7 +19,6 @@ import org.apache.lucene.tests.index.RandomIndexWriter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.lucene.LuceneSourceOperator; import org.elasticsearch.compute.lucene.LuceneTopNSourceOperator; import org.elasticsearch.compute.operator.SourceOperator; @@ -31,6 +30,7 @@ import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.TestSearchContext; +import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; @@ -124,7 +124,7 @@ private LocalExecutionPlanner planner() throws IOException { "test", null, BigArrays.NON_RECYCLING_INSTANCE, - BlockFactory.getNonBreakingInstance(), + TestBlockFactory.getNonBreakingInstance(), Settings.EMPTY, config(), null, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java index 601184252814e..8377530b9fbc2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java @@ -13,11 +13,10 @@ import org.elasticsearch.compute.aggregation.GroupingAggregator; import org.elasticsearch.compute.aggregation.blockhash.BlockHash; import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DocBlock; import org.elasticsearch.compute.data.DocVector; import org.elasticsearch.compute.data.ElementType; -import org.elasticsearch.compute.data.IntArrayVector; -import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverContext; @@ -27,6 +26,7 @@ import 
org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.compute.operator.SourceOperator.SourceOperatorFactory; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; @@ -92,6 +92,11 @@ public Operator.OperatorFactory ordinalGroupingOperatorFactory( private class TestSourceOperator extends SourceOperator { boolean finished = false; + private final DriverContext driverContext; + + TestSourceOperator(DriverContext driverContext) { + this.driverContext = driverContext; + } @Override public Page getOutput() { @@ -99,15 +104,14 @@ public Page getOutput() { finish(); } - return new Page( - new Block[] { - new DocVector( - IntBlock.newConstantBlockWith(0, testData.getPositionCount()).asVector(), - IntBlock.newConstantBlockWith(0, testData.getPositionCount()).asVector(), - new IntArrayVector(IntStream.range(0, testData.getPositionCount()).toArray(), testData.getPositionCount()), - true - ).asBlock() } + BlockFactory blockFactory = driverContext.blockFactory(); + DocVector docVector = new DocVector( + blockFactory.newConstantIntVector(0, testData.getPositionCount()), + blockFactory.newConstantIntVector(0, testData.getPositionCount()), + blockFactory.newIntArrayVector(IntStream.range(0, testData.getPositionCount()).toArray(), testData.getPositionCount()), + true ); + return new Page(docVector.asBlock()); } @Override @@ -128,11 +132,9 @@ public void close() { private class TestSourceOperatorFactory implements SourceOperatorFactory { - SourceOperator op = new TestSourceOperator(); - @Override public SourceOperator get(DriverContext driverContext) { - return op; + return new TestSourceOperator(driverContext); } @Override @@ -292,7 +294,8 @@ private Block extractBlockForColumn(Page page, String columnName) { DocBlock docBlock = page.getBlock(0); IntVector docIndices = docBlock.asVector().docs(); Block originalData = testData.getBlock(columnIndex); - Block.Builder builder = originalData.elementType().newBlockBuilder(docIndices.getPositionCount()); + Block.Builder builder = originalData.elementType() + .newBlockBuilder(docIndices.getPositionCount(), TestBlockFactory.getNonBreakingInstance()); for (int c = 0; c < docIndices.getPositionCount(); c++) { int doc = docIndices.getInt(c); builder.copyFrom(originalData, doc, doc + 1); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java index 8970617548016..f1701ed696d2c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java @@ -82,6 +82,7 @@ protected DataNodeRequest createTestInstance() { DataNodeRequest request = new DataNodeRequest( sessionId, EsqlConfigurationSerializationTests.randomConfiguration(query), + randomAlphaOfLength(10), shardIds, aliasFilters, physicalPlan @@ -92,9 +93,16 @@ protected DataNodeRequest createTestInstance() { @Override protected DataNodeRequest mutateInstance(DataNodeRequest in) throws IOException { - return switch (between(0, 5)) { + return switch (between(0, 6)) { case 0 -> { - var request = new DataNodeRequest(randomAlphaOfLength(20), in.configuration(), 
in.shardIds(), in.aliasFilters(), in.plan()); + var request = new DataNodeRequest( + randomAlphaOfLength(20), + in.configuration(), + in.clusterAlias(), + in.shardIds(), + in.aliasFilters(), + in.plan() + ); request.setParentTask(in.getParentTask()); yield request; } @@ -102,6 +110,7 @@ protected DataNodeRequest mutateInstance(DataNodeRequest in) throws IOException var request = new DataNodeRequest( in.sessionId(), EsqlConfigurationSerializationTests.randomConfiguration(), + in.clusterAlias(), in.shardIds(), in.aliasFilters(), in.plan() @@ -111,7 +120,14 @@ protected DataNodeRequest mutateInstance(DataNodeRequest in) throws IOException } case 2 -> { List shardIds = randomList(1, 10, () -> new ShardId("new-index-" + between(1, 10), "n/a", between(1, 10))); - var request = new DataNodeRequest(in.sessionId(), in.configuration(), shardIds, in.aliasFilters(), in.plan()); + var request = new DataNodeRequest( + in.sessionId(), + in.configuration(), + in.clusterAlias(), + shardIds, + in.aliasFilters(), + in.plan() + ); request.setParentTask(in.getParentTask()); yield request; } @@ -132,6 +148,7 @@ protected DataNodeRequest mutateInstance(DataNodeRequest in) throws IOException var request = new DataNodeRequest( in.sessionId(), in.configuration(), + in.clusterAlias(), in.shardIds(), in.aliasFilters(), mapAndMaybeOptimize(parse(newQuery)) @@ -146,18 +163,45 @@ protected DataNodeRequest mutateInstance(DataNodeRequest in) throws IOException } else { aliasFilters = Map.of(new Index("concrete-index", "n/a"), AliasFilter.of(new TermQueryBuilder("id", "2"), "alias-2")); } - var request = new DataNodeRequest(in.sessionId(), in.configuration(), in.shardIds(), aliasFilters, in.plan()); + var request = new DataNodeRequest( + in.sessionId(), + in.configuration(), + in.clusterAlias(), + in.shardIds(), + aliasFilters, + in.plan() + ); request.setParentTask(request.getParentTask()); yield request; } case 5 -> { - var request = new DataNodeRequest(in.sessionId(), in.configuration(), in.shardIds(), in.aliasFilters(), in.plan()); + var request = new DataNodeRequest( + in.sessionId(), + in.configuration(), + in.clusterAlias(), + in.shardIds(), + in.aliasFilters(), + in.plan() + ); request.setParentTask( randomValueOtherThan(request.getParentTask().getNodeId(), () -> randomAlphaOfLength(10)), randomNonNegativeLong() ); yield request; } + case 6 -> { + var clusterAlias = randomValueOtherThan(in.clusterAlias(), () -> randomAlphaOfLength(10)); + var request = new DataNodeRequest( + in.sessionId(), + in.configuration(), + clusterAlias, + in.shardIds(), + in.aliasFilters(), + in.plan() + ); + request.setParentTask(request.getParentTask()); + yield request; + } default -> throw new AssertionError("invalid value"); }; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java index 6fce2646012af..f773904ed8973 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java @@ -77,14 +77,13 @@ public void testMatchAll() throws IOException { testCase(new SingleValueQuery(new MatchAll(Source.EMPTY), "foo").asBuilder(), false, false, this::runCase); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102997") public void testMatchSome() throws IOException { int max = between(1, 100); 
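// scanForMVs is false in this runCase call: the warnings assertion only considers multi-values in the first `max` docs — see the runCase javadoc further down.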
testCase( new SingleValueQuery.Builder(new RangeQueryBuilder("i").lt(max), "foo", new SingleValueQuery.Stats(), Source.EMPTY), false, false, (fieldValues, count) -> runCase(fieldValues, count, null, max, false) ); } @@ -138,14 +137,13 @@ public void testNotMatchNone() throws IOException { ); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102997") public void testNotMatchSome() throws IOException { int max = between(1, 100); testCase( new SingleValueQuery(new RangeQuery(Source.EMPTY, "i", null, false, max, false, null), "foo").negate(Source.EMPTY).asBuilder(), false, true, (fieldValues, count) -> runCase(fieldValues, count, max, 100, true) ); } @@ -154,22 +152,34 @@ interface TestCase { void run(List> fieldValues, int count) throws IOException; } - private void runCase(List> fieldValues, int count, Integer docsStart, Integer docsStop) { + /** + * Helper to run the checks for some of the test cases. It performs two verifications: one on the count of the values the query + * is supposed to match and one on the warnings that are expected to be raised. + * @param fieldValues The indexed values of the field the query runs against. + * @param count The count of the docs the query matched. + * @param docsStart The start of the slice in fieldValues we want to consider. If `null`, the start will be 0. + * @param docsStop The end of the slice in fieldValues we want to consider. If `null`, the end will be the fieldValues size. + * @param scanForMVs If true, the warnings check scans the entire fieldValues rather than just the docsStart:docsStop interval, + * which is needed for some cases. + */ + private void runCase(List> fieldValues, int count, Integer docsStart, Integer docsStop, boolean scanForMVs) { int expected = 0; int min = docsStart != null ? docsStart : 0; int max = docsStop != null ? docsStop : fieldValues.size(); - int valuesCount = 0; + int mvCountInRange = 0; for (int i = min; i < max; i++) { - int mvCount = fieldValues.get(i).size(); - if (mvCount == 1) { + int valuesCount = fieldValues.get(i).size(); + if (valuesCount == 1) { expected++; + } else if (valuesCount > 1) { + mvCountInRange++; } - valuesCount += mvCount; } assertThat(count, equalTo(expected)); - // query's count runs against the full set, not just min-to-max - if (valuesCount > 0 && fieldValues.stream().anyMatch(x -> x.size() > 1)) { + // the SingleValueQuery.TwoPhaseIteratorForSortedNumericsAndTwoPhaseQueries can scan all docs - and generate warnings - even if + // the inner query matches none, so expect warnings if MVs have been encountered within the given range, OR if a full scan is required + if (mvCountInRange > 0 || (scanForMVs && fieldValues.stream().anyMatch(x -> x.size() > 1))) { assertWarnings( "Line -1:-1: evaluation of [] failed, treating result as null. 
Only first 20 failures recorded.", "Line -1:-1: java.lang.IllegalArgumentException: single-value function encountered multi-value" @@ -178,7 +188,7 @@ private void runCase(List> fieldValues, int count, Integer docsStar } private void runCase(List> fieldValues, int count) { - runCase(fieldValues, count, null, null); + runCase(fieldValues, count, null, null, false); } private void testCase(SingleValueQuery.Builder builder, boolean rewritesToMatchNone, boolean subHasTwoPhase, TestCase testCase) diff --git a/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java b/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java index 5c87fe8dd6c19..88f3b126b228c 100644 --- a/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java +++ b/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.fleet.action; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.UnavailableShardsException; @@ -181,7 +180,7 @@ public void testMustProvideCorrectNumberOfShards() { ); ElasticsearchStatusException exception = expectThrows( ElasticsearchStatusException.class, - () -> client().execute(GetGlobalCheckpointsAction.INSTANCE, request).actionGet() + client().execute(GetGlobalCheckpointsAction.INSTANCE, request) ); assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat( @@ -205,7 +204,7 @@ public void testWaitForAdvanceOnlySupportsOneShard() { ); ElasticsearchStatusException exception = expectThrows( ElasticsearchStatusException.class, - () -> client().execute(GetGlobalCheckpointsAction.INSTANCE, request).actionGet() + client().execute(GetGlobalCheckpointsAction.INSTANCE, request) ); assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat(exception.getMessage(), equalTo("wait_for_advance only supports indices with one shard. 
diff --git a/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java b/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java
index 5c87fe8dd6c19..88f3b126b228c 100644
--- a/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java
+++ b/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java
@@ -7,7 +7,6 @@
 package org.elasticsearch.xpack.fleet.action;

-import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ElasticsearchStatusException;
 import org.elasticsearch.action.ActionFuture;
 import org.elasticsearch.action.UnavailableShardsException;
@@ -181,7 +180,7 @@ public void testMustProvideCorrectNumberOfShards() {
         );
         ElasticsearchStatusException exception = expectThrows(
             ElasticsearchStatusException.class,
-            () -> client().execute(GetGlobalCheckpointsAction.INSTANCE, request).actionGet()
+            client().execute(GetGlobalCheckpointsAction.INSTANCE, request)
         );
         assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST));
         assertThat(
@@ -205,7 +204,7 @@ public void testWaitForAdvanceOnlySupportsOneShard() {
         );
         ElasticsearchStatusException exception = expectThrows(
             ElasticsearchStatusException.class,
-            () -> client().execute(GetGlobalCheckpointsAction.INSTANCE, request).actionGet()
+            client().execute(GetGlobalCheckpointsAction.INSTANCE, request)
         );
         assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST));
         assertThat(exception.getMessage(), equalTo("wait_for_advance only supports indices with one shard. [shard count: 3]"));
@@ -221,10 +220,7 @@ public void testIndexDoesNotExistNoWait() {
         );

         long start = System.nanoTime();
-        ElasticsearchException exception = expectThrows(
-            IndexNotFoundException.class,
-            () -> client().execute(GetGlobalCheckpointsAction.INSTANCE, request).actionGet()
-        );
+        expectThrows(IndexNotFoundException.class, client().execute(GetGlobalCheckpointsAction.INSTANCE, request));
         long elapsed = TimeValue.timeValueNanos(System.nanoTime() - start).seconds();
         assertThat(elapsed, lessThanOrEqualTo(TEN_SECONDS.seconds()));
     }
@@ -237,10 +233,7 @@ public void testWaitOnIndexTimeout() {
             EMPTY_ARRAY,
             TimeValue.timeValueMillis(between(1, 100))
         );
-        ElasticsearchException exception = expectThrows(
-            IndexNotFoundException.class,
-            () -> client().execute(GetGlobalCheckpointsAction.INSTANCE, request).actionGet()
-        );
+        expectThrows(IndexNotFoundException.class, client().execute(GetGlobalCheckpointsAction.INSTANCE, request));
     }

     public void testWaitOnIndexCreated() throws Exception {
@@ -285,7 +278,7 @@ public void testPrimaryShardsNotReadyNoWait() {

         UnavailableShardsException exception = expectThrows(
             UnavailableShardsException.class,
-            () -> client().execute(GetGlobalCheckpointsAction.INSTANCE, request).actionGet()
+            client().execute(GetGlobalCheckpointsAction.INSTANCE, request)
         );
         assertEquals("Primary shards were not active [shards=1, active=0]", exception.getMessage());
     }
@@ -309,7 +302,7 @@ public void testWaitOnPrimaryShardsReadyTimeout() {

         UnavailableShardsException exception = expectThrows(
             UnavailableShardsException.class,
-            () -> client().execute(GetGlobalCheckpointsAction.INSTANCE, request).actionGet()
+            client().execute(GetGlobalCheckpointsAction.INSTANCE, request)
         );
         assertEquals("Primary shards were not active within timeout [timeout=" + timeout + ", shards=1, active=0]", exception.getMessage());
     }
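The repeated simplification in this file, dropping the `() -> ....actionGet()` lambda in favor of handing the ActionFuture straight to expectThrows, relies on a test-utility overload that awaits the future and asserts the failure type itself. It presumably behaves roughly like this stand-in (not the actual ESTestCase code):

```java
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

// Hypothetical stand-in for the expectThrows(Class, future) overload the diff
// relies on: await the future and assert that it failed with the given type,
// so call sites no longer need a lambda wrapper around actionGet().
public final class ExpectThrowsSketch {
    public static <E extends Throwable> E expectThrows(Class<E> expected, Future<?> future) {
        try {
            future.get();
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (expected.isInstance(cause)) {
                return expected.cast(cause); // the failure we were waiting for
            }
            throw new AssertionError("expected " + expected.getName() + " but got " + cause, cause);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new AssertionError("interrupted while waiting", e);
        }
        throw new AssertionError("expected " + expected.getName() + " but no exception was thrown");
    }
}
```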
diff --git a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetSearchAction.java b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetSearchAction.java
index 3a09fe1d18382..73af65b2f31a6 100644
--- a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetSearchAction.java
+++ b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetSearchAction.java
@@ -18,7 +18,7 @@
 import org.elasticsearch.rest.Scope;
 import org.elasticsearch.rest.ServerlessScope;
 import org.elasticsearch.rest.action.RestCancellableNodeClient;
-import org.elasticsearch.rest.action.RestChunkedToXContentListener;
+import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener;
 import org.elasticsearch.rest.action.search.RestSearchAction;
 import org.elasticsearch.usage.SearchUsageHolder;
@@ -96,7 +96,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli
         return channel -> {
             RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel());
-            cancelClient.execute(TransportSearchAction.TYPE, searchRequest, new RestChunkedToXContentListener<>(channel));
+            cancelClient.execute(TransportSearchAction.TYPE, searchRequest, new RestRefCountedChunkedToXContentListener<>(channel));
         };
     }
diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java
index 5f219bd8ce592..ba4e1d98f63a6 100644
--- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java
+++ b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java
@@ -232,21 +232,16 @@ public void testRetryPointInTime() throws Exception {
         ).keepAlive(TimeValue.timeValueMinutes(2));
         final String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest).actionGet().getPointInTimeId();
         try {
-            assertNoFailuresAndResponse(
-                prepareSearch().setIndices(indexName).setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)),
-                searchResponse -> {
-                    assertThat(searchResponse.pointInTimeId(), equalTo(pitId));
-                    assertHitCount(searchResponse, numDocs);
-                }
-            );
+            assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), searchResponse -> {
+                assertThat(searchResponse.pointInTimeId(), equalTo(pitId));
+                assertHitCount(searchResponse, numDocs);
+            });
             internalCluster().restartNode(assignedNode);
             ensureGreen(indexName);
             assertNoFailuresAndResponse(
-                prepareSearch().setIndices(indexName)
-                    .setQuery(new RangeQueryBuilder("created_date").gte("2011-01-01").lte("2011-12-12"))
+                prepareSearch().setQuery(new RangeQueryBuilder("created_date").gte("2011-01-01").lte("2011-12-12"))
                     .setSearchType(SearchType.QUERY_THEN_FETCH)
-                    .setPreference(null)
                     .setPreFilterShardSize(between(1, 10))
                     .setAllowPartialSearchResults(true)
                     .setPointInTime(new PointInTimeBuilder(pitId)),
@@ -287,7 +282,7 @@ public void testPointInTimeWithDeletedIndices() {
         indicesAdmin().prepareDelete("index-1").get();
         // Return partial results if allow partial search result is allowed
         assertResponse(
-            prepareSearch().setPreference(null).setAllowPartialSearchResults(true).setPointInTime(new PointInTimeBuilder(pitId)),
+            prepareSearch().setAllowPartialSearchResults(true).setPointInTime(new PointInTimeBuilder(pitId)),
             searchResponse -> {
                 assertFailures(searchResponse);
                 assertHitCount(searchResponse, index2);
@@ -296,7 +291,7 @@ public void testPointInTimeWithDeletedIndices() {
         // Fails if allow partial search result is not allowed
         expectThrows(
             ElasticsearchException.class,
-            prepareSearch().setPreference(null).setAllowPartialSearchResults(false).setPointInTime(new PointInTimeBuilder(pitId))::get
+            prepareSearch().setAllowPartialSearchResults(false).setPointInTime(new PointInTimeBuilder(pitId))
         );
     } finally {
         client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet();
@@ -322,7 +317,7 @@ public void testOpenPointInTimeWithNoIndexMatched() {
             .getPointInTimeId();
         try {
             assertNoFailuresAndResponse(
-                prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)),
+                prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)),
                 searchResponse -> assertHitCount(searchResponse, numDocs)
             );
         } finally {
@@ -338,7 +333,7 @@ public void testOpenPointInTimeWithNoIndexMatched() {
             .actionGet()
             .getPointInTimeId();
         try {
-            assertHitCountAndNoFailures(prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), 0);
+            assertHitCountAndNoFailures(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), 0);
         } finally {
             client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet();
         }
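The RestFleetSearchAction hunk above swaps the plain chunked listener for a ref-counted one, which keeps the (potentially pooled) search response alive until the channel has finished streaming it. A generic, self-contained illustration of the reference-counting idea; none of these classes come from the PR:

```java
import java.util.concurrent.atomic.AtomicInteger;

// Generic illustration only: a response recycled once every interested party
// has released its reference. A ref-counted REST listener acquires a reference
// before streaming chunks and releases it when the channel is done, so the
// response cannot be reclaimed mid-flight.
final class PooledResponse {
    private final AtomicInteger refs = new AtomicInteger(1); // creator holds the first ref

    void incRef() {
        if (refs.getAndIncrement() <= 0) {
            throw new IllegalStateException("already released"); // sketch-level guard, not a real CAS loop
        }
    }

    void decRef() {
        if (refs.decrementAndGet() == 0) {
            recycle();
        }
    }

    private void recycle() {
        // return buffers to the pool, release native memory, etc.
    }
}
```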
diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java
index ca848c8bb8c44..f1d23739b0938 100644
--- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java
+++ b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java
@@ -100,7 +100,7 @@ public void testCloseFreezeAndOpen() throws Exception {
         assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(indexName)).actionGet());
         expectThrows(
             ClusterBlockException.class,
-            () -> prepareIndex(indexName).setId("4").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get()
+            prepareIndex(indexName).setId("4").setSource("field", "value").setRefreshPolicy(IMMEDIATE)
         );
         IndicesService indexServices = getInstanceFromNode(IndicesService.class);
         Index index = resolveIndex(indexName);
@@ -150,11 +150,7 @@ public void testCloseFreezeAndOpen() throws Exception {
         try {
             for (int from = 0; from < 3; from++) {
                 assertResponse(
-                    client().prepareSearch()
-                        .setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED)
-                        .setPointInTime(new PointInTimeBuilder(pitId))
-                        .setSize(1)
-                        .setFrom(from),
+                    client().prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)).setSize(1).setFrom(from),
                     response -> {
                         assertHitCount(response, 3);
                         assertEquals(1, response.getHits().getHits().length);
@@ -276,12 +272,12 @@ public void testDoubleFreeze() {
         assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("test-idx")).actionGet());
         ResourceNotFoundException exception = expectThrows(
             ResourceNotFoundException.class,
-            () -> client().execute(
+            client().execute(
                 FreezeIndexAction.INSTANCE,
                 new FreezeRequest("test-idx").indicesOptions(
                     new IndicesOptions(EnumSet.noneOf(IndicesOptions.Option.class), EnumSet.of(IndicesOptions.WildcardStates.OPEN))
                 )
-            ).actionGet()
+            )
         );
         assertEquals("no index found to freeze", exception.getMessage());
     }
@@ -473,10 +469,7 @@ public void testWriteToFrozenIndex() {
         prepareIndex("idx").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
         assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("idx")).actionGet());
         assertIndexFrozen("idx");
-        expectThrows(
-            ClusterBlockException.class,
-            () -> prepareIndex("idx").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get()
-        );
+        expectThrows(ClusterBlockException.class, prepareIndex("idx").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE));
     }

     public void testIgnoreUnavailable() {
@@ -502,13 +495,13 @@ public void testUnfreezeClosedIndex() {
         assertEquals(IndexMetadata.State.CLOSE, clusterAdmin().prepareState().get().getState().metadata().index("idx").getState());
         expectThrows(
             IndexNotFoundException.class,
-            () -> client().execute(
+            client().execute(
                 FreezeIndexAction.INSTANCE,
                 new FreezeRequest("id*").setFreeze(false)
                     .indicesOptions(
                         new IndicesOptions(EnumSet.noneOf(IndicesOptions.Option.class), EnumSet.of(IndicesOptions.WildcardStates.OPEN))
                     )
-            ).actionGet()
+            )
         );
         // we don't resolve to closed indices
         assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("idx").setFreeze(false)).actionGet());
diff --git a/x-pack/plugin/identity-provider/docs/en/rest-api/idp-saml-init.asciidoc b/x-pack/plugin/identity-provider/docs/en/rest-api/idp-saml-init.asciidoc
index 561b5abd8a660..22c53b4612c39 100644
--- a/x-pack/plugin/identity-provider/docs/en/rest-api/idp-saml-init.asciidoc
+++ b/x-pack/plugin/identity-provider/docs/en/rest-api/idp-saml-init.asciidoc
@@ -98,8 +98,9 @@ Provider that should receive this SAML Response.
 --------------------------------------------------------------------
 // TESTRESPONSE[skip:Do not enable identity provider for the docs cluster, at least not yet]

-A failed call, in the case of an SP initiated SSO returns a SAML Response as an XML String with its status set to the appropriate error
-code indicating that the authentication request failed and the reason for that failure. A `saml_status` of
+A failed call, in the case of an SP-initiated SSO, returns a standard Elasticsearch error response with the appropriate HTTP status code.
+The error contains a `saml_initiate_single_sign_on_response` field holding a SAML Response as an XML String, with its status
+set to the appropriate error code indicating that the authentication request failed and the reason for that failure. A `saml_status` of
 `urn:oasis:names:tc:SAML:2.0:status:Requester` indicates that the error is on the side of the SP or the user, while a `saml_status` of
 `urn:oasis:names:tc:SAML:2.0:status:Responder` indicates that something went wrong in the IDP side. The `error` field contains a short
 human friendly interpretation of the error that is outside the SAML standard and is meant to be communicated to the user, especially
@@ -108,13 +109,35 @@ if the user is not redirected back the SP with the `saml_response`

 [source, console-result]
 --------------------------------------------------------------------
 {
-  "post_url" : "https://sp1.kibana.org/saml/acs",
-  "saml_response" : "<?xml version="1.0" encoding="UTF-8"?>https://idp.cloud.elastic.co...removed for brevity...",
-  "saml_status" : "urn:oasis:names:tc:SAML:2.0:status:Requester",
-  "error" : "User [user1] is not permitted to access service [https://sp1.kibana.org]",
-  "service_provider" : {
-    "entity_id" : "https://sp1.kibana.org"
-  }
+  "error":{
+    "root_cause":[
+      {
+        "type":"saml_initiate_single_sign_on_exception",
+        "reason":"User [es_user] is not permitted to access service [ec:abcdef:123456]",
+        "saml_initiate_single_sign_on_response":{
+          "post_url":"https://AVoMOJLJfbru.elastic-cloud.com/saml/acs",
+          "saml_response":"urn:elastic:cloud:idp",
+          "saml_status":"urn:oasis:names:tc:SAML:2.0:status:Requester",
+          "error":"User [es_user] is not permitted to access service [ec:abcdef:123456]",
+          "service_provider":{
+            "entity_id":"ec:abcdef:123456"
+          }
+        }
+      }
+    ],
+    "type":"saml_initiate_single_sign_on_exception",
+    "reason":"User [es_user] is not permitted to access service [ec:abcdef:123456]",
+    "saml_initiate_single_sign_on_response":{
+      "post_url":"https://AVoMOJLJfbru.elastic-cloud.com/saml/acs",
+      "saml_response":"urn:elastic:cloud:idp",
+      "saml_status":"urn:oasis:names:tc:SAML:2.0:status:Requester",
+      "error":"User [es_user] is not permitted to access service [ec:abcdef:123456]",
+      "service_provider":{
+        "entity_id":"ec:abcdef:123456"
+      }
+    }
+  },
+  "status":403
 }
 --------------------------------------------------------------------
 // TESTRESPONSE[skip:Do not enable identity provider for the docs cluster, at least not yet]
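Since callers now receive the SAML payload nested inside a standard error body, a client has to dig it out of `error.saml_initiate_single_sign_on_response`. A quick client-side sketch using Jackson (a hypothetical helper, not part of this PR; field names follow the example above):

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

// Hypothetical client-side helper: extracts the embedded SAML payload from the
// error body documented above. Field names come from the example response.
public final class SamlErrorParser {
    private static final ObjectMapper MAPPER = new ObjectMapper();

    public static String samlStatus(String errorBody) throws Exception {
        JsonNode root = MAPPER.readTree(errorBody);
        JsonNode response = root.path("error").path("saml_initiate_single_sign_on_response");
        if (response.isMissingNode()) {
            throw new IllegalArgumentException("not a saml_initiate_single_sign_on error");
        }
        // e.g. "urn:oasis:names:tc:SAML:2.0:status:Requester" -> SP/user-side problem
        return response.path("saml_status").asText();
    }
}
```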
diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/SamlValidateAuthnRequestResponse.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/SamlValidateAuthnRequestResponse.java
index 4a26a37c800f0..e2b32c7e7023c 100644
--- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/SamlValidateAuthnRequestResponse.java
+++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/SamlValidateAuthnRequestResponse.java
@@ -26,7 +26,7 @@ public SamlValidateAuthnRequestResponse(StreamInput in) throws IOException {
         this.spEntityId = in.readString();
         this.assertionConsumerService = in.readString();
         this.forceAuthn = in.readBoolean();
-        this.authnState = in.readMap();
+        this.authnState = in.readGenericMap();
     }

     public SamlValidateAuthnRequestResponse(String spEntityId, String acs, boolean forceAuthn, Map<String, Object> authnState) {
diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java
index a84e35ce47f32..bd425487b9ad0 100644
--- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java
+++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java
@@ -32,16 +32,15 @@
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.ValidationException;
 import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.util.CachedSupplier;
 import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
+import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.gateway.GatewayService;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.index.get.GetResult;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.search.SearchHit;
-import org.elasticsearch.xcontent.NamedXContentRegistry;
 import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentParser;
@@ -332,9 +331,11 @@ private void findDocuments(QueryBuilder query, ActionListener
         stepInfo = (Map) explainIndexWithMissingPolicy.get("step_info");
diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java
index 34b80520b4bab..2b722a6555a08 100644
--- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java
+++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java
@@ -32,6 +32,7 @@
 import org.elasticsearch.xcontent.json.JsonXContent;
 import org.elasticsearch.xpack.core.ilm.AllocateAction;
 import org.elasticsearch.xpack.core.ilm.DeleteAction;
+import org.elasticsearch.xpack.core.ilm.ErrorStep;
 import org.elasticsearch.xpack.core.ilm.ForceMergeAction;
 import org.elasticsearch.xpack.core.ilm.FreezeAction;
 import org.elasticsearch.xpack.core.ilm.LifecycleAction;
@@ -599,7 +600,7 @@ public void testNonexistentPolicy() throws Exception {
         Map<String, Object> indexStatus = (Map<String, Object>) ((Map<String, Object>) responseMap.get("indices")).get(index);
         assertNull(indexStatus.get("phase"));
         assertNull(indexStatus.get("action"));
-        assertNull(indexStatus.get("step"));
+        assertEquals(ErrorStep.NAME, indexStatus.get("step"));
         Map<String, Object> stepInfo = (Map<String, Object>) indexStatus.get("step_info");
         assertNotNull(stepInfo);
         assertEquals("policy [does_not_exist] does not exist", stepInfo.get("reason"));
@@ -1221,7 +1222,7 @@ private void assertHistoryIsPresent(
     }

     // Finally, check that the history index is in a good state
-    String historyIndexName = DataStream.getDefaultBackingIndexName("ilm-history-6", 1);
+    String historyIndexName = DataStream.getDefaultBackingIndexName("ilm-history-7", 1);
     Response explainHistoryIndex = client().performRequest(new Request("GET", historyIndexName + "/_lifecycle/explain"));
     Map<String, Object> responseMap;
     try (InputStream is = explainHistoryIndex.getEntity().getContent()) {
diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java
index 6d3811fd66d9c..45aad066313b2 100644
--- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java
+++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java
@@ -338,9 +338,6 @@ public void testTsdbDataStreams() throws Exception {
         rolloverMaxOneDocCondition(client(), dataStream);

         String rollupIndex = waitAndGetRollupIndexName(client(), backingIndexName, fixedInterval);
-        if (rollupIndex == null) {
-            logger.warn("explain:" + explainIndex(client(), backingIndexName));
-        }
         assertNotNull(String.format(Locale.ROOT, "Cannot retrieve rollup index [%s]", rollupIndex), rollupIndex);
         assertBusy(() -> assertTrue("Rollup index does not exist", indexExists(rollupIndex)), 30, TimeUnit.SECONDS);
         assertBusy(() -> assertFalse("Source index should have been deleted", indexExists(backingIndexName)), 30, TimeUnit.SECONDS);
@@ -395,6 +392,7 @@ public void testILMWaitsForTimeSeriesEndTimeToLapse() throws Exception {
         }, 30, TimeUnit.SECONDS);
     }

+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103981")
     public void testRollupNonTSIndex() throws Exception {
         createIndex(index, alias, false);
         index(client(), index, true, null, "@timestamp", "2020-01-01T05:10:00Z", "volume", 11.0, "metricset", randomAlphaOfLength(5));
@@ -494,6 +492,7 @@ public void testDownsampleTwice() throws Exception {
         }
     }

+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/101428")
     public void testDownsampleTwiceSameInterval() throws Exception {
         // Create the ILM policy
         Request request = new Request("PUT", "_ilm/policy/" + policy);
@@ -606,7 +605,7 @@ public void testDownsampleTwiceSameInterval() throws Exception {
      * @return the name of the rollup index for a given index, null if none exist
      */
     public String waitAndGetRollupIndexName(RestClient client, String originalIndexName, DateHistogramInterval fixedInterval)
-        throws InterruptedException {
+        throws InterruptedException, IOException {
         final String[] rollupIndexName = new String[1];
         waitUntil(() -> {
             try {
@@ -616,7 +615,15 @@ public String waitAndGetRollupIndexName(RestClient client, String originalIndexN
                 return false;
             }
         }, 120, TimeUnit.SECONDS); // High timeout in case we're unlucky and end_time has been increased.
-        logger.info("--> original index name is [{}], rollup index name is [{}]", originalIndexName, rollupIndexName[0]);
+        if (rollupIndexName[0] == null) {
+            logger.warn(
+                "--> original index name is [{}], rollup index name is NULL, possible explanation: {}",
+                originalIndexName,
+                explainIndex(client(), originalIndexName)
+            );
+        } else {
+            logger.info("--> original index name is [{}], rollup index name is [{}]", originalIndexName, rollupIndexName[0]);
+        }
         return rollupIndexName[0];
     }
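waitAndGetRollupIndexName above is built on the poll-until-true-or-timeout idiom. A rough standalone sketch of that idiom (the real ESTestCase#waitUntil may differ in polling interval and defaults):

```java
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

// Standalone sketch of the poll-until-true idiom behind waitAndGetRollupIndexName.
public final class WaitUntilSketch {
    public static boolean waitUntil(BooleanSupplier condition, long maxWait, TimeUnit unit) throws InterruptedException {
        long deadline = System.nanoTime() + unit.toNanos(maxWait);
        while (System.nanoTime() < deadline) {
            if (condition.getAsBoolean()) {
                return true;             // condition met before the timeout
            }
            Thread.sleep(100);           // brief pause between polls
        }
        return condition.getAsBoolean(); // one last check at the deadline
    }
}
```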
diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ClusterStateWaitThresholdBreachTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ClusterStateWaitThresholdBreachTests.java
index da3323966fb94..287374896fd23 100644
--- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ClusterStateWaitThresholdBreachTests.java
+++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ClusterStateWaitThresholdBreachTests.java
@@ -31,7 +31,8 @@
 import org.elasticsearch.xpack.core.ilm.ShrunkShardsAllocatedStep;
 import org.elasticsearch.xpack.core.ilm.Step;
 import org.elasticsearch.xpack.core.ilm.action.ExplainLifecycleAction;
-import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction;
+import org.elasticsearch.xpack.core.ilm.action.ILMActions;
+import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest;
 import org.junit.Before;

 import java.util.Arrays;
@@ -90,8 +91,8 @@ public void testWaitInShrunkShardsAllocatedExceedsThreshold() throws Exception {
             Map.of(MigrateAction.NAME, MigrateAction.DISABLED, ShrinkAction.NAME, new ShrinkAction(1, null))
         );
         LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of("warm", warmPhase));
-        PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy);
-        assertAcked(client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get());
+        PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(lifecyclePolicy);
+        assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());

         // we're configuring a very high number of replicas. this will make the shrunk index unable to allocate successfully, so ILM will
         // wait in the `shrunk-shards-allocated` step (we don't wait for the original index to be GREEN before)
diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java
index b40528664275d..637fbc8f8bf82 100644
--- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java
+++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java
@@ -9,7 +9,7 @@
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
 import org.elasticsearch.action.admin.indices.rollover.RolloverConditions;
-import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkResponse;
@@ -49,7 +49,8 @@
 import org.elasticsearch.xpack.core.ilm.RolloverAction;
 import org.elasticsearch.xpack.core.ilm.WaitForRolloverReadyStep;
 import org.elasticsearch.xpack.core.ilm.action.ExplainLifecycleAction;
-import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction;
+import org.elasticsearch.xpack.core.ilm.action.ILMActions;
+import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest;
 import org.junit.Before;

 import java.io.IOException;
@@ -106,8 +107,8 @@ public void testIndexTemplateSwapsILMForDataStreamLifecycle() throws Exception {
         RolloverAction rolloverIlmAction = new RolloverAction(RolloverConditions.newBuilder().addMaxIndexDocsCondition(2L).build());
         Phase hotPhase = new Phase("hot", TimeValue.ZERO, Map.of(rolloverIlmAction.getWriteableName(), rolloverIlmAction));
         LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of("hot", hotPhase));
-        PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy);
-        assertAcked(client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get());
+        PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(lifecyclePolicy);
+        assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());

         putComposableIndexTemplate(
             indexTemplateName,
@@ -288,8 +289,8 @@ public void testUpdateIndexTemplateFromILMtoBothILMAndDataStreamLifecycle() thro
         RolloverAction rolloverIlmAction = new RolloverAction(RolloverConditions.newBuilder().addMaxIndexDocsCondition(2L).build());
         Phase hotPhase = new Phase("hot", TimeValue.ZERO, Map.of(rolloverIlmAction.getWriteableName(), rolloverIlmAction));
         LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of("hot", hotPhase));
-        PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy);
-        assertAcked(client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get());
+        PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(lifecyclePolicy);
+        assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());

         putComposableIndexTemplate(
             indexTemplateName,
@@ -463,8 +464,8 @@ public void testUpdateIndexTemplateToDataStreamLifecyclePreference() throws Exce
         RolloverAction rolloverIlmAction = new RolloverAction(RolloverConditions.newBuilder().addMaxIndexDocsCondition(2L).build());
         Phase hotPhase = new Phase("hot", TimeValue.ZERO, Map.of(rolloverIlmAction.getWriteableName(), rolloverIlmAction));
         LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of("hot", hotPhase));
-        PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy);
-        assertAcked(client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get());
+        PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(lifecyclePolicy);
+        assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());

         putComposableIndexTemplate(
             indexTemplateName,
@@ -717,8 +718,8 @@ public void testUpdateIndexTemplateToMigrateFromDataStreamLifecycleToIlm() throw
         RolloverAction rolloverIlmAction = new RolloverAction(RolloverConditions.newBuilder().addMaxIndexDocsCondition(2L).build());
         Phase hotPhase = new Phase("hot", TimeValue.ZERO, Map.of(rolloverIlmAction.getWriteableName(), rolloverIlmAction));
         LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of("hot", hotPhase));
-        PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy);
-        assertAcked(client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get());
+        PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(lifecyclePolicy);
+        assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());

         // let's update the index template to remove the data stream lifecycle configuration and replace it with an ILM configuration
         // note that this change will apply to new backing indices only. The write index will continue to be managed by the data stream
@@ -812,8 +813,8 @@ public void testGetDataStreamResponse() throws Exception {
         RolloverAction rolloverIlmAction = new RolloverAction(RolloverConditions.newBuilder().addMaxIndexDocsCondition(2L).build());
         Phase hotPhase = new Phase("hot", TimeValue.ZERO, Map.of(rolloverIlmAction.getWriteableName(), rolloverIlmAction));
         LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of("hot", hotPhase));
-        PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy);
-        assertAcked(client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get());
+        PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(lifecyclePolicy);
+        assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());

         putComposableIndexTemplate(
             indexTemplateName,
@@ -958,7 +959,7 @@ static void putComposableIndexTemplate(
         @Nullable Map<String, Object> metadata,
         @Nullable DataStreamLifecycle lifecycle
     ) throws IOException {
-        PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(name);
+        TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(name);
         request.indexTemplate(
             ComposableIndexTemplate.builder()
                 .indexPatterns(patterns)
@@ -967,7 +968,7 @@ static void putComposableIndexTemplate(
                 .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
                 .build()
         );
-        client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet();
+        client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet();
     }

     private static DataStreamLifecycle customEnabledLifecycle() {
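The recurring change in the files above, with PutLifecycleAction.INSTANCE / PutLifecycleAction.Request becoming ILMActions.PUT / PutLifecycleRequest, consolidates one-singleton-class-per-action into a shared holder of typed action constants. A generic illustration of the shape (simplified stand-in types; the real action-name strings are not shown in this diff):

```java
// Generic illustration of the pattern, not the PR's exact code: a typed
// action constant identifies an action by name and response type.
final class ActionTypeSketch<Response> {
    final String name;
    ActionTypeSketch(String name) { this.name = name; }
}

// Constant names mirror the diff (ILMActions.PUT, START, STOP, ...);
// the action-name strings below are placeholders.
final class IlmActionsSketch {
    private IlmActionsSketch() {}
    static final ActionTypeSketch<Void> PUT = new ActionTypeSketch<>("cluster:admin/ilm/put");
    static final ActionTypeSketch<Void> START = new ActionTypeSketch<>("cluster:admin/ilm/start");
    static final ActionTypeSketch<Void> STOP = new ActionTypeSketch<>("cluster:admin/ilm/stop");
}
```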
diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataTiersMigrationsTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataTiersMigrationsTests.java
index 9dfc3ddcda91e..bf5ab23823614 100644
--- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataTiersMigrationsTests.java
+++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataTiersMigrationsTests.java
@@ -27,7 +27,8 @@
 import org.elasticsearch.xpack.core.ilm.LifecycleSettings;
 import org.elasticsearch.xpack.core.ilm.Phase;
 import org.elasticsearch.xpack.core.ilm.action.ExplainLifecycleAction;
-import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction;
+import org.elasticsearch.xpack.core.ilm.action.ILMActions;
+import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest;
 import org.junit.Before;

 import java.util.Arrays;
@@ -104,8 +105,8 @@ public void testIndexDataTierMigration() throws Exception {
         Phase warmPhase = new Phase("warm", TimeValue.ZERO, Collections.emptyMap());
         Phase coldPhase = new Phase("cold", TimeValue.ZERO, Collections.emptyMap());
         LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of("hot", hotPhase, "warm", warmPhase, "cold", coldPhase));
-        PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy);
-        assertAcked(client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get());
+        PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(lifecyclePolicy);
+        assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());

         Settings settings = Settings.builder()
             .put(indexSettings())
@@ -165,8 +166,8 @@ public void testUserOptsOutOfTierMigration() throws Exception {
         Phase warmPhase = new Phase("warm", TimeValue.ZERO, Collections.emptyMap());
         Phase coldPhase = new Phase("cold", TimeValue.ZERO, Collections.emptyMap());
         LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of("hot", hotPhase, "warm", warmPhase, "cold", coldPhase));
-        PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy);
-        assertAcked(client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get());
+        PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(lifecyclePolicy);
+        assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());

         Settings settings = Settings.builder()
             .put(indexSettings())
diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeIT.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeIT.java
index 8ad3458ceca68..4164911dcad79 100644
--- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeIT.java
+++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeIT.java
@@ -7,7 +7,7 @@

 package org.elasticsearch.xpack.ilm;

-import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
 import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
 import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
@@ -30,7 +30,8 @@
 import org.elasticsearch.xpack.core.ilm.RolloverAction;
 import org.elasticsearch.xpack.core.ilm.ShrinkAction;
 import org.elasticsearch.xpack.core.ilm.action.ExplainLifecycleAction;
-import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction;
+import org.elasticsearch.xpack.core.ilm.action.ILMActions;
+import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest;

 import java.util.Arrays;
 import java.util.Collection;
@@ -75,7 +76,7 @@ public void testShrinkOnTiers() throws Exception {
         phases.put(hotPhase.getName(), hotPhase);
         phases.put(warmPhase.getName(), warmPhase);
         LifecyclePolicy lifecyclePolicy = new LifecyclePolicy("shrink-policy", phases);
-        client().execute(PutLifecycleAction.INSTANCE, new PutLifecycleAction.Request(lifecyclePolicy)).get();
+        client().execute(ILMActions.PUT, new PutLifecycleRequest(lifecyclePolicy)).get();

         Template t = new Template(
             Settings.builder()
@@ -93,8 +94,8 @@ public void testShrinkOnTiers() throws Exception {
             .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
             .build();
         client().execute(
-            PutComposableIndexTemplateAction.INSTANCE,
-            new PutComposableIndexTemplateAction.Request("template").indexTemplate(template)
+            TransportPutComposableIndexTemplateAction.TYPE,
+            new TransportPutComposableIndexTemplateAction.Request("template").indexTemplate(template)
         ).actionGet();

         prepareIndex(index).setCreate(true).setId("1").setSource("@timestamp", "2020-09-09").get();
diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeWithCCRDisabledIT.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeWithCCRDisabledIT.java
index e10e4a466f7f3..7314587573f96 100644
--- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeWithCCRDisabledIT.java
+++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ILMMultiNodeWithCCRDisabledIT.java
@@ -7,7 +7,7 @@

 package org.elasticsearch.xpack.ilm;

-import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
 import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
 import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
@@ -31,7 +31,8 @@
 import org.elasticsearch.xpack.core.ilm.RolloverAction;
 import org.elasticsearch.xpack.core.ilm.ShrinkAction;
 import org.elasticsearch.xpack.core.ilm.action.ExplainLifecycleAction;
-import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction;
+import org.elasticsearch.xpack.core.ilm.action.ILMActions;
+import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest;

 import java.util.Arrays;
 import java.util.Collection;
@@ -75,7 +76,7 @@ public void testShrinkOnTiers() throws Exception {
         Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions);
         LifecyclePolicy lifecyclePolicy = new LifecyclePolicy("shrink-policy", Collections.singletonMap(hotPhase.getName(), hotPhase));
-        client().execute(PutLifecycleAction.INSTANCE, new PutLifecycleAction.Request(lifecyclePolicy)).get();
+        client().execute(ILMActions.PUT, new PutLifecycleRequest(lifecyclePolicy)).get();

         Template t = new Template(
             Settings.builder()
@@ -93,8 +94,8 @@ public void testShrinkOnTiers() throws Exception {
             .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
             .build();
         client().execute(
-            PutComposableIndexTemplateAction.INSTANCE,
-            new PutComposableIndexTemplateAction.Request("template").indexTemplate(template)
+            TransportPutComposableIndexTemplateAction.TYPE,
+            new TransportPutComposableIndexTemplateAction.Request("template").indexTemplate(template)
         ).actionGet();

         prepareIndex(index).setCreate(true).setId("1").setSource("@timestamp", "2020-09-09").get();
diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java
index 1f09fd7457187..b97caa6d96ed8 100644
--- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java
+++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java
@@ -48,8 +48,8 @@
 import org.elasticsearch.xpack.core.ilm.action.ExplainLifecycleAction;
 import org.elasticsearch.xpack.core.ilm.action.GetLifecycleAction;
 import org.elasticsearch.xpack.core.ilm.action.GetStatusAction;
-import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction;
-import org.elasticsearch.xpack.core.ilm.action.StopILMAction;
+import org.elasticsearch.xpack.core.ilm.action.ILMActions;
+import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest;
 import org.junit.Before;

 import java.io.IOException;
@@ -153,9 +153,9 @@ public void testSingleNodeCluster() throws Exception {
         assertThat(exception.getMessage(), containsString("Lifecycle policy not found: [non-existent-policy]"));
         logger.info("Creating lifecycle [test_lifecycle]");
-        PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy);
+        PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(lifecyclePolicy);
         long lowerBoundModifiedDate = Instant.now().toEpochMilli();
-        assertAcked(client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get());
+        assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());
         long upperBoundModifiedDate = Instant.now().toEpochMilli();

         // assert version and modified_date
@@ -198,8 +198,8 @@ public void testNoOpPolicyUpdates() throws Exception {
         phases.put("hot", new Phase("hot", TimeValue.ZERO, Map.of()));
         LifecyclePolicy policy = new LifecyclePolicy("mypolicy", phases);
-        PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(policy);
-        assertAcked(client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get());
+        PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(policy);
+        assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());

         GetLifecycleAction.Response getLifecycleResponse = client().execute(GetLifecycleAction.INSTANCE, new GetLifecycleAction.Request())
             .get();
@@ -209,8 +209,8 @@ public void testNoOpPolicyUpdates() throws Exception {
         assertThat(responseItem.getVersion(), equalTo(1L));

         // Put the same policy in place, which should be a no-op
-        putLifecycleRequest = new PutLifecycleAction.Request(policy);
-        assertAcked(client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get());
+        putLifecycleRequest = new PutLifecycleRequest(policy);
+        assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());

         getLifecycleResponse = client().execute(GetLifecycleAction.INSTANCE, new GetLifecycleAction.Request()).get();
         assertThat(getLifecycleResponse.getPolicies().size(), equalTo(1));
@@ -224,8 +224,8 @@ public void testNoOpPolicyUpdates() throws Exception {
         newPhases.put("cold", new Phase("cold", TimeValue.timeValueDays(1), Map.of()));
         policy = new LifecyclePolicy("mypolicy", newPhases);
-        putLifecycleRequest = new PutLifecycleAction.Request(policy);
-        assertAcked(client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get());
+        putLifecycleRequest = new PutLifecycleRequest(policy);
+        assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());

         getLifecycleResponse = client().execute(GetLifecycleAction.INSTANCE, new GetLifecycleAction.Request()).get();
         assertThat(getLifecycleResponse.getPolicies().size(), equalTo(1));
@@ -240,8 +240,8 @@ public void testExplainExecution() throws Exception {
         logger.info("Starting server1");
         internalCluster().startNode();
         logger.info("Creating lifecycle [test_lifecycle]");
-        PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy);
-        assertAcked(client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get());
+        PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(lifecyclePolicy);
+        assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());

         GetLifecycleAction.Response getLifecycleResponse = client().execute(GetLifecycleAction.INSTANCE, new GetLifecycleAction.Request())
             .get();
@@ -313,8 +313,8 @@ public void testExplainParseOriginationDate() throws Exception {
         logger.info("Starting server2");
         internalCluster().startNode();
         logger.info("Creating lifecycle [test_lifecycle]");
-        PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy);
-        assertAcked(client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get());
+        PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(lifecyclePolicy);
+        assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());

         GetLifecycleAction.Response getLifecycleResponse = client().execute(GetLifecycleAction.INSTANCE, new GetLifecycleAction.Request())
             .get();
@@ -401,8 +401,8 @@ public void testMasterDedicatedDataDedicated() throws Exception {
         }

         logger.info("Creating lifecycle [test_lifecycle]");
-        PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy);
-        assertAcked(client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get());
+        PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(lifecyclePolicy);
+        assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());
         logger.info("Creating index [test]");
         CreateIndexResponse createIndexResponse = indicesAdmin().create(new CreateIndexRequest("test").settings(settings)).actionGet();
         assertAcked(createIndexResponse);
@@ -429,7 +429,7 @@ public void testCreatePolicyWhenStopped() throws Exception {
         final String server_1 = internalCluster().startNode();
         final String node1 = getLocalNodeId(server_1);

-        assertAcked(client().execute(StopILMAction.INSTANCE, new StopILMRequest()).get());
+        assertAcked(client().execute(ILMActions.STOP, new StopILMRequest()).get());
         assertBusy(() -> {
             OperationMode mode = client().execute(GetStatusAction.INSTANCE, new GetStatusAction.Request()).get().getMode();
             logger.info("--> waiting for STOPPED, currently: {}", mode);
         });
@@ -437,9 +437,9 @@ public void testCreatePolicyWhenStopped() throws Exception {
         logger.info("Creating lifecycle [test_lifecycle]");
-        PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy);
+        PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(lifecyclePolicy);
         long lowerBoundModifiedDate = Instant.now().toEpochMilli();
-        assertAcked(client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get());
+        assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());
         long upperBoundModifiedDate = Instant.now().toEpochMilli();

         // assert version and modified_date
diff --git a/x-pack/plugin/ilm/src/main/java/module-info.java b/x-pack/plugin/ilm/src/main/java/module-info.java
index aa24c2d6f333c..591c9786247e6 100644
--- a/x-pack/plugin/ilm/src/main/java/module-info.java
+++ b/x-pack/plugin/ilm/src/main/java/module-info.java
@@ -18,4 +18,6 @@
     provides org.elasticsearch.reservedstate.ReservedClusterStateHandlerProvider
         with
         org.elasticsearch.xpack.ilm.ReservedLifecycleStateHandlerProvider;
+
+    provides org.elasticsearch.features.FeatureSpecification with org.elasticsearch.xpack.ilm.IndexLifecycleFeatures;
 }
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java
index 53e4d3de463fd..1b52486f2b5ea 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java
@@ -58,12 +58,8 @@
 import org.elasticsearch.xpack.core.ilm.action.ExplainLifecycleAction;
 import org.elasticsearch.xpack.core.ilm.action.GetLifecycleAction;
 import org.elasticsearch.xpack.core.ilm.action.GetStatusAction;
-import org.elasticsearch.xpack.core.ilm.action.MoveToStepAction;
-import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction;
+import org.elasticsearch.xpack.core.ilm.action.ILMActions;
 import org.elasticsearch.xpack.core.ilm.action.RemoveIndexLifecyclePolicyAction;
-import org.elasticsearch.xpack.core.ilm.action.RetryAction;
-import org.elasticsearch.xpack.core.ilm.action.StartILMAction;
-import org.elasticsearch.xpack.core.ilm.action.StopILMAction;
 import org.elasticsearch.xpack.ilm.action.ReservedLifecycleAction;
 import org.elasticsearch.xpack.ilm.action.RestDeleteLifecycleAction;
 import org.elasticsearch.xpack.ilm.action.RestExplainLifecycleAction;
@@ -147,6 +143,7 @@ public Collection<Object> createComponents(PluginServices services) {
         ILMHistoryTemplateRegistry ilmTemplateRegistry = new ILMHistoryTemplateRegistry(
             settings,
             services.clusterService(),
+            services.featureService(),
             services.threadPool(),
             services.client(),
             services.xContentRegistry()
@@ -291,15 +288,15 @@ public List<RestHandler> getRestHandlers(
         actions.addAll(
             Arrays.asList(
                 // add ILM actions
-                new ActionHandler<>(PutLifecycleAction.INSTANCE, TransportPutLifecycleAction.class),
+                new ActionHandler<>(ILMActions.PUT, TransportPutLifecycleAction.class),
                 new ActionHandler<>(GetLifecycleAction.INSTANCE, TransportGetLifecycleAction.class),
                 new ActionHandler<>(DeleteLifecycleAction.INSTANCE, TransportDeleteLifecycleAction.class),
                 new ActionHandler<>(ExplainLifecycleAction.INSTANCE, TransportExplainLifecycleAction.class),
                 new ActionHandler<>(RemoveIndexLifecyclePolicyAction.INSTANCE, TransportRemoveIndexLifecyclePolicyAction.class),
-                new ActionHandler<>(MoveToStepAction.INSTANCE, TransportMoveToStepAction.class),
-                new ActionHandler<>(RetryAction.INSTANCE, TransportRetryAction.class),
-                new ActionHandler<>(StartILMAction.INSTANCE, TransportStartILMAction.class),
-                new ActionHandler<>(StopILMAction.INSTANCE, TransportStopILMAction.class),
+                new ActionHandler<>(ILMActions.MOVE_TO_STEP, TransportMoveToStepAction.class),
+                new ActionHandler<>(ILMActions.RETRY, TransportRetryAction.class),
+                new ActionHandler<>(ILMActions.START, TransportStartILMAction.class),
+                new ActionHandler<>(ILMActions.STOP, TransportStopILMAction.class),
                 new ActionHandler<>(GetStatusAction.INSTANCE, TransportGetStatusAction.class)
             )
         );
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleFeatures.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleFeatures.java
new file mode 100644
index 0000000000000..cc78271e2d878
--- /dev/null
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleFeatures.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.ilm;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.features.FeatureSpecification;
+import org.elasticsearch.features.NodeFeature;
+import org.elasticsearch.xpack.ilm.history.ILMHistoryTemplateRegistry;
+
+import java.util.Map;
+
+public class IndexLifecycleFeatures implements FeatureSpecification {
+    @Override
+    public Map<NodeFeature, Version> getHistoricalFeatures() {
+        return Map.of(ILMHistoryTemplateRegistry.MANAGED_BY_DATA_STREAM_LIFECYCLE, Version.V_8_12_0);
+    }
+}
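IndexLifecycleFeatures is hooked up through the `provides` clause added to module-info.java above; on a plain classpath the same discovery happens through ServiceLoader and a META-INF/services entry. A minimal JDK-only sketch of that lookup (FeatureSpec here is a stand-in interface, not the ES one):

```java
import java.util.ServiceLoader;

// Generic JDK service-discovery mechanism; the ES plumbing around
// FeatureSpecification is more involved than this.
interface FeatureSpec {
    String describe();
}

class SpiConsumerSketch {
    public static void main(String[] args) {
        // Finds implementations declared via a module-info `provides` clause
        // or a META-INF/services/<interface-name> entry on the classpath.
        for (FeatureSpec spec : ServiceLoader.load(FeatureSpec.class)) {
            System.out.println(spec.describe());
        }
    }
}
```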
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleAction.java
index 75f12c7e351e1..947a028e9262e 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleAction.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleAction.java
@@ -16,7 +16,7 @@
 import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.xcontent.XContentParserConfiguration;
 import org.elasticsearch.xpack.core.ilm.LifecyclePolicy;
-import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction;
+import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest;

 import java.io.IOException;
 import java.util.ArrayList;
@@ -56,12 +56,12 @@ public String name() {
     }

     @SuppressWarnings("unchecked")
-    public Collection<PutLifecycleAction.Request> prepare(Object input) throws IOException {
-        List<PutLifecycleAction.Request> result = new ArrayList<>();
+    public Collection<PutLifecycleRequest> prepare(Object input) throws IOException {
+        List<PutLifecycleRequest> result = new ArrayList<>();

         List<LifecyclePolicy> policies = (List<LifecyclePolicy>) input;

         for (var policy : policies) {
-            PutLifecycleAction.Request request = new PutLifecycleAction.Request(policy);
+            PutLifecycleRequest request = new PutLifecycleRequest(policy);
             validate(request);
             result.add(request);
         }
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetLifecycleAction.java
index b92f603da49b5..f5221ba980440 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetLifecycleAction.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetLifecycleAction.java
@@ -12,7 +12,7 @@
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestCancellableNodeClient;
-import org.elasticsearch.rest.action.RestChunkedToXContentListener;
+import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener;
 import org.elasticsearch.xpack.core.ilm.action.GetLifecycleAction;

 import java.util.List;
@@ -41,7 +41,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient
         return channel -> new RestCancellableNodeClient(client, restRequest.getHttpChannel()).execute(
             GetLifecycleAction.INSTANCE,
             getLifecycleRequest,
-            new RestChunkedToXContentListener<>(channel)
+            new RestRefCountedChunkedToXContentListener<>(channel)
         );
     }
 }
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMigrateToDataTiersAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMigrateToDataTiersAction.java
index 6a37ae708f872..095cb212be558 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMigrateToDataTiersAction.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMigrateToDataTiersAction.java
@@ -33,9 +33,14 @@ public List routes() {

     @Override
     protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
-        MigrateToDataTiersRequest migrateRequest = request.hasContent()
-            ? MigrateToDataTiersRequest.parse(request.contentParser())
-            : new MigrateToDataTiersRequest();
+        MigrateToDataTiersRequest migrateRequest;
+        if (request.hasContent()) {
+            try (var parser = request.contentParser()) {
+                migrateRequest = MigrateToDataTiersRequest.parse(parser);
+            }
+        } else {
+            migrateRequest = new MigrateToDataTiersRequest();
+        }
         migrateRequest.setDryRun(request.paramAsBoolean("dry_run", false));
         return channel -> client.execute(MigrateToDataTiersAction.INSTANCE, migrateRequest, new RestToXContentListener<>(channel));
     }
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMoveToStepAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMoveToStepAction.java
index e963cade94c81..6740d7a2cd4b4 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMoveToStepAction.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMoveToStepAction.java
@@ -13,7 +13,7 @@
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestToXContentListener;
 import org.elasticsearch.xcontent.XContentParser;
-import org.elasticsearch.xpack.core.ilm.action.MoveToStepAction;
+import org.elasticsearch.xpack.core.ilm.action.ILMActions;

 import java.io.IOException;
 import java.util.List;
@@ -35,10 +35,12 @@ public String getName() {
     @Override
     protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
         String index = restRequest.param("name");
-        XContentParser parser = restRequest.contentParser();
-        MoveToStepAction.Request request = MoveToStepAction.Request.parseRequest(index, parser);
+        TransportMoveToStepAction.Request request;
+        try (XContentParser parser = restRequest.contentParser()) {
+            request = TransportMoveToStepAction.Request.parseRequest(index, parser);
+        }
         request.timeout(restRequest.paramAsTime("timeout", request.timeout()));
         request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout()));
-        return channel -> client.execute(MoveToStepAction.INSTANCE, request, new RestToXContentListener<>(channel));
+        return channel -> client.execute(ILMActions.MOVE_TO_STEP, request, new RestToXContentListener<>(channel));
     }
 }
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestPutLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestPutLifecycleAction.java
index f15a244710987..280f3a609e604 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestPutLifecycleAction.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestPutLifecycleAction.java
@@ -12,7 +12,8 @@
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestToXContentListener;
 import org.elasticsearch.xcontent.XContentParser;
-import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction;
+import org.elasticsearch.xpack.core.ilm.action.ILMActions;
+import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest;

 import java.io.IOException;
 import java.util.List;
@@ -35,11 +36,11 @@ public String getName() {
     protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
         String lifecycleName = restRequest.param("name");
         try (XContentParser parser = restRequest.contentParser()) {
-            PutLifecycleAction.Request putLifecycleRequest = PutLifecycleAction.Request.parseRequest(lifecycleName, parser);
+            PutLifecycleRequest putLifecycleRequest = PutLifecycleRequest.parseRequest(lifecycleName, parser);
             putLifecycleRequest.timeout(restRequest.paramAsTime("timeout", putLifecycleRequest.timeout()));
             putLifecycleRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", putLifecycleRequest.masterNodeTimeout()));

-            return channel -> client.execute(PutLifecycleAction.INSTANCE, putLifecycleRequest, new RestToXContentListener<>(channel));
+            return channel -> client.execute(ILMActions.PUT, putLifecycleRequest, new RestToXContentListener<>(channel));
         }
     }
 }
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRetryAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRetryAction.java
index d26094438d4c5..bfd186ab5b6ba 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRetryAction.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRetryAction.java
@@ -14,7 +14,7 @@
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestToXContentListener;
-import org.elasticsearch.xpack.core.ilm.action.RetryAction;
+import org.elasticsearch.xpack.core.ilm.action.ILMActions;

 import java.util.List;
@@ -35,11 +35,11 @@ public String getName() {
     @Override
     protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) {
         String[] indices = Strings.splitStringByCommaToArray(restRequest.param("index"));
-        RetryAction.Request request = new RetryAction.Request(indices);
+        TransportRetryAction.Request request = new TransportRetryAction.Request(indices);
         request.timeout(restRequest.paramAsTime("timeout", request.timeout()));
         request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout()));
         request.indices(indices);
         request.indicesOptions(IndicesOptions.fromRequest(restRequest, IndicesOptions.strictExpandOpen()));
-        return channel -> client.execute(RetryAction.INSTANCE, request, new RestToXContentListener<>(channel));
+        return channel -> client.execute(ILMActions.RETRY, request, new RestToXContentListener<>(channel));
     }
 }
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStartILMAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStartILMAction.java
index cb2b98c7ef907..a130f255f3dfb 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStartILMAction.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStartILMAction.java
@@ -12,7 +12,7 @@
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestToXContentListener;
 import org.elasticsearch.xpack.core.ilm.StartILMRequest;
-import org.elasticsearch.xpack.core.ilm.action.StartILMAction;
+import org.elasticsearch.xpack.core.ilm.action.ILMActions;

 import java.util.List;
@@ -35,6 +35,6 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient
         StartILMRequest request = new StartILMRequest();
         request.timeout(restRequest.paramAsTime("timeout", request.timeout()));
         request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout()));
-        return channel -> client.execute(StartILMAction.INSTANCE, request, new RestToXContentListener<>(channel));
+        return channel -> client.execute(ILMActions.START, request, new RestToXContentListener<>(channel));
     }
 }
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStopAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStopAction.java
index 3b786064f6450..e2bb6c20ab6ec 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStopAction.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStopAction.java
@@ -12,7 +12,7 @@
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.action.RestToXContentListener;
 import org.elasticsearch.xpack.core.ilm.StopILMRequest;
-import org.elasticsearch.xpack.core.ilm.action.StopILMAction;
+import org.elasticsearch.xpack.core.ilm.action.ILMActions;

 import java.util.List;
@@ -35,6 +35,6 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient
         StopILMRequest request = new StopILMRequest();
         request.timeout(restRequest.paramAsTime("timeout", request.timeout()));
         request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout()));
-        return channel -> client.execute(StopILMAction.INSTANCE, request, new RestToXContentListener<>(channel));
+        return channel -> client.execute(ILMActions.STOP, request, new RestToXContentListener<>(channel));
     }
 }
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java
index 90438f5a753ba..e032b300a824e 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java
@@ -152,7 +152,8 @@ static IndexLifecycleExplainResponse getIndexLifecycleExplainResponse(
             originationDate != -1L ? originationDate : lifecycleState.lifecycleDate(),
             lifecycleState.phase(),
             lifecycleState.action(),
-            lifecycleState.step(),
+            // treat a missing policy as if the index is in the error step
+            indexLifecycleService.policyExists(policyName) == false ? ErrorStep.NAME : lifecycleState.step(),
             lifecycleState.failedStep(),
             lifecycleState.isAutoRetryableError(),
             lifecycleState.failedStepRetryCount(),
ErrorStep.NAME : lifecycleState.step(), lifecycleState.failedStep(), lifecycleState.isAutoRetryableError(), lifecycleState.failedStepRetryCount(), diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMoveToStepAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMoveToStepAction.java index 0774e037fae5a..6061b6db89724 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMoveToStepAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMoveToStepAction.java @@ -11,7 +11,9 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; @@ -22,18 +24,30 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ilm.Step; -import org.elasticsearch.xpack.core.ilm.action.MoveToStepAction; -import org.elasticsearch.xpack.core.ilm.action.MoveToStepAction.Request; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; import org.elasticsearch.xpack.ilm.IndexLifecycleService; -public class TransportMoveToStepAction extends TransportMasterNodeAction { +import java.io.IOException; +import java.util.Objects; + +public class TransportMoveToStepAction extends TransportMasterNodeAction { private static final Logger logger = LogManager.getLogger(TransportMoveToStepAction.class); IndexLifecycleService indexLifecycleService; @@ -48,7 +62,7 @@ public TransportMoveToStepAction( IndexLifecycleService indexLifecycleService ) { super( - MoveToStepAction.NAME, + ILMActions.MOVE_TO_STEP.name(), transportService, clusterService, threadPool, @@ -172,4 +186,214 @@ private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String protected ClusterBlockException checkBlock(Request request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } + + public static class Request extends AcknowledgedRequest implements ToXContentObject { + static final ParseField CURRENT_KEY_FIELD = new ParseField("current_step"); + static final ParseField NEXT_KEY_FIELD = new ParseField("next_step"); + private static final 
ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "move_to_step_request", + false, + (a, index) -> { + Step.StepKey currentStepKey = (Step.StepKey) a[0]; + PartialStepKey nextStepKey = (PartialStepKey) a[1]; + return new Request(index, currentStepKey, nextStepKey); + } + ); + + static { + // The current step uses the strict parser (meaning it requires all three parts of a stepkey) + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, name) -> Step.StepKey.parse(p), CURRENT_KEY_FIELD); + // The target step uses the parser that allows specifying only the phase, or the phase and action + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, name) -> PartialStepKey.parse(p), NEXT_KEY_FIELD); + } + + private String index; + private Step.StepKey currentStepKey; + private PartialStepKey nextStepKey; + + public Request(String index, Step.StepKey currentStepKey, PartialStepKey nextStepKey) { + this.index = index; + this.currentStepKey = currentStepKey; + this.nextStepKey = nextStepKey; + } + + public Request(StreamInput in) throws IOException { + super(in); + this.index = in.readString(); + this.currentStepKey = Step.StepKey.readFrom(in); + this.nextStepKey = new PartialStepKey(in); + } + + public Request() {} + + public String getIndex() { + return index; + } + + public Step.StepKey getCurrentStepKey() { + return currentStepKey; + } + + public PartialStepKey getNextStepKey() { + return nextStepKey; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public static Request parseRequest(String name, XContentParser parser) { + return PARSER.apply(parser, name); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(index); + currentStepKey.writeTo(out); + nextStepKey.writeTo(out); + } + + @Override + public int hashCode() { + return Objects.hash(index, currentStepKey, nextStepKey); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(index, other.index) + && Objects.equals(currentStepKey, other.currentStepKey) + && Objects.equals(nextStepKey, other.nextStepKey); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field(CURRENT_KEY_FIELD.getPreferredName(), currentStepKey) + .field(NEXT_KEY_FIELD.getPreferredName(), nextStepKey) + .endObject(); + } + + /** + * A PartialStepKey is like a {@link Step.StepKey}, however, the action and step name are optional. 
+ */ + public static class PartialStepKey implements Writeable, ToXContentObject { + private final String phase; + private final String action; + private final String name; + + public static final ParseField PHASE_FIELD = new ParseField("phase"); + public static final ParseField ACTION_FIELD = new ParseField("action"); + public static final ParseField NAME_FIELD = new ParseField("name"); + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "step_specification", + a -> new PartialStepKey((String) a[0], (String) a[1], (String) a[2]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), PHASE_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), ACTION_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), NAME_FIELD); + } + + public PartialStepKey(String phase, @Nullable String action, @Nullable String name) { + this.phase = phase; + this.action = action; + this.name = name; + if (name != null && action == null) { + throw new IllegalArgumentException( + "phase; phase and action; or phase, action, and step must be provided, " + + "but a step name was specified without a corresponding action" + ); + } + } + + public PartialStepKey(StreamInput in) throws IOException { + this.phase = in.readString(); + this.action = in.readOptionalString(); + this.name = in.readOptionalString(); + if (name != null && action == null) { + throw new IllegalArgumentException( + "phase; phase and action; or phase, action, and step must be provided, " + + "but a step name was specified without a corresponding action" + ); + } + } + + public static PartialStepKey parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(phase); + out.writeOptionalString(action); + out.writeOptionalString(name); + } + + @Nullable + public String getPhase() { + return phase; + } + + @Nullable + public String getAction() { + return action; + } + + @Nullable + public String getName() { + return name; + } + + @Override + public int hashCode() { + return Objects.hash(phase, action, name); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + PartialStepKey other = (PartialStepKey) obj; + return Objects.equals(phase, other.phase) && Objects.equals(action, other.action) && Objects.equals(name, other.name); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(PHASE_FIELD.getPreferredName(), phase); + if (action != null) { + builder.field(ACTION_FIELD.getPreferredName(), action); + } + if (name != null) { + builder.field(NAME_FIELD.getPreferredName(), name); + } + builder.endObject(); + return builder; + } + } + } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportPutLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportPutLifecycleAction.java index 3b7a242ca5021..26cede5881aa5 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportPutLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportPutLifecycleAction.java @@ -40,8 +40,8 @@ import org.elasticsearch.xpack.core.ilm.Phase; 
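With the request class folded into TransportMoveToStepAction, the two parser declarations above are easiest to read by example: current_step requires all three parts of a step key, while next_step accepts a partial key (phase alone, or phase plus action). A minimal sketch, with an illustrative JSON body and index name:

```java
// Illustrative only: the step names and index are made up for the example.
String json = """
    {
      "current_step": { "phase": "hot", "action": "rollover", "name": "check-rollover-ready" },
      "next_step": { "phase": "warm" }
    }""";
try (XContentParser parser = XContentType.JSON.xContent()
        .createParser(XContentParserConfiguration.EMPTY, json)) {
    TransportMoveToStepAction.Request request =
        TransportMoveToStepAction.Request.parseRequest("my-index", parser);
    // A phase-only target is valid; a step name without an action is rejected
    // by the PartialStepKey constructor.
    assert request.getNextStepKey().getAction() == null;
}
```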
import org.elasticsearch.xpack.core.ilm.SearchableSnapshotAction; import org.elasticsearch.xpack.core.ilm.WaitForSnapshotAction; -import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; -import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction.Request; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; +import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; import java.time.Instant; @@ -61,7 +61,7 @@ * This class is responsible for bootstrapping {@link IndexLifecycleMetadata} into the cluster-state, as well * as adding the desired new policy to be inserted. */ -public class TransportPutLifecycleAction extends TransportMasterNodeAction { +public class TransportPutLifecycleAction extends TransportMasterNodeAction { private static final Logger logger = LogManager.getLogger(TransportPutLifecycleAction.class); private final NamedXContentRegistry xContentRegistry; @@ -80,12 +80,12 @@ public TransportPutLifecycleAction( Client client ) { super( - PutLifecycleAction.NAME, + ILMActions.PUT.name(), transportService, clusterService, threadPool, actionFilters, - Request::new, + PutLifecycleRequest::new, indexNameExpressionResolver, AcknowledgedResponse::readFrom, EsExecutors.DIRECT_EXECUTOR_SERVICE @@ -96,7 +96,12 @@ public TransportPutLifecycleAction( } @Override - protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) { + protected void masterOperation( + Task task, + PutLifecycleRequest request, + ClusterState state, + ActionListener listener + ) { // headers from the thread context stored by the AuthenticationService to be shared between the // REST layer and the Transport layer here must be accessed within this thread and not in the // cluster state thread in the ClusterStateUpdateTask below since that thread does not share the @@ -122,7 +127,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A } public static class UpdateLifecyclePolicyTask extends AckedClusterStateUpdateTask { - private final Request request; + private final PutLifecycleRequest request; private final XPackLicenseState licenseState; private final Map filteredHeaders; private final NamedXContentRegistry xContentRegistry; @@ -130,7 +135,7 @@ public static class UpdateLifecyclePolicyTask extends AckedClusterStateUpdateTas private final boolean verboseLogging; public UpdateLifecyclePolicyTask( - Request request, + PutLifecycleRequest request, ActionListener listener, XPackLicenseState licenseState, Map filteredHeaders, @@ -152,7 +157,12 @@ public UpdateLifecyclePolicyTask( *

    * It disables verbose logging and has no filtered headers. */ - UpdateLifecyclePolicyTask(Request request, XPackLicenseState licenseState, NamedXContentRegistry xContentRegistry, Client client) { + UpdateLifecyclePolicyTask( + PutLifecycleRequest request, + XPackLicenseState licenseState, + NamedXContentRegistry xContentRegistry, + Client client + ) { super(request, null); this.request = request; this.licenseState = licenseState; @@ -308,7 +318,7 @@ private static void validatePrerequisites(LifecyclePolicy policy, ClusterState s } @Override - protected ClusterBlockException checkBlock(Request request, ClusterState state) { + protected ClusterBlockException checkBlock(PutLifecycleRequest request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } @@ -318,7 +328,7 @@ public Optional reservedStateHandlerName() { } @Override - public Set modifiedKeys(Request request) { + public Set modifiedKeys(PutLifecycleRequest request) { return Set.of(request.getPolicy().getName()); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportRetryAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportRetryAction.java index 4a038551b04e0..5818ce6582bef 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportRetryAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportRetryAction.java @@ -10,7 +10,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; @@ -22,18 +26,24 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.LifecycleExecutionState; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ilm.Step.StepKey; -import org.elasticsearch.xpack.core.ilm.action.RetryAction; -import org.elasticsearch.xpack.core.ilm.action.RetryAction.Request; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; import org.elasticsearch.xpack.ilm.IndexLifecycleService; -public class TransportRetryAction extends TransportMasterNodeAction { +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +public class TransportRetryAction extends TransportMasterNodeAction { private static final Logger logger = LogManager.getLogger(TransportRetryAction.class); @@ -49,7 +59,7 @@ public TransportRetryAction( IndexLifecycleService indexLifecycleService ) { super( - RetryAction.NAME, + 
ILMActions.RETRY.name(), transportService, clusterService, threadPool, @@ -102,4 +112,72 @@ private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String protected ClusterBlockException checkBlock(Request request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } + + public static class Request extends AcknowledgedRequest implements IndicesRequest.Replaceable { + private String[] indices = Strings.EMPTY_ARRAY; + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); + + public Request(String... indices) { + this.indices = indices; + } + + public Request(StreamInput in) throws IOException { + super(in); + this.indices = in.readStringArray(); + this.indicesOptions = IndicesOptions.readIndicesOptions(in); + } + + public Request() {} + + @Override + public Request indices(String... indices) { + this.indices = indices; + return this; + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public Request indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + return this; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(indices), indicesOptions); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.deepEquals(indices, other.indices) && Objects.equals(indicesOptions, other.indicesOptions); + } + + } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStartILMAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStartILMAction.java index f725480e2a902..5a0e3d1583066 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStartILMAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStartILMAction.java @@ -26,7 +26,7 @@ import org.elasticsearch.xpack.core.ilm.OperationMode; import org.elasticsearch.xpack.core.ilm.OperationModeUpdateTask; import org.elasticsearch.xpack.core.ilm.StartILMRequest; -import org.elasticsearch.xpack.core.ilm.action.StartILMAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; public class TransportStartILMAction extends AcknowledgedTransportMasterNodeAction { @@ -39,7 +39,7 @@ public TransportStartILMAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super( - StartILMAction.NAME, + ILMActions.START.name(), transportService, clusterService, threadPool, diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStopILMAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStopILMAction.java index af457df806569..20f07e6bf074e 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStopILMAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStopILMAction.java @@ -26,7 +26,7 @@ import org.elasticsearch.xpack.core.ilm.OperationMode; import org.elasticsearch.xpack.core.ilm.OperationModeUpdateTask; 
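The super(...) call above now takes its transport action name from ILMActions.RETRY.name(); the ILMActions holder itself lives in x-pack core and its definition is not part of this section. Call sites change shape as in the updated tests, roughly:

```java
// Mirrors LifecycleOperationSnapshotTests below; `policy` is assumed to be a
// LifecyclePolicy built elsewhere.
client().execute(ILMActions.PUT, new PutLifecycleRequest(policy)).get();
client().execute(ILMActions.STOP, new StopILMRequest()).get();
```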
import org.elasticsearch.xpack.core.ilm.StopILMRequest; -import org.elasticsearch.xpack.core.ilm.action.StopILMAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; public class TransportStopILMAction extends AcknowledgedTransportMasterNodeAction { @@ -39,7 +39,7 @@ public TransportStopILMAction( IndexNameExpressionResolver indexNameExpressionResolver ) { super( - StopILMAction.NAME, + ILMActions.STOP.name(), transportService, clusterService, threadPool, diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryTemplateRegistry.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryTemplateRegistry.java index c85ead4aada53..28c28ef6e4c55 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryTemplateRegistry.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryTemplateRegistry.java @@ -8,9 +8,12 @@ package org.elasticsearch.xpack.ilm.history; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.ClientHelper; @@ -35,12 +38,15 @@ public class ILMHistoryTemplateRegistry extends IndexTemplateRegistry { // version 4: add `allow_auto_create` setting // version 5: convert to data stream // version 6: manage by data stream lifecycle - public static final int INDEX_TEMPLATE_VERSION = 6; + // version 7: version the index template name so we can upgrade existing deployments + public static final int INDEX_TEMPLATE_VERSION = 7; + public static final NodeFeature MANAGED_BY_DATA_STREAM_LIFECYCLE = new NodeFeature("ilm-history-managed-by-dsl"); public static final String ILM_TEMPLATE_VERSION_VARIABLE = "xpack.ilm_history.template.version"; - public static final String ILM_TEMPLATE_NAME = "ilm-history"; + public static final String ILM_TEMPLATE_NAME = "ilm-history-" + INDEX_TEMPLATE_VERSION; public static final String ILM_POLICY_NAME = "ilm-history-ilm-policy"; + private final FeatureService featureService; @Override protected boolean requiresMasterNode() { @@ -52,11 +58,13 @@ protected boolean requiresMasterNode() { public ILMHistoryTemplateRegistry( Settings nodeSettings, ClusterService clusterService, + FeatureService featureService, ThreadPool threadPool, Client client, NamedXContentRegistry xContentRegistry ) { super(nodeSettings, clusterService, threadPool, client, xContentRegistry); + this.featureService = featureService; this.ilmHistoryEnabled = LifecycleSettings.LIFECYCLE_HISTORY_INDEX_ENABLED_SETTING.get(nodeSettings); } @@ -96,4 +104,9 @@ protected List getLifecyclePolicies() { protected String getOrigin() { return ClientHelper.INDEX_LIFECYCLE_ORIGIN; } + + @Override + protected boolean isClusterReady(ClusterChangedEvent event) { + return featureService.clusterHasFeature(event.state(), MANAGED_BY_DATA_STREAM_LIFECYCLE); + } } diff --git a/x-pack/plugin/ilm/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/x-pack/plugin/ilm/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification new file mode 100644 index 
0000000000000..1bf03ae25edd2 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification @@ -0,0 +1,8 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0; you may not use this file except in compliance with the Elastic License +# 2.0. +# + +org.elasticsearch.xpack.ilm.IndexLifecycleFeatures diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/LifecycleOperationSnapshotTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/LifecycleOperationSnapshotTests.java index a6f38b9dc716e..532a8f58b810e 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/LifecycleOperationSnapshotTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/LifecycleOperationSnapshotTests.java @@ -25,8 +25,8 @@ import org.elasticsearch.xpack.core.ilm.ReadOnlyAction; import org.elasticsearch.xpack.core.ilm.StopILMRequest; import org.elasticsearch.xpack.core.ilm.action.GetStatusAction; -import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; -import org.elasticsearch.xpack.core.ilm.action.StopILMAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; +import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; import org.elasticsearch.xpack.core.slm.action.ExecuteSnapshotLifecycleAction; import org.elasticsearch.xpack.core.slm.action.GetSLMStatusAction; @@ -75,8 +75,8 @@ public void testModeSnapshotRestore() throws Exception { ).get(); client().execute( - PutLifecycleAction.INSTANCE, - new PutLifecycleAction.Request( + ILMActions.PUT, + new PutLifecycleRequest( new LifecyclePolicy( "ilm-policy", Map.of("warm", new Phase("warm", TimeValue.timeValueHours(1), Map.of("readonly", new ReadOnlyAction()))) @@ -108,7 +108,7 @@ public void testModeSnapshotRestore() throws Exception { } }); - assertAcked(client().execute(StopILMAction.INSTANCE, new StopILMRequest()).get()); + assertAcked(client().execute(ILMActions.STOP, new StopILMRequest()).get()); assertAcked(client().execute(StopSLMAction.INSTANCE, new StopSLMAction.Request()).get()); assertBusy(() -> assertThat(ilmMode(), equalTo(OperationMode.STOPPED))); assertBusy(() -> assertThat(slmMode(), equalTo(OperationMode.STOPPED))); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/MoveToStepRequestTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/MoveToStepRequestTests.java similarity index 59% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/MoveToStepRequestTests.java rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/MoveToStepRequestTests.java index bbfc6221a41ea..441e61708e3cc 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/MoveToStepRequestTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/MoveToStepRequestTests.java @@ -5,17 +5,16 @@ * 2.0. 
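The new META-INF/services entry registers org.elasticsearch.xpack.ilm.IndexLifecycleFeatures with the FeatureSpecification service loader. That class's body is outside this section; presumably it publishes the node feature that ILMHistoryTemplateRegistry's isClusterReady override checks before installing the versioned template, along the lines of:

```java
// Sketch under that assumption; imports omitted.
public class IndexLifecycleFeatures implements FeatureSpecification {
    @Override
    public Set<NodeFeature> getFeatures() {
        return Set.of(ILMHistoryTemplateRegistry.MANAGED_BY_DATA_STREAM_LIFECYCLE);
    }
}
```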
* */ -package org.elasticsearch.xpack.core.ilm.action; +package org.elasticsearch.xpack.ilm.action; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractXContentSerializingTestCase; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ilm.Step.StepKey; import org.elasticsearch.xpack.core.ilm.StepKeyTests; -import org.elasticsearch.xpack.core.ilm.action.MoveToStepAction.Request; import org.junit.Before; -public class MoveToStepRequestTests extends AbstractXContentSerializingTestCase { +public class MoveToStepRequestTests extends AbstractXContentSerializingTestCase { private String index; private static final StepKeyTests stepKeyTests = new StepKeyTests(); @@ -26,25 +25,25 @@ public void setup() { } @Override - protected Request createTestInstance() { - return new Request(index, stepKeyTests.createTestInstance(), randomStepSpecification()); + protected TransportMoveToStepAction.Request createTestInstance() { + return new TransportMoveToStepAction.Request(index, stepKeyTests.createTestInstance(), randomStepSpecification()); } @Override - protected Writeable.Reader instanceReader() { - return Request::new; + protected Writeable.Reader instanceReader() { + return TransportMoveToStepAction.Request::new; } @Override - protected Request doParseInstance(XContentParser parser) { - return Request.parseRequest(index, parser); + protected TransportMoveToStepAction.Request doParseInstance(XContentParser parser) { + return TransportMoveToStepAction.Request.parseRequest(index, parser); } @Override - protected Request mutateInstance(Request request) { + protected TransportMoveToStepAction.Request mutateInstance(TransportMoveToStepAction.Request request) { String indexName = request.getIndex(); StepKey currentStepKey = request.getCurrentStepKey(); - Request.PartialStepKey nextStepKey = request.getNextStepKey(); + TransportMoveToStepAction.Request.PartialStepKey nextStepKey = request.getNextStepKey(); switch (between(0, 2)) { case 0 -> indexName += randomAlphaOfLength(5); @@ -53,18 +52,18 @@ protected Request mutateInstance(Request request) { default -> throw new AssertionError("Illegal randomisation branch"); } - return new Request(indexName, currentStepKey, nextStepKey); + return new TransportMoveToStepAction.Request(indexName, currentStepKey, nextStepKey); } - private static Request.PartialStepKey randomStepSpecification() { + private static TransportMoveToStepAction.Request.PartialStepKey randomStepSpecification() { if (randomBoolean()) { StepKey key = stepKeyTests.createTestInstance(); - return new Request.PartialStepKey(key.phase(), key.action(), key.name()); + return new TransportMoveToStepAction.Request.PartialStepKey(key.phase(), key.action(), key.name()); } else { String phase = randomAlphaOfLength(10); String action = randomBoolean() ? null : randomAlphaOfLength(6); String name = action == null ? null : (randomBoolean() ? 
null : randomAlphaOfLength(6)); - return new Request.PartialStepKey(phase, action, name); + return new TransportMoveToStepAction.Request.PartialStepKey(phase, action, name); } } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/RetryRequestTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/RetryRequestTests.java similarity index 80% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/RetryRequestTests.java rename to x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/RetryRequestTests.java index fb1c4fb40bf6d..e4f3c58fe6e66 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/RetryRequestTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/RetryRequestTests.java @@ -5,20 +5,19 @@ * 2.0. * */ -package org.elasticsearch.xpack.core.ilm.action; +package org.elasticsearch.xpack.ilm.action; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xpack.core.ilm.action.RetryAction.Request; import java.util.Arrays; -public class RetryRequestTests extends AbstractWireSerializingTestCase { +public class RetryRequestTests extends AbstractWireSerializingTestCase { @Override - protected Request createTestInstance() { - Request request = new Request(); + protected TransportRetryAction.Request createTestInstance() { + TransportRetryAction.Request request = new TransportRetryAction.Request(); if (randomBoolean()) { request.indices(generateRandomStringArray(20, 20, false)); } @@ -39,12 +38,12 @@ protected Request createTestInstance() { } @Override - protected Writeable.Reader instanceReader() { - return Request::new; + protected Writeable.Reader instanceReader() { + return TransportRetryAction.Request::new; } @Override - protected Request mutateInstance(Request instance) { + protected TransportRetryAction.Request mutateInstance(TransportRetryAction.Request instance) { String[] indices = instance.indices(); IndicesOptions indicesOptions = instance.indicesOptions(); switch (between(0, 1)) { @@ -67,7 +66,7 @@ protected Request mutateInstance(Request instance) { ); default -> throw new AssertionError("Illegal randomisation branch"); } - Request newRequest = new Request(); + TransportRetryAction.Request newRequest = new TransportRetryAction.Request(); newRequest.indices(indices); newRequest.indicesOptions(indicesOptions); return newRequest; diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleActionTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleActionTests.java index 87580a8165d61..246d2bcf21205 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleActionTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleActionTests.java @@ -158,6 +158,7 @@ public void testGetIndexLifecycleExplainResponse() throws IOException { ); assertThat(onlyErrorsResponse, notNullValue()); assertThat(onlyErrorsResponse.getPolicyName(), is("random-policy")); + assertThat(onlyErrorsResponse.getStep(), is(ErrorStep.NAME)); } { diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportPutLifecycleActionTests.java 
b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportPutLifecycleActionTests.java index 8467e46630876..e69e91192cf13 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportPutLifecycleActionTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportPutLifecycleActionTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; import org.elasticsearch.xpack.core.ilm.LifecyclePolicyMetadata; -import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; import org.elasticsearch.xpack.ilm.LifecyclePolicyTestsUtils; import java.util.Map; @@ -75,7 +75,7 @@ public void testReservedStateHandler() throws Exception { }"""; try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, json)) { - PutLifecycleAction.Request request = PutLifecycleAction.Request.parseRequest("my_timeseries_lifecycle2", parser); + PutLifecycleRequest request = PutLifecycleRequest.parseRequest("my_timeseries_lifecycle2", parser); assertThat(putAction.modifiedKeys(request), containsInAnyOrder("my_timeseries_lifecycle2")); } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportStopILMActionTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportStopILMActionTests.java index 2c19a79ac5af0..69ff6215aea01 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportStopILMActionTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportStopILMActionTests.java @@ -21,7 +21,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ilm.StopILMRequest; -import org.elasticsearch.xpack.core.ilm.action.StopILMAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; import org.mockito.ArgumentMatcher; import static java.util.Collections.emptyMap; @@ -47,7 +47,7 @@ public void testStopILMClusterStatePriorityIsImmediate() { Task task = new Task( randomLong(), "transport", - StopILMAction.NAME, + ILMActions.STOP.name(), "description", new TaskId(randomLong() + ":" + randomLong()), emptyMap() diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java index 6ac3a4522fb3d..8675c27325b4b 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ClusterServiceUtils; @@ -39,6 +40,7 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xpack.ilm.IndexLifecycleFeatures; import org.hamcrest.Matchers; import org.junit.After; import org.junit.Before; @@ -53,6 +55,9 @@ import static 
org.elasticsearch.xpack.core.ilm.LifecycleSettings.LIFECYCLE_HISTORY_INDEX_ENABLED_SETTING; import static org.elasticsearch.xpack.ilm.history.ILMHistoryStore.ILM_HISTORY_DATA_STREAM; +import static org.elasticsearch.xpack.ilm.history.ILMHistoryTemplateRegistry.ILM_TEMPLATE_NAME; +import static org.elasticsearch.xpack.ilm.history.ILMHistoryTemplateRegistry.INDEX_TEMPLATE_VERSION; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -75,6 +80,7 @@ public void setup() { ILMHistoryTemplateRegistry registry = new ILMHistoryTemplateRegistry( clusterService.getSettings(), clusterService, + new FeatureService(List.of(new IndexLifecycleFeatures())), threadPool, client, NamedXContentRegistry.EMPTY @@ -284,6 +290,10 @@ public void onFailure(Exception e) { } } + public void testTemplateNameIsVersioned() { + assertThat(ILM_TEMPLATE_NAME, endsWith("-" + INDEX_TEMPLATE_VERSION)); + } + /** * A client that delegates to a verifying function for action/request/listener */ diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java index d8f5b9424b162..142e071c9a133 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java @@ -36,6 +36,9 @@ import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xpack.core.inference.action.PutInferenceModelAction; import org.elasticsearch.xpack.core.ml.MachineLearningField; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentUtils; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.MlPlatformArchitecturesUtil; import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.inference.registry.ModelRegistry; @@ -128,6 +131,14 @@ protected void masterOperation( return; } + var assignments = TrainedModelAssignmentUtils.modelAssignments(request.getModelId(), clusterService.state()); + if ((assignments == null || assignments.isEmpty()) == false) { + listener.onFailure( + ExceptionsHelper.badRequestException(Messages.MODEL_ID_MATCHES_EXISTING_MODEL_IDS_BUT_MUST_NOT, request.getModelId()) + ); + return; + } + if (service.get().isInClusterService()) { // Find the cluster platform as the service may need that // information when creating the model diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestInferenceAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestInferenceAction.java index 761c0b1f069a1..f04230e7697dc 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestInferenceAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestInferenceAction.java @@ -33,8 +33,9 @@ public List routes() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { String taskType = restRequest.param("task_type"); String modelId = restRequest.param("model_id"); - var request = 
InferenceAction.Request.parseRequest(modelId, taskType, restRequest.contentParser()); - - return channel -> client.execute(InferenceAction.INSTANCE, request, new RestToXContentListener<>(channel)); + try (var parser = restRequest.contentParser()) { + var request = InferenceAction.Request.parseRequest(modelId, taskType, parser); + return channel -> client.execute(InferenceAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/registry/ModelRegistryTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/registry/ModelRegistryTests.java index b7d491bf54ddc..a8ea237ba8b0c 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/registry/ModelRegistryTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/registry/ModelRegistryTests.java @@ -63,7 +63,7 @@ public void tearDownThreadPool() { public void testGetUnparsedModelMap_ThrowsResourceNotFound_WhenNoHitsReturned() { var client = mockClient(); - mockClientExecuteSearch(client, mockSearchResponse(new SearchHit[0])); + mockClientExecuteSearch(client, mockSearchResponse(SearchHits.EMPTY)); var registry = new ModelRegistry(client); diff --git a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java index d93c24356422f..f75dd2926059a 100644 --- a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java +++ b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java @@ -30,7 +30,6 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.MockUtils; @@ -116,9 +115,14 @@ public void onFailure(Exception e) { * Test that the explicit and wildcard IDs are requested. 
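The guard added to TransportPutInferenceModelAction above rejects creating an inference endpoint whose model id already backs a trained-model assignment. The `(assignments == null || assignments.isEmpty()) == false` condition follows the codebase's `== false` convention; an equivalent, more direct rendering of the same check:

```java
var assignments = TrainedModelAssignmentUtils.modelAssignments(request.getModelId(), clusterService.state());
if (assignments != null && assignments.isEmpty() == false) {
    // the id is already taken by an ML deployment, so fail fast with a 400
    listener.onFailure(
        ExceptionsHelper.badRequestException(Messages.MODEL_ID_MATCHES_EXISTING_MODEL_IDS_BUT_MUST_NOT, request.getModelId())
    );
    return;
}
```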
*/ public void testGetPipelinesByExplicitAndWildcardIds() { - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(prepareSearchHits(), null, null, null, false, null, 1); SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + prepareSearchHits(), + null, + null, + false, + null, + null, + 1, null, 1, 1, diff --git a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java index ebe25ea1da1d9..cc819c353f69c 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java +++ b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java @@ -36,6 +36,7 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.SourceLoader; @@ -308,9 +309,9 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio } if (fieldType().value == null) { - Builder update = new Builder(simpleName()); - update.value.setValue(value); - context.addDynamicMapper(fieldType().name(), update); + ConstantKeywordFieldType newFieldType = new ConstantKeywordFieldType(fieldType().name(), value, fieldType().meta()); + Mapper update = new ConstantKeywordFieldMapper(simpleName(), newFieldType); + context.addDynamicMapper(update); } else if (Objects.equals(fieldType().value, value) == false) { throw new IllegalArgumentException( "[constant_keyword] field [" diff --git a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java index 1bd591a827059..e2cf8c3014604 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java +++ b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentParsingException; +import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.LuceneDocument; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperParsingException; @@ -253,6 +254,11 @@ public Set sourcePaths(String name) { public String parentField(String field) { throw new UnsupportedOperationException(); } + + @Override + public FieldNamesFieldMapper.FieldNamesFieldType fieldNames() { + return FieldNamesFieldMapper.FieldNamesFieldType.get(true); + } }); try (Directory directory = newDirectory()) { RandomIndexWriter iw = new RandomIndexWriter(random(), directory); @@ -303,7 +309,7 @@ protected IngestScriptSupport ingestScriptSupport() { } @Override - protected Function loadBlockExpected() { + 
protected Function loadBlockExpected(MapperService mapper, String loaderFieldName) { return v -> ((BytesRef) v).utf8ToString(); } diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java index d04bb88325cc7..ad5e224efd5db 100644 --- a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java +++ b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java @@ -78,16 +78,24 @@ public class CountedKeywordFieldMapper extends FieldMapper { public static final String CONTENT_TYPE = "counted_keyword"; public static final String COUNT_FIELD_NAME_SUFFIX = "_count"; - public static final FieldType FIELD_TYPE; + private static final FieldType FIELD_TYPE_INDEXED; + private static final FieldType FIELD_TYPE_NOT_INDEXED; static { - FieldType ft = new FieldType(); - ft.setDocValuesType(DocValuesType.SORTED_SET); - ft.setTokenized(false); - ft.setOmitNorms(true); - ft.setIndexOptions(IndexOptions.DOCS); - ft.freeze(); - FIELD_TYPE = freezeAndDeduplicateFieldType(ft); + FieldType indexed = new FieldType(); + indexed.setDocValuesType(DocValuesType.SORTED_SET); + indexed.setTokenized(false); + indexed.setOmitNorms(true); + indexed.setIndexOptions(IndexOptions.DOCS); + FIELD_TYPE_INDEXED = freezeAndDeduplicateFieldType(indexed); + + FieldType notIndexed = new FieldType(); + notIndexed.setDocValuesType(DocValuesType.SORTED_SET); + notIndexed.setTokenized(false); + notIndexed.setOmitNorms(true); + notIndexed.setIndexOptions(IndexOptions.NONE); + FIELD_TYPE_NOT_INDEXED = freezeAndDeduplicateFieldType(notIndexed); + } private static class CountedKeywordFieldType extends StringFieldType { @@ -261,7 +269,12 @@ public TermsEnum termsEnum() throws IOException { } } + private static CountedKeywordFieldMapper toType(FieldMapper in) { + return (CountedKeywordFieldMapper) in; + } + public static class Builder extends FieldMapper.Builder { + private final Parameter indexed = Parameter.indexParam(m -> toType(m).mappedFieldType.isIndexed(), true); private final Parameter> meta = Parameter.metaParam(); protected Builder(String name) { @@ -270,22 +283,24 @@ protected Builder(String name) { @Override protected Parameter[] getParameters() { - return new Parameter[] { meta }; + return new Parameter[] { meta, indexed }; } @Override public FieldMapper build(MapperBuilderContext context) { BinaryFieldMapper countFieldMapper = new BinaryFieldMapper.Builder(name + COUNT_FIELD_NAME_SUFFIX, true).build(context); + boolean isIndexed = indexed.getValue(); + FieldType ft = isIndexed ? 
FIELD_TYPE_INDEXED : FIELD_TYPE_NOT_INDEXED; return new CountedKeywordFieldMapper( name, - FIELD_TYPE, + ft, new CountedKeywordFieldType( context.buildFullName(name), - true, + isIndexed, false, true, - new TextSearchInfo(FIELD_TYPE, null, KEYWORD_ANALYZER, KEYWORD_ANALYZER), + new TextSearchInfo(ft, null, KEYWORD_ANALYZER, KEYWORD_ANALYZER), meta.getValue(), countFieldMapper.fieldType() ), diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java index 72e3eb4efacf9..31be7f149831d 100644 --- a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java +++ b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java @@ -73,6 +73,11 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) { CountedTermsAggregatorFactory.registerAggregators(builder); } + @Override + public boolean supportsSampling() { + return true; + } + public CountedTermsAggregationBuilder size(int size) { if (size <= 0) { throw new IllegalArgumentException("[size] must be greater than 0. Found [" + size + "] in [" + name + "]"); diff --git a/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapperTests.java b/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapperTests.java index 1468ed456b132..2ffd4468c814a 100644 --- a/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapperTests.java +++ b/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapperTests.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.countedkeyword; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -82,4 +84,15 @@ public void testDottedFieldNames() throws IOException { List fields = doc.rootDoc().getFields("dotted.field"); assertEquals(1, fields.size()); } + + public void testDisableIndex() throws IOException { + DocumentMapper mapper = createDocumentMapper( + fieldMapping(b -> b.field("type", CountedKeywordFieldMapper.CONTENT_TYPE).field("index", false)) + ); + ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234"))); + List fields = doc.rootDoc().getFields("field"); + assertEquals(1, fields.size()); + assertEquals(IndexOptions.NONE, fields.get(0).fieldType().indexOptions()); + assertEquals(DocValuesType.SORTED_SET, fields.get(0).fieldType().docValuesType()); + } } diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java index 97ffd50d5b8c3..e8fdf7e0205da 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java @@ -324,7 +324,7 @@ public BlockLoader 
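The counted_keyword mapper now accepts the standard index parameter, selecting between the indexed and doc-values-only field types set up above; the added testDisableIndex covers the new path. A hypothetical mapping that exercises it (JSON held in a Java text block, keeping examples in one language):

```java
// The field name "events" is illustrative.
String mapping = """
    { "properties": { "events": { "type": "counted_keyword", "index": false } } }
    """;
```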
blockLoader(BlockLoaderContext blContext) { if (hasDocValues()) { return new BlockDocValuesReader.LongsBlockLoader(name()); } - return new BlockSourceReader.LongsBlockLoader(new SourceValueFetcher(blContext.sourcePaths(name()), nullValueFormatted) { + ValueFetcher valueFetcher = new SourceValueFetcher(blContext.sourcePaths(name()), nullValueFormatted) { @Override protected Object parseSourceValue(Object value) { if (value.equals("")) { @@ -332,7 +332,11 @@ protected Object parseSourceValue(Object value) { } return parseUnsignedLong(value); } - }); + }; + BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed() + ? BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) + : BlockSourceReader.lookupMatchingAll(); + return new BlockSourceReader.LongsBlockLoader(valueFetcher, lookup); } @Override diff --git a/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapperTests.java b/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapperTests.java index 95fe8f0a530ba..cd8fdf7f89fbd 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapperTests.java +++ b/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapperTests.java @@ -367,7 +367,7 @@ protected IngestScriptSupport ingestScriptSupport() { } @Override - protected Function loadBlockExpected() { + protected Function loadBlockExpected(MapperService mapper, String loaderFieldName) { return v -> { // Numbers are in the block as a long but the test needs to compare them to their BigInteger value parsed from xcontent. if (v instanceof BigInteger ul) { diff --git a/x-pack/plugin/mapper-version/src/test/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapperTests.java b/x-pack/plugin/mapper-version/src/test/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapperTests.java index 5653ed7f4302f..376263d5cfc99 100644 --- a/x-pack/plugin/mapper-version/src/test/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapperTests.java +++ b/x-pack/plugin/mapper-version/src/test/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapperTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentParsingException; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperTestCase; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; @@ -188,7 +189,7 @@ protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) } @Override - protected Function loadBlockExpected() { + protected Function loadBlockExpected(MapperService mapper, String loaderFieldName) { return v -> new Version((BytesRef) v).toString(); } diff --git a/x-pack/plugin/mapper-version/src/test/java/org/elasticsearch/xpack/versionfield/VersionStringFieldTests.java b/x-pack/plugin/mapper-version/src/test/java/org/elasticsearch/xpack/versionfield/VersionStringFieldTests.java index f41cc145831cf..94d8a144b0bd6 100644 --- a/x-pack/plugin/mapper-version/src/test/java/org/elasticsearch/xpack/versionfield/VersionStringFieldTests.java +++ b/x-pack/plugin/mapper-version/src/test/java/org/elasticsearch/xpack/versionfield/VersionStringFieldTests.java @@ -17,9 +17,9 @@ import 
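For unsigned_long fields without doc values, the block loader now picks its leaf iterator by whether _field_names can prune candidate documents: when the field is indexed or stored, lookupFromFieldNames visits only docs that actually contain the field; otherwise the reader must consider every document. The decision, isolated from the surrounding method:

```java
// Same logic as the hunk above, extracted for emphasis.
BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed()
    ? BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name())
    : BlockSourceReader.lookupMatchingAll();
return new BlockSourceReader.LongsBlockLoader(valueFetcher, lookup);
```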
org.elasticsearch.search.aggregations.metrics.Cardinality; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.xpack.analytics.AnalyticsAggregationBuilders; import org.elasticsearch.xpack.analytics.AnalyticsPlugin; import org.elasticsearch.xpack.analytics.stringstats.InternalStringStats; +import org.elasticsearch.xpack.analytics.stringstats.StringStatsAggregationBuilder; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; import java.io.IOException; @@ -415,7 +415,7 @@ public void testAggs() throws Exception { // string stats assertResponse( - client().prepareSearch(indexName).addAggregation(AnalyticsAggregationBuilders.stringStats("stats").field("version")), + client().prepareSearch(indexName).addAggregation(new StringStatsAggregationBuilder("stats").field("version")), response -> { InternalStringStats stats = response.getAggregations().get("stats"); assertEquals(3, stats.getMinLength()); diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelLoaderUtils.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelLoaderUtils.java index 43ab090e94381..2f3f9cbf3f32c 100644 --- a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelLoaderUtils.java +++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelLoaderUtils.java @@ -130,9 +130,13 @@ static VocabularyParts loadVocabulary(URI uri) { // visible for testing static VocabularyParts parseVocabParts(InputStream vocabInputStream) throws IOException { - XContentParser sourceParser = XContentType.JSON.xContent() - .createParser(XContentParserConfiguration.EMPTY, Streams.limitStream(vocabInputStream, VOCABULARY_SIZE_LIMIT.getBytes())); - Map<String, List<Object>> vocabParts = sourceParser.map(HashMap::new, XContentParser::list); + Map<String, List<Object>> vocabParts; + try ( + XContentParser sourceParser = XContentType.JSON.xContent() + .createParser(XContentParserConfiguration.EMPTY, Streams.limitStream(vocabInputStream, VOCABULARY_SIZE_LIMIT.getBytes())) + ) { + vocabParts = sourceParser.map(HashMap::new, XContentParser::list); + } List<String> vocabulary = vocabParts.containsKey(VOCABULARY) ?
vocabParts.get(VOCABULARY).stream().map(Object::toString).collect(Collectors.toList()) diff --git a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle index d4c53f3f58c31..3e716f93ac949 100644 --- a/x-pack/plugin/ml/build.gradle +++ b/x-pack/plugin/ml/build.gradle @@ -1,5 +1,6 @@ import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.internal.dra.DraResolvePlugin +import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.internal-es-plugin' apply plugin: 'elasticsearch.internal-cluster-test' @@ -74,6 +75,7 @@ esplugin.bundleSpec.exclude 'platform/licenses/**' dependencies { compileOnly project(":server") + testImplementation project(path: ':x-pack:plugin:inference') compileOnly project(':modules:lang-painless:spi') compileOnly project(path: xpackModule('core')) compileOnly project(path: xpackModule('autoscaling')) @@ -114,6 +116,12 @@ artifacts { archives tasks.named("jar") } +if (BuildParams.isSnapshotBuild() == false) { + tasks.named("test").configure { + systemProperty 'es.semantic_text_feature_flag_enabled', 'true' + } +} + tasks.register("extractNativeLicenses", Copy) { dependsOn configurations.nativeBundle into "${buildDir}/extractedNativeLicenses" diff --git a/x-pack/plugin/ml/qa/ml-inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/CoordinatedInferenceIngestIT.java b/x-pack/plugin/ml/qa/ml-inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/CoordinatedInferenceIngestIT.java index c4c3ee016be0e..4d90d2a186858 100644 --- a/x-pack/plugin/ml/qa/ml-inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/CoordinatedInferenceIngestIT.java +++ b/x-pack/plugin/ml/qa/ml-inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/CoordinatedInferenceIngestIT.java @@ -8,48 +8,19 @@ package org.elasticsearch.xpack.ml.integration; import org.elasticsearch.client.Request; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Strings; import org.elasticsearch.inference.TaskType; -import org.elasticsearch.test.cluster.ElasticsearchCluster; -import org.elasticsearch.test.cluster.local.distribution.DistributionType; -import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.core.ml.utils.MapHelper; -import org.junit.ClassRule; import java.io.IOException; -import java.util.ArrayList; import java.util.List; import java.util.Map; -import java.util.stream.Collectors; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasSize; -public class CoordinatedInferenceIngestIT extends ESRestTestCase { - - @ClassRule - public static ElasticsearchCluster cluster = ElasticsearchCluster.local() - .distribution(DistributionType.DEFAULT) - .setting("xpack.license.self_generated.type", "trial") - .setting("xpack.security.enabled", "true") - .plugin("org.elasticsearch.xpack.inference.mock.TestInferenceServicePlugin") - .user("x_pack_rest_user", "x-pack-test-password") - .build(); - - @Override - protected String getTestRestCluster() { - return cluster.getHttpAddresses(); - } - - @Override - protected Settings restClientSettings() { - String token = basicAuthHeaderValue("x_pack_rest_user", new SecureString("x-pack-test-password".toCharArray())); - return 
Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); - } +public class CoordinatedInferenceIngestIT extends InferenceBaseRestTest { @SuppressWarnings("unchecked") public void testIngestWithMultipleModelTypes() throws IOException { @@ -60,10 +31,10 @@ public void testIngestWithMultipleModelTypes() throws IOException { putInferenceServiceModel(inferenceServiceModelId, TaskType.SPARSE_EMBEDDING); putBoostedTreeRegressionModel(boostedTreeModelId); - putPyTorchModel(pyTorchModelId); - putPyTorchModelDefinition(pyTorchModelId); - putPyTorchModelVocabulary(List.of("these", "are", "my", "words"), pyTorchModelId); - startDeployment(pyTorchModelId); + putPyTorchModelTrainedModels(pyTorchModelId); + putPyTorchModelDefinitionTrainedModels(pyTorchModelId); + putPyTorchModelVocabularyTrainedModels(List.of("these", "are", "my", "words"), pyTorchModelId); + startDeploymentTrainedModels(pyTorchModelId); String docs = """ [ @@ -139,10 +110,10 @@ public void testPipelineConfiguredWithFieldMap() throws IOException { putInferenceServiceModel(inferenceServiceModelId, TaskType.SPARSE_EMBEDDING); putBoostedTreeRegressionModel(boostedTreeModelId); - putPyTorchModel(pyTorchModelId); - putPyTorchModelDefinition(pyTorchModelId); - putPyTorchModelVocabulary(List.of("these", "are", "my", "words"), pyTorchModelId); - startDeployment(pyTorchModelId); + putPyTorchModelTrainedModels(pyTorchModelId); + putPyTorchModelDefinitionTrainedModels(pyTorchModelId); + putPyTorchModelVocabularyTrainedModels(List.of("these", "are", "my", "words"), pyTorchModelId); + startDeploymentTrainedModels(pyTorchModelId); String docs = """ [ @@ -189,9 +160,9 @@ public void testPipelineConfiguredWithFieldMap() throws IOException { public void testWithUndeployedPyTorchModel() throws IOException { var pyTorchModelId = "test-undeployed"; - putPyTorchModel(pyTorchModelId); - putPyTorchModelDefinition(pyTorchModelId); - putPyTorchModelVocabulary(List.of("these", "are", "my", "words"), pyTorchModelId); + putPyTorchModelTrainedModels(pyTorchModelId); + putPyTorchModelDefinitionTrainedModels(pyTorchModelId); + putPyTorchModelVocabularyTrainedModels(List.of("these", "are", "my", "words"), pyTorchModelId); String docs = """ [ @@ -230,36 +201,6 @@ public void testWithUndeployedPyTorchModel() throws IOException { } } - private Map putInferenceServiceModel(String modelId, TaskType taskType) throws IOException { - String endpoint = org.elasticsearch.common.Strings.format("_inference/%s/%s", taskType, modelId); - var request = new Request("PUT", endpoint); - var modelConfig = ExampleModels.mockServiceModelConfig(); - request.setJsonEntity(modelConfig); - var response = client().performRequest(request); - return entityAsMap(response); - } - - private void putPyTorchModel(String modelId) throws IOException { - Request request = new Request("PUT", "_ml/trained_models/" + modelId); - var modelConfiguration = ExampleModels.pytorchPassThroughModelConfig(); - request.setJsonEntity(modelConfiguration); - client().performRequest(request); - } - - protected void putPyTorchModelVocabulary(List vocabulary, String modelId) throws IOException { - List vocabularyWithPad = new ArrayList<>(); - vocabularyWithPad.add("[PAD]"); - vocabularyWithPad.add("[UNK]"); - vocabularyWithPad.addAll(vocabulary); - String quotedWords = vocabularyWithPad.stream().map(s -> "\"" + s + "\"").collect(Collectors.joining(",")); - - Request request = new Request("PUT", "_ml/trained_models/" + modelId + "/vocabulary"); - request.setJsonEntity(Strings.format(""" - { 
"vocabulary": [%s] } - """, quotedWords)); - client().performRequest(request); - } - protected Map simulatePipeline(String pipelineDef, String docs) throws IOException { String simulate = Strings.format(""" { @@ -272,27 +213,6 @@ protected Map simulatePipeline(String pipelineDef, String docs) return entityAsMap(client().performRequest(request)); } - protected void putPyTorchModelDefinition(String modelId) throws IOException { - Request request = new Request("PUT", "_ml/trained_models/" + modelId + "/definition/0"); - String body = Strings.format( - """ - {"total_definition_length":%s,"definition": "%s","total_parts": 1}""", - ExampleModels.RAW_PYTORCH_MODEL_SIZE, - ExampleModels.BASE_64_ENCODED_PYTORCH_MODEL - ); - request.setJsonEntity(body); - client().performRequest(request); - } - - protected void startDeployment(String modelId) throws IOException { - String endPoint = "/_ml/trained_models/" - + modelId - + "/deployment/_start?timeout=40s&wait_for=started&threads_per_allocation=1&number_of_allocations=1"; - - Request request = new Request("POST", endPoint); - client().performRequest(request); - } - private void putBoostedTreeRegressionModel(String modelId) throws IOException { Request request = new Request("PUT", "_ml/trained_models/" + modelId); var modelConfiguration = ExampleModels.boostedTreeRegressionModel(); @@ -300,7 +220,7 @@ private void putBoostedTreeRegressionModel(String modelId) throws IOException { client().performRequest(request); } - public Map getModel(String modelId, TaskType taskType) throws IOException { + public Map getModelInference(String modelId, TaskType taskType) throws IOException { var endpoint = org.elasticsearch.common.Strings.format("_inference/%s/%s", taskType, modelId); var request = new Request("GET", endpoint); var reponse = client().performRequest(request); diff --git a/x-pack/plugin/ml/qa/ml-inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceBaseRestTest.java b/x-pack/plugin/ml/qa/ml-inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceBaseRestTest.java new file mode 100644 index 0000000000000..51838dba082b9 --- /dev/null +++ b/x-pack/plugin/ml/qa/ml-inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceBaseRestTest.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.client.Request; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.Strings; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.ClassRule; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +public class InferenceBaseRestTest extends ESRestTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.security.enabled", "true") + .plugin("org.elasticsearch.xpack.inference.mock.TestInferenceServicePlugin") + .user("x_pack_rest_user", "x-pack-test-password") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("x_pack_rest_user", new SecureString("x-pack-test-password".toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + + protected Map putInferenceServiceModel(String modelId, TaskType taskType) throws IOException { + String endpoint = org.elasticsearch.common.Strings.format("_inference/%s/%s", taskType, modelId); + var request = new Request("PUT", endpoint); + var modelConfig = ExampleModels.mockServiceModelConfig(); + request.setJsonEntity(modelConfig); + var response = client().performRequest(request); + return entityAsMap(response); + } + + protected void putPyTorchModelTrainedModels(String modelId) throws IOException { + Request request = new Request("PUT", "_ml/trained_models/" + modelId); + var modelConfiguration = ExampleModels.pytorchPassThroughModelConfig(); + request.setJsonEntity(modelConfiguration); + client().performRequest(request); + } + + protected void putPyTorchModelVocabularyTrainedModels(List vocabulary, String modelId) throws IOException { + List vocabularyWithPad = new ArrayList<>(); + vocabularyWithPad.add("[PAD]"); + vocabularyWithPad.add("[UNK]"); + vocabularyWithPad.addAll(vocabulary); + String quotedWords = vocabularyWithPad.stream().map(s -> "\"" + s + "\"").collect(Collectors.joining(",")); + + Request request = new Request("PUT", "_ml/trained_models/" + modelId + "/vocabulary"); + request.setJsonEntity(Strings.format(""" + { "vocabulary": [%s] } + """, quotedWords)); + client().performRequest(request); + } + + protected void putPyTorchModelDefinitionTrainedModels(String modelId) throws IOException { + Request request = new Request("PUT", "_ml/trained_models/" + modelId + "/definition/0"); + String body = Strings.format( + """ + {"total_definition_length":%s,"definition": "%s","total_parts": 1}""", + ExampleModels.RAW_PYTORCH_MODEL_SIZE, + ExampleModels.BASE_64_ENCODED_PYTORCH_MODEL + ); + request.setJsonEntity(body); + client().performRequest(request); + } + + protected void startDeploymentTrainedModels(String modelId) throws IOException { + String endPoint = "/_ml/trained_models/" + + modelId + + 
"/deployment/_start?timeout=40s&wait_for=started&threads_per_allocation=1&number_of_allocations=1"; + + Request request = new Request("POST", endPoint); + client().performRequest(request); + } +} diff --git a/x-pack/plugin/ml/qa/ml-inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelIdUniquenessIT.java b/x-pack/plugin/ml/qa/ml-inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelIdUniquenessIT.java new file mode 100644 index 0000000000000..9904cfb752de5 --- /dev/null +++ b/x-pack/plugin/ml/qa/ml-inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelIdUniquenessIT.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + * + * this file was contributed to by a generative AI + */ + +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.inference.TaskType; +import org.hamcrest.Matchers; + +import java.util.List; + +public class ModelIdUniquenessIT extends InferenceBaseRestTest { + + public void testPutInferenceModelFailsWhenTrainedModelWithIdAlreadyExists() throws Exception { + + String modelId = "duplicate_model_id"; + putPyTorchModelTrainedModels(modelId); + putPyTorchModelDefinitionTrainedModels(modelId); + putPyTorchModelVocabularyTrainedModels(List.of("these", "are", "my", "words"), modelId); + startDeploymentTrainedModels(modelId); + + var e = expectThrows(ResponseException.class, () -> putInferenceServiceModel(modelId, TaskType.SPARSE_EMBEDDING)); + assertThat( + e.getMessage(), + Matchers.containsString( + "Model IDs must be unique. Requested model ID [" + modelId + "] matches existing model IDs but must not." + ) + + ); + } + + public void testPutTrainedModelFailsWhenInferenceModelWithIdAlreadyExists() throws Exception { + + String modelId = "duplicate_model_id"; + putPyTorchModelTrainedModels(modelId); + putPyTorchModelDefinitionTrainedModels(modelId); + putPyTorchModelVocabularyTrainedModels(List.of("these", "are", "my", "words"), modelId); + + putInferenceServiceModel(modelId, TaskType.SPARSE_EMBEDDING); + + var e = expectThrows(ResponseException.class, () -> startDeploymentTrainedModels(modelId)); + assertThat( + e.getMessage(), + Matchers.containsString( + "Model IDs must be unique. Requested model ID [" + modelId + "] matches existing model IDs but must not." 
+ ) + + ); + + } +} diff --git a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle index 1cc37f5c4ffc0..9d931974d25d5 100644 --- a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle @@ -2,6 +2,7 @@ import org.elasticsearch.gradle.internal.test.RestIntegTestTask import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE +import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.internal-testclusters' apply plugin: 'elasticsearch.standalone-rest-test' @@ -49,16 +50,29 @@ testClusters.register('mixed-cluster') { tasks.register('remote-cluster', RestIntegTestTask) { mustRunAfter("precommit") systemProperty 'tests.rest.suite', 'remote_cluster' + maybeDisableForFips(it) } tasks.register('mixed-cluster', RestIntegTestTask) { dependsOn 'remote-cluster' useCluster remoteCluster systemProperty 'tests.rest.suite', 'multi_cluster' + maybeDisableForFips(it) } tasks.register("integTest") { dependsOn 'mixed-cluster' + maybeDisableForFips(it) } tasks.named("check").configure { dependsOn("integTest") } + +//TODO: remove with version 8.14. A new FIPS setting was added in 8.13. Since FIPS configures all test clusters and this specific integTest uses +// the previous minor version, that setting is not available when running in FIPS until 8.14. +def maybeDisableForFips(task) { + if (BuildParams.inFipsJvm) { + if(Version.fromString(project.version).before(Version.fromString('8.14.0'))) { + task.enabled = false + } + } +} diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java index 113ed9a5aa686..398ef5f2e743a 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java @@ -6,19 +6,20 @@ */ package org.elasticsearch.xpack.ml.integration; +import org.apache.logging.log4j.Level; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.AggregationBuilders; @@ -468,11 +469,7 @@ public void testRealtime() throws Exception { StopDatafeedAction.Response stopJobResponse = stopDatafeed(datafeedId); 
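An aside on the ModelLoaderUtils.parseVocabParts hunk earlier in this patch: the XContentParser is now created in try-with-resources, so it is closed even if parsing throws. A minimal sketch of the same pattern under simplified assumptions (the parseJson helper is illustrative, not from the PR):

```java
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentParserConfiguration;
import org.elasticsearch.xcontent.XContentType;

import java.io.IOException;
import java.io.InputStream;
import java.util.Map;

class JsonStreamParsing {
    // Create the parser in try-with-resources so it is always closed,
    // then hand the parsed map out of the block.
    static Map<String, Object> parseJson(InputStream in) throws IOException {
        try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, in)) {
            return parser.map();
        }
    }
}
```

The PR's own version additionally caps the input with Streams.limitStream before parsing, so an oversized vocabulary file cannot exhaust memory.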
assertTrue(stopJobResponse.isStopped()); } catch (Exception e) { - NodesHotThreadsResponse nodesHotThreadsResponse = clusterAdmin().prepareNodesHotThreads().get(); - int i = 0; - for (NodeHotThreads nodeHotThreads : nodesHotThreadsResponse.getNodes()) { - logger.info(i++ + ":\n" + nodeHotThreads.getHotThreads()); - } + HotThreads.logLocalHotThreads(logger, Level.INFO, "hot threads at failure", ReferenceDocs.LOGGING); throw e; } assertBusy(() -> { @@ -491,11 +488,7 @@ public void testCloseJobStopsRealtimeDatafeed() throws Exception { CloseJobAction.Response closeJobResponse = closeJob(jobId); assertTrue(closeJobResponse.isClosed()); } catch (Exception e) { - NodesHotThreadsResponse nodesHotThreadsResponse = clusterAdmin().prepareNodesHotThreads().get(); - int i = 0; - for (NodeHotThreads nodeHotThreads : nodesHotThreadsResponse.getNodes()) { - logger.info(i++ + ":\n" + nodeHotThreads.getHotThreads()); - } + HotThreads.logLocalHotThreads(logger, Level.INFO, "hot threads at failure", ReferenceDocs.LOGGING); throw e; } assertBusy(() -> { @@ -538,11 +531,7 @@ public void testCloseJobStopsLookbackOnlyDatafeed() throws Exception { CloseJobAction.Response closeJobResponse = closeJob(jobId, useForce); assertTrue(closeJobResponse.isClosed()); } catch (Exception e) { - NodesHotThreadsResponse nodesHotThreadsResponse = clusterAdmin().prepareNodesHotThreads().get(); - int i = 0; - for (NodeHotThreads nodeHotThreads : nodesHotThreadsResponse.getNodes()) { - logger.info(i++ + ":\n" + nodeHotThreads.getHotThreads()); - } + HotThreads.logLocalHotThreads(logger, Level.INFO, "hot threads at failure", ReferenceDocs.LOGGING); throw e; } GetDatafeedsStatsAction.Request request = new GetDatafeedsStatsAction.Request(datafeedId); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java index 1657a6df5074e..d12672fd4afb0 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java @@ -681,7 +681,6 @@ public void testInsufficientSearchPrivilegesOnPreview() throws Exception { options.addHeader("Authorization", BASIC_AUTH_VALUE_ML_ADMIN); getFeed.setOptions(options); ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(getFeed)); - assertThat(e.getMessage(), containsString("[indices:data/read/field_caps] is unauthorized for user [ml_admin]")); } @@ -722,7 +721,12 @@ public void testSecondaryAuthSearchPrivilegesOnPreview() throws Exception { options.addHeader("es-secondary-authorization", BASIC_AUTH_VALUE_ML_ADMIN_WITH_SOME_DATA_ACCESS); getFeed.setOptions(options); // Should not fail as secondary auth has permissions. 
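An aside on the DatafeedJobsIT hunks just above: the per-node loop over NodesHotThreadsResponse becomes a single HotThreads.logLocalHotThreads call, trading cluster-wide coverage for a much simpler local dump. A minimal sketch of that log-and-rethrow diagnostic pattern, assuming a Log4j Logger is in scope (the wrapper method is illustrative, not from the PR):

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.ReferenceDocs;
import org.elasticsearch.monitor.jvm.HotThreads;

class HotThreadsOnFailure {
    // Dump the local node's hot threads before rethrowing, so a hung or busy
    // thread is visible in the test logs without a cluster-wide request.
    static void runLoggingHotThreads(Logger logger, Runnable assertion) {
        try {
            assertion.run();
        } catch (AssertionError e) {
            HotThreads.logLocalHotThreads(logger, Level.INFO, "hot threads at failure", ReferenceDocs.LOGGING);
            throw e;
        }
    }
}
```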
- client().performRequest(getFeed); + var response = client().performRequest(getFeed); + assertXProductResponseHeader(response); + } + + private void assertXProductResponseHeader(Response response) { + assertEquals("Elasticsearch", response.getHeader("X-elastic-product")); } public void testLookbackOnlyGivenAggregationsWithHistogram() throws Exception { @@ -1518,6 +1522,7 @@ public LookbackOnlyTestHelper setShouldSucceedProcessing(boolean value) { public void execute() throws Exception { Response jobResponse = createJob(jobId, airlineVariant); + assertXProductResponseHeader(jobResponse); assertThat(jobResponse.getStatusLine().getStatusCode(), equalTo(200)); String datafeedId = "datafeed-" + jobId; new DatafeedBuilder(datafeedId, jobId, dataIndex).setScriptedFields(scriptedFields).build(); @@ -1529,6 +1534,7 @@ public void execute() throws Exception { Response jobStatsResponse = client().performRequest( new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats") ); + assertXProductResponseHeader(jobStatsResponse); String jobStatsResponseAsString = EntityUtils.toString(jobStatsResponse.getEntity()); if (shouldSucceedInput) { assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":2")); @@ -1556,12 +1562,14 @@ private void startDatafeedAndWaitUntilStopped(String datafeedId, String authHead options.addHeader("Authorization", authHeader); request.setOptions(options); Response startDatafeedResponse = client().performRequest(request); + assertXProductResponseHeader(startDatafeedResponse); assertThat(EntityUtils.toString(startDatafeedResponse.getEntity()), containsString("\"started\":true")); assertBusy(() -> { try { Response datafeedStatsResponse = client().performRequest( new Request("GET", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_stats") ); + assertXProductResponseHeader(datafeedStatsResponse); assertThat(EntityUtils.toString(datafeedStatsResponse.getEntity()), containsString("\"state\":\"stopped\"")); } catch (Exception e) { throw new RuntimeException(e); @@ -1575,6 +1583,7 @@ private void waitUntilJobIsClosed(String jobId) throws Exception { Response jobStatsResponse = client().performRequest( new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats") ); + assertXProductResponseHeader(jobStatsResponse); assertThat(EntityUtils.toString(jobStatsResponse.getEntity()), containsString("\"state\":\"closed\"")); } catch (Exception e) { throw new RuntimeException(e); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ExplainDataFrameAnalyticsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ExplainDataFrameAnalyticsIT.java index ecc601b0f1eae..5ce0a24a40d9d 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ExplainDataFrameAnalyticsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ExplainDataFrameAnalyticsIT.java @@ -7,11 +7,12 @@ package org.elasticsearch.xpack.ml.integration; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.RefCountingListener; 
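An aside on the assertXProductResponseHeader helper introduced just above: it pins the X-elastic-product header on every response it touches. A standalone sketch of the same check against the low-level REST client (the class and method names are illustrative, not from the PR):

```java
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

import java.io.IOException;

class ProductHeaderCheck {
    // Every Elasticsearch REST response should carry this header; clients use
    // it to verify they are talking to a genuine Elasticsearch endpoint.
    static void assertProductHeader(RestClient client) throws IOException {
        Response response = client.performRequest(new Request("GET", "/"));
        String header = response.getHeader("X-elastic-product");
        if ("Elasticsearch".equals(header) == false) {
            throw new AssertionError("missing or wrong X-elastic-product header: " + header);
        }
    }
}
```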
+import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.query.QueryBuilders; @@ -27,11 +28,11 @@ import org.elasticsearch.xpack.core.ml.utils.QueryProvider; import java.io.IOException; -import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import static org.hamcrest.Matchers.contains; @@ -203,26 +204,26 @@ public void testSimultaneousExplainSameConfig() throws IOException { ) .buildForExplain(); - List<ActionFuture<ExplainDataFrameAnalyticsAction.Response>> futures = new ArrayList<>(); - - for (int i = 0; i < simultaneousInvocationCount; ++i) { - futures.add(client().execute(ExplainDataFrameAnalyticsAction.INSTANCE, new ExplainDataFrameAnalyticsAction.Request(config))); - } - - ExplainDataFrameAnalyticsAction.Response previous = null; - for (ActionFuture<ExplainDataFrameAnalyticsAction.Response> future : futures) { - // The main purpose of this test is that actionGet() here will throw an exception - // if any of the simultaneous calls returns an error due to interaction between - // the many estimation processes that get run - ExplainDataFrameAnalyticsAction.Response current = future.actionGet(10000); - if (previous != null) { - // A secondary check the test can perform is that the multiple invocations - // return the same result (but it was failures due to unwanted interactions - // that caused this test to be written) - assertEquals(previous, current); + safeAwait(SubscribableListener.newForked(testListener -> { + try (var listeners = new RefCountingListener(testListener)) { + final var firstResponseRef = new AtomicReference<ExplainDataFrameAnalyticsAction.Response>(); + for (int i = 0; i < simultaneousInvocationCount; ++i) { + client().execute( + ExplainDataFrameAnalyticsAction.INSTANCE, + new ExplainDataFrameAnalyticsAction.Request(config), + // The main purpose of this test is that the action will complete its listener exceptionally if any of the + // simultaneous calls returns an error due to interaction between the many estimation processes that get run.
+ listeners.acquire(response -> { + // A secondary check the test can perform is that the multiple invocations return the same result + // (but it was failures due to unwanted interactions that caused this test to be written) + assertNotNull(response); + firstResponseRef.compareAndSet(null, response); + assertEquals(firstResponseRef.get(), response); + }) + ); + } } - previous = current; - } + })); } public void testRuntimeFields() { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java index 858c5ba946f78..5f2f7cfe491ca 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; @@ -80,6 +80,8 @@ import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsTaskState; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; @@ -91,8 +93,6 @@ import org.elasticsearch.xpack.ilm.IndexLifecycle; import org.elasticsearch.xpack.ml.LocalStateMachineLearning; import org.elasticsearch.xpack.ml.autoscaling.MlScalingReason; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.slm.SnapshotLifecycle; import org.elasticsearch.xpack.slm.history.SnapshotLifecycleTemplateRegistry; import org.elasticsearch.xpack.transform.Transform; @@ -440,8 +440,8 @@ protected void ensureClusterStateConsistency() throws IOException { protected static void createDataStreamAndTemplate(String dataStreamName, String mapping) throws IOException { client().execute( - PutComposableIndexTemplateAction.INSTANCE, - new PutComposableIndexTemplateAction.Request(dataStreamName + "_template").indexTemplate( + TransportPutComposableIndexTemplateAction.TYPE, + new TransportPutComposableIndexTemplateAction.Request(dataStreamName + "_template").indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(Collections.singletonList(dataStreamName)) .template(new Template(null, new CompressedXContent(mapping), null)) diff --git 
a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java index ed7cfad8bf195..a130f669583fa 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java @@ -33,6 +33,7 @@ import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ensemble.Ensemble; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.metadata.Hyperparameters; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.metadata.TrainedModelMetadata; +import org.hamcrest.Matchers; import org.junit.After; import java.io.IOException; @@ -47,11 +48,12 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; -import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresent; +import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.emptyString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasItems; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; @@ -129,9 +131,8 @@ public void testSingleNumericFeatureAndMixedTrainingAndNonTrainingRows() throws // double predictionValue = (double) resultsObject.get(predictedClassField); // assertThat(predictionValue, closeTo(10 * featureValue, 2.0)); - assertThat(resultsObject.containsKey(predictedClassField), is(true)); - assertThat(resultsObject.containsKey("is_training"), is(true)); - assertThat(resultsObject.get("is_training"), is(destDoc.containsKey(DEPENDENT_VARIABLE_FIELD))); + assertThat(resultsObject, hasKey(predictedClassField)); + assertThat(resultsObject, hasEntry("is_training", destDoc.containsKey(DEPENDENT_VARIABLE_FIELD))); @SuppressWarnings("unchecked") List> importanceArray = (List>) resultsObject.get("feature_importance"); @@ -144,15 +145,13 @@ public void testSingleNumericFeatureAndMixedTrainingAndNonTrainingRows() throws } } - assertThat(importanceArray, hasSize(greaterThan(0))); assertThat( - importanceArray.stream() - .filter( - m -> NUMERICAL_FEATURE_FIELD.equals(m.get("feature_name")) - || DISCRETE_NUMERICAL_FEATURE_FIELD.equals(m.get("feature_name")) + importanceArray, + hasItem( + either(Matchers.hasEntry("feature_name", NUMERICAL_FEATURE_FIELD)).or( + hasEntry("feature_name", DISCRETE_NUMERICAL_FEATURE_FIELD) ) - .findAny(), - isPresent() + ) ); } @@ -504,20 +503,18 @@ public void testWithDatastream() throws Exception { Map destDoc = getDestDoc(config, hit); Map resultsObject = getMlResultsObjectFromDestDoc(destDoc); - assertThat(resultsObject.containsKey(predictedClassField), is(true)); - assertThat(resultsObject.containsKey("is_training"), is(true)); - assertThat(resultsObject.get("is_training"), is(destDoc.containsKey(DEPENDENT_VARIABLE_FIELD))); + assertThat(resultsObject, hasKey(predictedClassField)); + assertThat(resultsObject, hasEntry("is_training", destDoc.containsKey(DEPENDENT_VARIABLE_FIELD))); @SuppressWarnings("unchecked") List> importanceArray = (List>) resultsObject.get("feature_importance"); 
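An aside on the ExplainDataFrameAnalyticsIT rewrite above: instead of collecting ActionFutures and calling actionGet in a loop, the test forks all calls against one RefCountingListener inside SubscribableListener.newForked, so the first failure completes the outer listener exceptionally and safeAwait returns only once every call has finished. A minimal sketch of that fan-out shape, where runAction stands in for client().execute(...) (hypothetical helper, not from the PR):

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.RefCountingListener;
import org.elasticsearch.action.support.SubscribableListener;

import java.util.function.Consumer;

class FanOut {
    // Forks n concurrent actions; the returned listener completes when all
    // acquired listeners have been released, or fails on the first error.
    static SubscribableListener<Void> runConcurrently(int n, Consumer<ActionListener<Void>> runAction) {
        return SubscribableListener.newForked(allDone -> {
            try (var listeners = new RefCountingListener(allDone)) {
                for (int i = 0; i < n; i++) {
                    runAction.accept(listeners.acquire(response -> {
                        // per-response assertions go here
                    }));
                }
            }
        });
    }
}
```

The try-with-resources block releases the initial reference when all acquisitions are done, which is what lets the outer listener complete exactly once.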
- assertThat(importanceArray, hasSize(greaterThan(0))); + assertThat( - importanceArray.stream() - .filter( - m -> NUMERICAL_FEATURE_FIELD.equals(m.get("feature_name")) - || DISCRETE_NUMERICAL_FEATURE_FIELD.equals(m.get("feature_name")) + importanceArray, + hasItem( + either(Matchers.hasEntry("feature_name", NUMERICAL_FEATURE_FIELD)).or( + hasEntry("feature_name", DISCRETE_NUMERICAL_FEATURE_FIELD) ) - .findAny(), - isPresent() + ) ); } }); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java index 948544dc95bf8..7158e494bee68 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java @@ -8,10 +8,10 @@ import org.elasticsearch.action.admin.cluster.snapshots.features.ResetFeatureStateAction; import org.elasticsearch.action.admin.cluster.snapshots.features.ResetFeatureStateRequest; -import org.elasticsearch.action.ingest.DeletePipelineAction; import org.elasticsearch.action.ingest.DeletePipelineRequest; -import org.elasticsearch.action.ingest.PutPipelineAction; +import org.elasticsearch.action.ingest.DeletePipelineTransportAction; import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.tasks.TaskInfo; @@ -114,7 +114,7 @@ public void cleanup() throws Exception { cleanUp(); for (String pipeline : createdPipelines) { try { - client().execute(DeletePipelineAction.INSTANCE, new DeletePipelineRequest(pipeline)).actionGet(); + client().execute(DeletePipelineTransportAction.TYPE, new DeletePipelineRequest(pipeline)).actionGet(); } catch (Exception ex) { logger.warn(() -> "error cleaning up pipeline [" + pipeline + "]", ex); } @@ -130,7 +130,7 @@ public void testMLFeatureReset() throws Exception { for (int i = 0; i < 100; i++) { indexDocForInference("feature_reset_inference_pipeline"); } - client().execute(DeletePipelineAction.INSTANCE, new DeletePipelineRequest("feature_reset_inference_pipeline")).actionGet(); + client().execute(DeletePipelineTransportAction.TYPE, new DeletePipelineRequest("feature_reset_inference_pipeline")).actionGet(); createdPipelines.remove("feature_reset_inference_pipeline"); assertBusy(() -> assertThat(countInferenceProcessors(clusterAdmin().prepareState().get().getState()), equalTo(0))); @@ -158,7 +158,8 @@ public void testMLFeatureResetFailureDueToPipelines() throws Exception { "Unable to reset machine learning feature as there are ingest pipelines still referencing trained machine learning models" ) ); - client().execute(DeletePipelineAction.INSTANCE, new DeletePipelineRequest("feature_reset_failure_inference_pipeline")).actionGet(); + client().execute(DeletePipelineTransportAction.TYPE, new DeletePipelineRequest("feature_reset_failure_inference_pipeline")) + .actionGet(); createdPipelines.remove("feature_reset_failure_inference_pipeline"); assertThat(isResetMode(), is(false)); } @@ -292,7 +293,7 @@ private void startRealtime(String jobId) throws Exception { } private void putTrainedModelIngestPipeline(String pipelineId) throws Exception { - 
client().execute(PutPipelineAction.INSTANCE, new PutPipelineRequest(pipelineId, new BytesArray(""" + client().execute(PutPipelineTransportAction.TYPE, new PutPipelineRequest(pipelineId, new BytesArray(""" { "processors": [ { diff --git a/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedWithoutSecurityRestIT.java b/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedWithoutSecurityRestIT.java new file mode 100644 index 0000000000000..76159660187ff --- /dev/null +++ b/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedWithoutSecurityRestIT.java @@ -0,0 +1,163 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.integration; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.Before; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.not; + +public class DatafeedWithoutSecurityRestIT extends ESRestTestCase { + + @Before + public void setUpData() throws Exception { + addAirlineData(); + } + + /** + * The main purpose of this test is to ensure the X-elastic-product + * header is returned when security is disabled. The vast majority + * of Datafeed test coverage is in DatafeedJobsRestIT but that + * suite runs with security enabled.
+ */ + public void testPreviewMissingHeader() throws Exception { + String jobId = "missing-header"; + Request createJobRequest = new Request("PUT", "/_ml/anomaly_detectors/" + jobId); + createJobRequest.setJsonEntity(""" + { + "description": "Aggs job", + "analysis_config": { + "bucket_span": "1h", + "detectors": [ + { + "function": "count", + "partition_field_name": "airline" + } + ], + "influencers": [ + "airline" + ], + "model_prune_window": "30d" + }, + "model_plot_config": { + "enabled": false, + "annotations_enabled": false + }, + "analysis_limits": { + "model_memory_limit": "11mb", + "categorization_examples_limit": 4 + }, + "data_description" : {"time_field": "time stamp"} + }"""); + client().performRequest(createJobRequest); + + String datafeedId = "datafeed-" + jobId; + Request createDatafeedRequest = new Request("PUT", "/_ml/datafeeds/" + datafeedId); + createDatafeedRequest.setJsonEntity(""" + { + "job_id": "missing-header", + "query": { + "bool": { + "must": [ + { + "match_all": {} + } + ] + } + }, + "indices": [ + "airline-data" + ], + "scroll_size": 1000 + } + """); + client().performRequest(createDatafeedRequest); + + Request getFeed = new Request("GET", "/_ml/datafeeds/" + datafeedId + "/_preview"); + RequestOptions.Builder options = getFeed.getOptions().toBuilder(); + getFeed.setOptions(options); + var previewResponse = client().performRequest(getFeed); + assertXProductResponseHeader(previewResponse); + + client().performRequest(new Request("POST", "/_ml/anomaly_detectors/" + jobId + "/_open")); + Request startRequest = new Request("POST", "/_ml/datafeeds/" + datafeedId + "/_start"); + Response startDatafeedResponse = client().performRequest(startRequest); + assertXProductResponseHeader(startDatafeedResponse); + } + + private void assertXProductResponseHeader(Response response) { + assertEquals("Elasticsearch", response.getHeader("X-elastic-product")); + } + + private void addAirlineData() throws IOException { + StringBuilder bulk = new StringBuilder(); + + // Create index with source = enabled, doc_values = enabled, stored = false + multi-field + Request createAirlineDataRequest = new Request("PUT", "/airline-data"); + // space in 'time stamp' is intentional + createAirlineDataRequest.setJsonEntity(""" + { + "mappings": { + "runtime": { + "airline_lowercase_rt": { + "type": "keyword", + "script": { + "source": "emit(params._source.airline.toLowerCase())" + } + } + }, + "properties": { + "time stamp": { + "type": "date" + }, + "airline": { + "type": "text", + "fields": { + "text": { + "type": "text" + }, + "keyword": { + "type": "keyword" + } + } + }, + "responsetime": { + "type": "float" + } + } + } + }"""); + client().performRequest(createAirlineDataRequest); + + bulk.append(""" + {"index": {"_index": "airline-data", "_id": 1}} + {"time stamp":"2016-06-01T00:00:00Z","airline":"AAA","responsetime":135.22} + {"index": {"_index": "airline-data", "_id": 2}} + {"time stamp":"2016-06-01T01:59:00Z","airline":"AAA","responsetime":541.76} + """); + + bulkIndex(bulk.toString()); + } + + private void bulkIndex(String bulk) throws IOException { + Request bulkRequest = new Request("POST", "/_bulk"); + bulkRequest.setJsonEntity(bulk); + bulkRequest.addParameter("refresh", "true"); + bulkRequest.addParameter("pretty", null); + String bulkResponse = EntityUtils.toString(client().performRequest(bulkRequest).getEntity()); + assertThat(bulkResponse, not(containsString("\"errors\": false"))); + } + +} diff --git 
a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java index a98dfa223b8ae..592f42e13e301 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java @@ -9,8 +9,8 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.ingest.PutPipelineAction; import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.action.ingest.SimulateDocumentBaseResult; import org.elasticsearch.action.ingest.SimulatePipelineAction; import org.elasticsearch.action.ingest.SimulatePipelineRequest; @@ -529,7 +529,7 @@ public void testMachineLearningCreateInferenceProcessorRestricted() { // Creating a pipeline should work PlainActionFuture putPipelineListener = new PlainActionFuture<>(); client().execute( - PutPipelineAction.INSTANCE, + PutPipelineTransportAction.TYPE, new PutPipelineRequest( "test_infer_license_pipeline", new BytesArray(pipeline.getBytes(StandardCharsets.UTF_8)), @@ -577,7 +577,7 @@ public void testMachineLearningCreateInferenceProcessorRestricted() { // Creating a new pipeline with an inference processor should work putPipelineListener = new PlainActionFuture<>(); client().execute( - PutPipelineAction.INSTANCE, + PutPipelineTransportAction.TYPE, new PutPipelineRequest( "test_infer_license_pipeline_again", new BytesArray(pipeline.getBytes(StandardCharsets.UTF_8)), @@ -611,7 +611,7 @@ public void testMachineLearningCreateInferenceProcessorRestricted() { // test that license restricted apis do now work PlainActionFuture putPipelineListenerNewLicense = new PlainActionFuture<>(); client().execute( - PutPipelineAction.INSTANCE, + PutPipelineTransportAction.TYPE, new PutPipelineRequest( "test_infer_license_pipeline", new BytesArray(pipeline.getBytes(StandardCharsets.UTF_8)), diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/ChunkedTrainedModelPersisterIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/ChunkedTrainedModelPersisterIT.java index 5cf87cff66a25..9b3326a4ba348 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/ChunkedTrainedModelPersisterIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/ChunkedTrainedModelPersisterIT.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsDest; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsSource; import org.elasticsearch.xpack.core.ml.dataframe.analyses.Regression; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelDefinition; import org.elasticsearch.xpack.core.ml.inference.TrainedModelDefinitionTests; @@ -36,7 +37,6 @@ import org.elasticsearch.xpack.ml.extractor.DocValueField; import org.elasticsearch.xpack.ml.extractor.ExtractedField; import org.elasticsearch.xpack.ml.extractor.ExtractedFields; -import 
org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.ml.inference.modelsize.ModelSizeInfo; import org.elasticsearch.xpack.ml.inference.modelsize.ModelSizeInfoTests; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java index 891779e28439b..b4ffe46e6ea92 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java @@ -63,6 +63,7 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.TimingStats; +import org.elasticsearch.xpack.core.ml.job.results.Result; import org.elasticsearch.xpack.core.ml.utils.MlIndexAndAlias; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import org.elasticsearch.xpack.ml.MlSingleNodeTestCase; @@ -846,6 +847,16 @@ public void testGetSnapshots() { assertNull(snapshots.get(3).getQuantiles()); assertNull(snapshots.get(4).getQuantiles()); + // test get single snapshot + PlainActionFuture> singleFuture = new PlainActionFuture<>(); + jobProvider.getModelSnapshot(jobId, "snap_1", true, singleFuture::onResponse, singleFuture::onFailure); + ModelSnapshot withQuantiles = singleFuture.actionGet().result; + assertThat(withQuantiles.getQuantiles().getTimestamp().getTime(), equalTo(11L)); + + singleFuture = new PlainActionFuture<>(); + jobProvider.getModelSnapshot(jobId, "snap_2", false, singleFuture::onResponse, singleFuture::onFailure); + ModelSnapshot withoutQuantiles = singleFuture.actionGet().result; + assertNull(withoutQuantiles.getQuantiles()); } public void testGetAutodetectParams() throws Exception { diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java index 8f516ab2a62da..33fd7c108863b 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java @@ -28,7 +28,6 @@ import org.elasticsearch.persistent.PersistentTaskResponse; import org.elasticsearch.persistent.PersistentTasksClusterService; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; import org.elasticsearch.persistent.UpdatePersistentTaskStatusAction; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -719,15 +718,19 @@ private void run(String jobId, CheckedRunnable disrupt) throws Except // https://github.com/elastic/elasticsearch/pull/50907 - now that the cluster state is stored // in a Lucene index it can take a while to update when there are many updates in quick // succession, like we see in internal cluster tests of node failure scenarios - assertBusy(() -> { - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); - List> 
tasks = findTasks(clusterState, Set.of(DATAFEED_TASK_NAME, JOB_TASK_NAME)); - assertNotNull(tasks); - assertEquals("Expected 2 tasks, but got [" + tasks + "]", 2, tasks.size()); - for (PersistentTask task : tasks) { - assertFalse(needsReassignment(task.getAssignment(), clusterState.nodes())); + awaitClusterState(state -> { + List> tasks = findTasks(state, Set.of(DATAFEED_TASK_NAME, JOB_TASK_NAME)); + if (tasks == null || tasks.size() != 2) { + return false; } - + for (PersistentTasksCustomMetadata.PersistentTask task : tasks) { + if (needsReassignment(task.getAssignment(), state.nodes())) { + return false; + } + } + return true; + }); + assertBusy(() -> { GetJobsStatsAction.Request jobStatsRequest = new GetJobsStatsAction.Request(jobId); JobStats jobStats = client().execute(GetJobsStatsAction.INSTANCE, jobStatsRequest).actionGet().getResponse().results().get(0); assertEquals(JobState.OPENED, jobStats.getState()); diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureLicenseTrackingIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureLicenseTrackingIT.java index f7bf94e0479e8..936e499e94feb 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureLicenseTrackingIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureLicenseTrackingIT.java @@ -7,10 +7,10 @@ package org.elasticsearch.xpack.ml.integration; -import org.elasticsearch.action.ingest.DeletePipelineAction; import org.elasticsearch.action.ingest.DeletePipelineRequest; -import org.elasticsearch.action.ingest.PutPipelineAction; +import org.elasticsearch.action.ingest.DeletePipelineTransportAction; import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.core.Strings; import org.elasticsearch.license.GetFeatureUsageRequest; @@ -61,7 +61,7 @@ public class TestFeatureLicenseTrackingIT extends MlSingleNodeTestCase { public void cleanup() throws Exception { for (String pipeline : createdPipelines) { try { - client().execute(DeletePipelineAction.INSTANCE, new DeletePipelineRequest(pipeline)).actionGet(); + client().execute(DeletePipelineTransportAction.TYPE, new DeletePipelineRequest(pipeline)).actionGet(); } catch (Exception ex) { logger.warn(() -> "error cleaning up pipeline [" + pipeline + "]", ex); } @@ -170,7 +170,7 @@ public void testFeatureTrackingInferenceModelPipeline() throws Exception { assertThat(lastUsage.toInstant(), lessThan(recentUsage.toInstant())); }); - client().execute(DeletePipelineAction.INSTANCE, new DeletePipelineRequest(pipelineId)).actionGet(); + client().execute(DeletePipelineTransportAction.TYPE, new DeletePipelineRequest(pipelineId)).actionGet(); createdPipelines.remove(pipelineId); // Make sure that feature usage keeps the last usage once the model is removed @@ -211,7 +211,7 @@ private List getJobStats(String jobId) { } private void putTrainedModelIngestPipeline(String pipelineId, String modelId) throws Exception { - client().execute(PutPipelineAction.INSTANCE, new PutPipelineRequest(pipelineId, new BytesArray(Strings.format(""" + client().execute(PutPipelineTransportAction.TYPE, new PutPipelineRequest(pipelineId, new BytesArray(Strings.format(""" { "processors": [ { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index f9c483496445e..f3254245168b8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -74,6 +74,7 @@ import org.elasticsearch.plugins.SystemIndexPlugin; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ScalingExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; @@ -188,6 +189,8 @@ import org.elasticsearch.xpack.core.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider; import org.elasticsearch.xpack.core.ml.dataframe.stats.AnalysisStatsNamedWriteablesProvider; import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; @@ -318,10 +321,8 @@ import org.elasticsearch.xpack.ml.dataframe.process.NativeMemoryUsageEstimationProcessFactory; import org.elasticsearch.xpack.ml.dataframe.process.results.AnalyticsResult; import org.elasticsearch.xpack.ml.dataframe.process.results.MemoryUsageEstimationResult; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.ml.inference.TrainedModelStatsService; import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentClusterService; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentService; import org.elasticsearch.xpack.ml.inference.deployment.DeploymentManager; import org.elasticsearch.xpack.ml.inference.ingest.InferenceProcessor; @@ -910,6 +911,7 @@ public Collection createComponents(PluginServices services) { Environment environment = services.environment(); NamedXContentRegistry xContentRegistry = services.xContentRegistry(); IndexNameExpressionResolver indexNameExpressionResolver = services.indexNameExpressionResolver(); + TelemetryProvider telemetryProvider = services.telemetryProvider(); if (enabled == false) { // Holders for @link(MachineLearningFeatureSetUsage) which needs access to job manager and ML extension, @@ -1051,7 +1053,7 @@ public Collection createComponents(PluginServices services) { normalizerProcessFactory = (jobId, quantilesState, bucketSpan, executorService) -> new MultiplyingNormalizerProcess(1.0); analyticsProcessFactory = (jobId, analyticsProcessConfig, hasState, executorService, onProcessCrash) -> null; memoryEstimationProcessFactory = (jobId, analyticsProcessConfig, hasState, executorService, onProcessCrash) -> null; - pyTorchProcessFactory = (task, executorService, onProcessCrash) -> new BlackHolePyTorchProcess(); + pyTorchProcessFactory = (task, executorService, afterInputStreamClose, onProcessCrash) -> new BlackHolePyTorchProcess(); } NormalizerFactory normalizerFactory = new NormalizerFactory( normalizerProcessFactory, @@ -1251,6 +1253,14 @@ public Collection createComponents(PluginServices services) { machineLearningExtension.get().isNlpEnabled() 
); + MlMetrics mlMetrics = new MlMetrics( + telemetryProvider.getMeterRegistry(), + clusterService, + settings, + autodetectProcessManager, + dataFrameAnalyticsManager + ); + return List.of( mlLifeCycleService, new MlControllerHolder(mlController), @@ -1282,7 +1292,8 @@ public Collection createComponents(PluginServices services) { trainedModelAllocationClusterServiceSetOnce.get(), deploymentManager.get(), nodeAvailabilityZoneMapper, - new MachineLearningExtensionHolder(machineLearningExtension.get()) + new MachineLearningExtensionHolder(machineLearningExtension.get()), + mlMetrics ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java index d7c5f15fcaf47..dab2010035b66 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java @@ -16,7 +16,7 @@ import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; +import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -234,7 +234,7 @@ private void makeMlInternalIndicesHidden() { UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest().indices(nonHiddenIndices) .indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN) .settings(Collections.singletonMap(SETTING_INDEX_HIDDEN, true)); - executeAsyncWithOrigin(client, ML_ORIGIN, UpdateSettingsAction.INSTANCE, updateSettingsRequest, updateSettingsListener); + executeAsyncWithOrigin(client, ML_ORIGIN, TransportUpdateSettingsAction.TYPE, updateSettingsRequest, updateSettingsListener); }, finalListener::onFailure); // Step 1: Fetch ML internal indices settings to find out whether they are already hidden or not. 
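The change above wires `services.telemetryProvider()` into the new `MlMetrics` component so that gauges can be registered against the node's `MeterRegistry`. A minimal sketch of that cached-gauge pattern, assuming only the `registerLongGauge`/`LongWithAttributes` APIs that appear in this diff (the class name, metric name, and placeholder computation below are illustrative, not part of the change):

```java
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.telemetry.metric.LongWithAttributes;
import org.elasticsearch.telemetry.metric.MeterRegistry;

import java.util.Map;

// Hypothetical, minimal version of the pattern MlMetrics uses: the gauge
// supplier only reads a cached volatile field, and the expensive work happens
// at most once per cluster state change rather than once per poll.
class CachedGaugeSketch implements ClusterStateListener {

    private volatile long cachedValue;

    CachedGaugeSketch(MeterRegistry meterRegistry) {
        meterRegistry.registerLongGauge(
            "es.ml.example.size", // hypothetical metric name
            "Example of a value recalculated on cluster state changes.",
            "bytes",
            () -> new LongWithAttributes(cachedValue, Map.of())
        );
    }

    @Override
    public void clusterChanged(ClusterChangedEvent event) {
        // Recalculate and cache; the gauge supplier above stays cheap.
        cachedValue = event.state().nodes().getSize();
    }
}
```

This mirrors the design spelled out in the `MlMetrics` javadoc below: values are recomputed in response to cluster state changes and cached, so the telemetry provider can poll the registry as often as it likes.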
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java index c6a360a018e2a..976e5ec255b85 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java @@ -15,9 +15,9 @@ import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.datafeed.DatafeedRunner; import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsManager; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; import org.elasticsearch.xpack.ml.process.MlController; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlMetrics.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlMetrics.java new file mode 100644 index 0000000000000..4fd1af3cfa2bc --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlMetrics.java @@ -0,0 +1,563 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.telemetry.metric.LongWithAttributes; +import org.elasticsearch.telemetry.metric.MeterRegistry; +import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; +import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; +import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsManager; +import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; +import org.elasticsearch.xpack.ml.utils.NativeMemoryCalculator; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import static org.elasticsearch.xpack.core.ml.MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT; +import static org.elasticsearch.xpack.core.ml.MlTasks.DATAFEED_TASK_NAME; +import static org.elasticsearch.xpack.core.ml.MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME; 
+import static org.elasticsearch.xpack.core.ml.MlTasks.JOB_SNAPSHOT_UPGRADE_TASK_NAME; +import static org.elasticsearch.xpack.core.ml.MlTasks.JOB_TASK_NAME; +import static org.elasticsearch.xpack.ml.MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD; + +/** + * This class adds two types of ML metrics to the meter registry, such that they can be collected by Elastic APM. + * <p> + * 1. Per-node ML native memory statistics for ML nodes + * 2. Cluster-wide job/model statuses for master-eligible nodes + * <p> + * The memory metrics relate solely to the ML node they are collected from. + * <p>
    + * The job/model metrics are cluster-wide because a key problem we want to be able to detect is when there are + * jobs or models that are not assigned to any node. The consumer of the data needs to account for the fact that + * multiple master-eligible nodes are reporting the same information. The es.ml.is_master attribute in the records + * indicates which one was actually master, so can be used to deduplicate. + */ +public final class MlMetrics extends AbstractLifecycleComponent implements ClusterStateListener { + + private static final Logger logger = LogManager.getLogger(MlMetrics.class); + + private final MeterRegistry meterRegistry; + private final ClusterService clusterService; + private final AutodetectProcessManager autodetectProcessManager; + private final DataFrameAnalyticsManager dataFrameAnalyticsManager; + private final boolean hasMasterRole; + private final boolean hasMlRole; + private final List metrics = new ArrayList<>(); + + private static final Map MASTER_TRUE_MAP = Map.of("es.ml.is_master", Boolean.TRUE); + private static final Map MASTER_FALSE_MAP = Map.of("es.ml.is_master", Boolean.FALSE); + private volatile Map isMasterMap = MASTER_FALSE_MAP; + private volatile boolean firstTime = true; + + private volatile MlTaskStatusCounts mlTaskStatusCounts = MlTaskStatusCounts.EMPTY; + private volatile TrainedModelAllocationCounts trainedModelAllocationCounts = TrainedModelAllocationCounts.EMPTY; + + private volatile long nativeMemLimit; + private volatile long nativeMemAdUsage; + private volatile long nativeMemDfaUsage; + private volatile long nativeMemTrainedModelUsage; + private volatile long nativeMemFree; + + public MlMetrics( + MeterRegistry meterRegistry, + ClusterService clusterService, + Settings settings, + AutodetectProcessManager autodetectProcessManager, + DataFrameAnalyticsManager dataFrameAnalyticsManager + ) { + this.meterRegistry = meterRegistry; + this.clusterService = clusterService; + this.autodetectProcessManager = autodetectProcessManager; + this.dataFrameAnalyticsManager = dataFrameAnalyticsManager; + hasMasterRole = DiscoveryNode.hasRole(settings, DiscoveryNodeRole.MASTER_ROLE); + hasMlRole = DiscoveryNode.hasRole(settings, DiscoveryNodeRole.ML_ROLE); + if (hasMasterRole || hasMlRole) { + clusterService.addListener(this); + } + } + + private void registerMlNodeMetrics(MeterRegistry meterRegistry) { + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.native_memory.limit.size", + "ML native memory limit on this node.", + "bytes", + () -> new LongWithAttributes(nativeMemLimit, Map.of()) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.native_memory.anomaly_detectors.usage", + "ML native memory used by anomaly detection jobs on this node.", + "bytes", + () -> new LongWithAttributes(nativeMemAdUsage, Map.of()) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.native_memory.data_frame_analytics.usage", + "ML native memory used by data frame analytics jobs on this node.", + "bytes", + () -> new LongWithAttributes(nativeMemDfaUsage, Map.of()) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.native_memory.trained_models.usage", + "ML native memory used by trained models on this node.", + "bytes", + () -> new LongWithAttributes(nativeMemTrainedModelUsage, Map.of()) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.native_memory.free.size", + "Free ML native memory on this node.", + "bytes", + () -> new LongWithAttributes(nativeMemFree, Map.of()) + ) + ); + } + + private void 
registerMasterNodeMetrics(MeterRegistry meterRegistry) { + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.anomaly_detectors.opening.current", + "Count of anomaly detection jobs in the opening state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.adOpeningCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.anomaly_detectors.opened.current", + "Count of anomaly detection jobs in the opened state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.adOpenedCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.anomaly_detectors.closing.current", + "Count of anomaly detection jobs in the closing state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.adClosingCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.anomaly_detectors.failed.current", + "Count of anomaly detection jobs in the failed state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.adFailedCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.datafeeds.starting.current", + "Count of datafeeds in the starting state cluster-wide.", + "datafeeds", + () -> new LongWithAttributes(mlTaskStatusCounts.datafeedStartingCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.datafeeds.started.current", + "Count of datafeeds in the started state cluster-wide.", + "datafeeds", + () -> new LongWithAttributes(mlTaskStatusCounts.datafeedStartedCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.datafeeds.stopping.current", + "Count of datafeeds in the stopping state cluster-wide.", + "datafeeds", + () -> new LongWithAttributes(mlTaskStatusCounts.datafeedStoppingCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.data_frame_analytics.starting.current", + "Count of data frame analytics jobs in the starting state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.dfaStartingCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.data_frame_analytics.started.current", + "Count of data frame analytics jobs in the started state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.dfaStartedCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.data_frame_analytics.reindexing.current", + "Count of data frame analytics jobs in the reindexing state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.dfaReindexingCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.data_frame_analytics.analyzing.current", + "Count of data frame analytics jobs in the analyzing state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.dfaAnalyzingCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.data_frame_analytics.stopping.current", + "Count of data frame analytics jobs in the stopping state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.dfaStoppingCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.data_frame_analytics.failed.current", + "Count of data frame analytics jobs in the failed state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.dfaFailedCount, isMasterMap) + ) + ); + metrics.add( + 
meterRegistry.registerLongGauge( + "es.ml.trained_models.deployment.target_allocations.current", + "Sum of target trained model allocations across all deployments cluster-wide.", + "allocations", + () -> new LongWithAttributes(trainedModelAllocationCounts.trainedModelsTargetAllocations, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.trained_models.deployment.current_allocations.current", + "Sum of current trained model allocations across all deployments cluster-wide.", + "allocations", + () -> new LongWithAttributes(trainedModelAllocationCounts.trainedModelsCurrentAllocations, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.trained_models.deployment.failed_allocations.current", + "Sum of failed trained model allocations across all deployments cluster-wide.", + "allocations", + () -> new LongWithAttributes(trainedModelAllocationCounts.trainedModelsFailedAllocations, isMasterMap) + ) + ); + } + + @Override + protected void doStart() { + metrics.clear(); + if (hasMasterRole) { + registerMasterNodeMetrics(meterRegistry); + } + if (hasMlRole) { + registerMlNodeMetrics(meterRegistry); + } + } + + @Override + protected void doStop() {} + + @Override + protected void doClose() { + metrics.forEach(metric -> { + try { + metric.close(); + } catch (Exception e) { + logger.warn("metrics close() method should not throw Exception", e); + } + }); + } + + /** + * Metric values are recalculated in response to cluster state changes and then cached. + * This means that the telemetry provider can poll the metrics registry as often as it + * likes without causing extra work in recalculating the metric values. + */ + @Override + public void clusterChanged(ClusterChangedEvent event) { + isMasterMap = event.localNodeMaster() ? MASTER_TRUE_MAP : MASTER_FALSE_MAP; + + if (event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { + // Wait until the gateway has recovered from disk. + return; + } + + boolean mustRecalculateFreeMem = false; + + final ClusterState currentState = event.state(); + final ClusterState previousState = event.previousState(); + + if (firstTime || event.metadataChanged()) { + final PersistentTasksCustomMetadata tasks = currentState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + final PersistentTasksCustomMetadata oldTasks = firstTime + ? null + : previousState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + if (tasks != null && tasks.equals(oldTasks) == false) { + if (hasMasterRole) { + mlTaskStatusCounts = findTaskStatuses(tasks); + } + if (hasMlRole) { + nativeMemAdUsage = findAdMemoryUsage(autodetectProcessManager); + nativeMemDfaUsage = findDfaMemoryUsage(dataFrameAnalyticsManager, tasks); + mustRecalculateFreeMem = true; + } + } + } + + final TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.fromState(currentState); + final TrainedModelAssignmentMetadata previousMetadata = firstTime ? 
null : TrainedModelAssignmentMetadata.fromState(previousState); + if (currentMetadata != null && currentMetadata.equals(previousMetadata) == false) { + if (hasMasterRole) { + trainedModelAllocationCounts = findTrainedModelAllocationCounts(currentMetadata); + } + if (hasMlRole) { + nativeMemTrainedModelUsage = findTrainedModelMemoryUsage(currentMetadata, currentState.nodes().getLocalNode().getId()); + mustRecalculateFreeMem = true; + } + } + + if (firstTime) { + firstTime = false; + nativeMemLimit = findNativeMemoryLimit(currentState.nodes().getLocalNode(), clusterService.getClusterSettings()); + mustRecalculateFreeMem = true; + // Install a listener to recalculate limit and free in response to settings changes. + // This isn't done in the constructor, but instead only after the three usage variables + // have been populated. Doing this means that immediately after startup, when the stats + // are inaccurate, they'll _all_ be zero. Installing the settings listeners immediately + // could mean that free would be misleadingly set based on zero usage when actual usage + // is _not_ zero. + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(USE_AUTO_MACHINE_MEMORY_PERCENT, s -> memoryLimitClusterSettingUpdated()); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(MachineLearning.MAX_MACHINE_MEMORY_PERCENT, s -> memoryLimitClusterSettingUpdated()); + } + + if (mustRecalculateFreeMem) { + nativeMemFree = findNativeMemoryFree(nativeMemLimit, nativeMemAdUsage, nativeMemDfaUsage, nativeMemTrainedModelUsage); + } + } + + /** + * This method is registered to be called whenever a cluster setting is changed that affects + * any of the calculations this class performs. + */ + private void memoryLimitClusterSettingUpdated() { + nativeMemLimit = findNativeMemoryLimit(clusterService.localNode(), clusterService.getClusterSettings()); + nativeMemFree = findNativeMemoryFree(nativeMemLimit, nativeMemAdUsage, nativeMemDfaUsage, nativeMemTrainedModelUsage); + } + + /** + * Returns up-to-date stats about the states of the ML entities that are persistent tasks. + * Currently this includes: + * - Anomaly detection jobs + * - Datafeeds + * - Data frame analytics jobs + *
<p> + * In the future it could possibly also include model snapshot upgrade tasks. + * <p> + * These stats relate to the whole cluster and not just the current node. + * <p>
    + * The caller is expected to cache the returned stats to avoid unnecessary recalculation. + */ + static MlTaskStatusCounts findTaskStatuses(PersistentTasksCustomMetadata tasks) { + + int adOpeningCount = 0; + int adOpenedCount = 0; + int adClosingCount = 0; + int adFailedCount = 0; + int datafeedStartingCount = 0; + int datafeedStartedCount = 0; + int datafeedStoppingCount = 0; + int dfaStartingCount = 0; + int dfaStartedCount = 0; + int dfaReindexingCount = 0; + int dfaAnalyzingCount = 0; + int dfaStoppingCount = 0; + int dfaFailedCount = 0; + + for (PersistentTasksCustomMetadata.PersistentTask task : tasks.tasks()) { + switch (task.getTaskName()) { + case JOB_TASK_NAME: + switch (MlTasks.getJobStateModifiedForReassignments(task)) { + case OPENING -> ++adOpeningCount; + case OPENED -> ++adOpenedCount; + case CLOSING -> ++adClosingCount; + case FAILED -> ++adFailedCount; + } + break; + case DATAFEED_TASK_NAME: + switch (MlTasks.getDatafeedState(task)) { + case STARTING -> ++datafeedStartingCount; + case STARTED -> ++datafeedStartedCount; + case STOPPING -> ++datafeedStoppingCount; + } + break; + case DATA_FRAME_ANALYTICS_TASK_NAME: + switch (MlTasks.getDataFrameAnalyticsState(task)) { + case STARTING -> ++dfaStartingCount; + case STARTED -> ++dfaStartedCount; + case REINDEXING -> ++dfaReindexingCount; + case ANALYZING -> ++dfaAnalyzingCount; + case STOPPING -> ++dfaStoppingCount; + case FAILED -> ++dfaFailedCount; + } + break; + case JOB_SNAPSHOT_UPGRADE_TASK_NAME: + // Not currently tracked + // TODO: consider in the future, especially when we're at the stage of needing to upgrade serverless model snapshots + break; + } + } + + return new MlTaskStatusCounts( + adOpeningCount, + adOpenedCount, + adClosingCount, + adFailedCount, + datafeedStartingCount, + datafeedStartedCount, + datafeedStoppingCount, + dfaStartingCount, + dfaStartedCount, + dfaReindexingCount, + dfaAnalyzingCount, + dfaStoppingCount, + dfaFailedCount + ); + } + + /** + * Return the memory usage, in bytes, of the anomaly detection jobs that are running on the + * current node. + */ + static long findAdMemoryUsage(AutodetectProcessManager autodetectProcessManager) { + return autodetectProcessManager.getOpenProcessMemoryUsage().getBytes(); + } + + /** + * Return the memory usage, in bytes, of the data frame analytics jobs that are running on the + * current node. + */ + static long findDfaMemoryUsage(DataFrameAnalyticsManager dataFrameAnalyticsManager, PersistentTasksCustomMetadata tasks) { + return dataFrameAnalyticsManager.getActiveTaskMemoryUsage(tasks).getBytes(); + } + + /** + * Returns up-to-date stats about the numbers of allocations of ML trained models. + *
<p> + * These stats relate to the whole cluster and not just the current node. + * <p>
    + * The caller is expected to cache the returned stats to avoid unnecessary recalculation. + */ + static TrainedModelAllocationCounts findTrainedModelAllocationCounts(TrainedModelAssignmentMetadata metadata) { + int trainedModelsTargetAllocations = 0; + int trainedModelsCurrentAllocations = 0; + int trainedModelsFailedAllocations = 0; + + for (TrainedModelAssignment trainedModelAssignment : metadata.allAssignments().values()) { + trainedModelsTargetAllocations += trainedModelAssignment.totalTargetAllocations(); + trainedModelsCurrentAllocations += trainedModelAssignment.totalCurrentAllocations(); + trainedModelsFailedAllocations += trainedModelAssignment.totalFailedAllocations(); + } + + return new TrainedModelAllocationCounts( + trainedModelsTargetAllocations, + trainedModelsCurrentAllocations, + trainedModelsFailedAllocations + ); + } + + /** + * Return the memory usage, in bytes, of the trained models that are running on the + * current node. + */ + static long findTrainedModelMemoryUsage(TrainedModelAssignmentMetadata metadata, String localNodeId) { + long trainedModelMemoryUsageBytes = 0; + for (TrainedModelAssignment assignment : metadata.allAssignments().values()) { + if (Optional.ofNullable(assignment.getNodeRoutingTable().get(localNodeId)) + .map(RoutingInfo::getState) + .orElse(RoutingState.STOPPED) + .consumesMemory()) { + trainedModelMemoryUsageBytes += assignment.getTaskParams().estimateMemoryUsageBytes(); + } + } + return trainedModelMemoryUsageBytes; + } + + /** + * Return the maximum amount of memory, in bytes, permitted for ML processes running on the + * current node. + */ + static long findNativeMemoryLimit(DiscoveryNode localNode, ClusterSettings settings) { + return NativeMemoryCalculator.allowedBytesForMl(localNode, settings).orElse(0L); + } + + /** + * Return the amount of free memory, in bytes, that remains available for ML processes running on the + * current node. 
+ */ + static long findNativeMemoryFree(long nativeMemLimit, long nativeMemAdUsage, long nativeMemDfaUsage, long nativeMemTrainedModelUsage) { + long totalUsage = nativeMemAdUsage + nativeMemDfaUsage + nativeMemTrainedModelUsage; + if (totalUsage > 0) { + totalUsage += NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes(); + } + return nativeMemLimit - totalUsage; + } + + record MlTaskStatusCounts( + int adOpeningCount, + int adOpenedCount, + int adClosingCount, + int adFailedCount, + int datafeedStartingCount, + int datafeedStartedCount, + int datafeedStoppingCount, + int dfaStartingCount, + int dfaStartedCount, + int dfaReindexingCount, + int dfaAnalyzingCount, + int dfaStoppingCount, + int dfaFailedCount + ) { + static final MlTaskStatusCounts EMPTY = new MlTaskStatusCounts(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + } + + record TrainedModelAllocationCounts( + int trainedModelsTargetAllocations, + int trainedModelsCurrentAllocations, + int trainedModelsFailedAllocations + ) { + static final TrainedModelAllocationCounts EMPTY = new TrainedModelAllocationCounts(0, 0, 0); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportClearDeploymentCacheAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportClearDeploymentCacheAction.java index 44235882a6582..5ecd0322674e1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportClearDeploymentCacheAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportClearDeploymentCacheAction.java @@ -23,8 +23,8 @@ import org.elasticsearch.xpack.core.ml.action.ClearDeploymentCacheAction.Request; import org.elasticsearch.xpack.core.ml.action.ClearDeploymentCacheAction.Response; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.deployment.TrainedModelDeploymentTask; import java.util.List; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java index 7512aa2b42acf..02801864a3e78 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java @@ -244,7 +244,7 @@ static class OpenAndClosingIds { } List openJobIds; - List closingJobIds; + final List closingJobIds; } /** @@ -616,8 +616,8 @@ private void normalCloseJob( } static class WaitForCloseRequest { - List> persistentTasks = new ArrayList<>(); - List jobsToFinalize = new ArrayList<>(); + final List> persistentTasks = new ArrayList<>(); + final List jobsToFinalize = new ArrayList<>(); public boolean hasJobsToWaitFor() { return persistentTasks.isEmpty() == false; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCoordinatedInferenceAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCoordinatedInferenceAction.java index 478b80e6e58be..4562f664cbc1a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCoordinatedInferenceAction.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCoordinatedInferenceAction.java @@ -28,9 +28,9 @@ import org.elasticsearch.xpack.core.inference.action.GetInferenceModelAction; import org.elasticsearch.xpack.core.ml.action.CoordinatedInferenceAction; import org.elasticsearch.xpack.core.ml.action.InferModelAction; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentUtils; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.EmptyConfigUpdate; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfigUpdate; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentUtils; import java.util.ArrayList; import java.util.function.Supplier; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java index 093e4213a5db1..d19871d0e1b2f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java @@ -41,9 +41,9 @@ import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.action.DeleteTrainedModelAction; import org.elasticsearch.xpack.core.ml.action.StopTrainedModelDeploymentAction; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.ingest.InferenceProcessor; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; import org.elasticsearch.xpack.ml.notifications.InferenceAuditor; @@ -231,7 +231,6 @@ private void deleteModel(DeleteTrainedModelAction.Request request, ClusterState id ) ); - return; } } else { deleteAliasesAndModel(request, modelAliases, listener); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAliasAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAliasAction.java index 73601ef86ff13..fe8a4ff029d69 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAliasAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAliasAction.java @@ -32,7 +32,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.DeleteTrainedModelAliasAction; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.ml.notifications.InferenceAuditor; import java.util.HashMap; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportExplainDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportExplainDataFrameAnalyticsAction.java index 35cab760665ce..d19b67b52afe1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportExplainDataFrameAnalyticsAction.java 
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportExplainDataFrameAnalyticsAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.client.internal.node.NodeClient; @@ -153,11 +154,20 @@ private void explain( ); }); } else { + var responseHeaderPreservingListener = ContextPreservingActionListener.wrapPreservingContext( + listener, + threadPool.getThreadContext() + ); extractedFieldsDetectorFactory.createFromSource( request.getConfig(), ActionListener.wrap( - extractedFieldsDetector -> explain(parentTaskId, request.getConfig(), extractedFieldsDetector, listener), - listener::onFailure + extractedFieldsDetector -> explain( + parentTaskId, + request.getConfig(), + extractedFieldsDetector, + responseHeaderPreservingListener + ), + responseHeaderPreservingListener::onFailure ) ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java index 36d225a943348..14afd6999b0c0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java @@ -28,7 +28,7 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.deployment.ModelStats; import org.elasticsearch.xpack.ml.inference.deployment.TrainedModelDeploymentTask; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java index 10fb54ee8ae4c..d9dfd0fb23eeb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; @@ -66,7 +65,6 @@ public class TransportGetOverallBucketsAction extends HandledTransportAction< private final ThreadPool threadPool; private final Client client; - private final ClusterService clusterService; private final JobManager jobManager; @Inject @@ -74,7 +72,6 @@ public TransportGetOverallBucketsAction( ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, - ClusterService 
clusterService, JobManager jobManager, Client client ) { @@ -86,7 +83,6 @@ public TransportGetOverallBucketsAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); this.threadPool = threadPool; - this.clusterService = clusterService; this.client = client; this.jobManager = jobManager; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsAction.java index e6d1fe30d7646..78d030d454f0b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsAction.java @@ -21,11 +21,11 @@ import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction.Request; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction.Response; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; import java.util.Collections; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsAction.java index 3c9ba3700dc8e..76321608ba4fb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsAction.java @@ -43,16 +43,16 @@ import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsStatsAction; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import org.elasticsearch.xpack.core.ml.inference.assignment.AssignmentStats; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceStats; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TrainedModelSizeStats; import org.elasticsearch.xpack.core.ml.utils.TransportVersionUtils; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelDefinitionDoc; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java index 3cf0189c28df2..6a8dca8e2776b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java @@ -32,15 +32,15 @@ import org.elasticsearch.xpack.core.ml.action.InferModelAction.Request; import org.elasticsearch.xpack.core.ml.action.InferModelAction.Response; import org.elasticsearch.xpack.core.ml.action.InferTrainedModelDeploymentAction; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import org.elasticsearch.xpack.core.ml.inference.assignment.AssignmentState; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.loadingservice.LocalModel; import org.elasticsearch.xpack.ml.inference.loadingservice.ModelLoadingService; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDataFrameAnalyticsAction.java index 3e3d08b23d59f..9bf18671e7c11 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDataFrameAnalyticsAction.java @@ -8,6 +8,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.client.internal.node.NodeClient; @@ -98,7 +99,11 @@ protected void doExecute(Task task, Request request, ActionListener li preview(task, config, listener); }); } else { - preview(task, request.getConfig(), listener); + preview( + task, + request.getConfig(), + ContextPreservingActionListener.wrapPreservingContext(listener, threadPool.getThreadContext()) + ); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java index 9d28619ba1db6..5ceb34bfc0510 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesAction; import 
org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.ParentTaskAssigningClient; @@ -132,6 +133,17 @@ private void previewDatafeed( PreviewDatafeedAction.Request request, ActionListener listener ) { + // The datafeed preview runs in its own context with the provided + // headers for auth. When security is not enabled the context + // preserving listener is required to restore the request/response + // headers. If security is enabled the context wrapping done in + // SecondaryAuthorizationUtils::useSecondaryAuthIfAvailable is + // sufficient to preserve the context. + var responseHeaderPreservingListener = ContextPreservingActionListener.wrapPreservingContext( + listener, + threadPool.getThreadContext() + ); + final QueryBuilder extraFilters = request.getStartTime().isPresent() || request.getEndTime().isPresent() ? null : QueryBuilders.boolQuery().mustNot(QueryBuilders.termsQuery(DataTierFieldMapper.NAME, "data_frozen", "data_cold")); @@ -153,18 +165,16 @@ private void previewDatafeed( xContentRegistry, // Fake DatafeedTimingStatsReporter that does not have access to results index new DatafeedTimingStatsReporter(new DatafeedTimingStats(datafeedConfig.getJobId()), (ts, refreshPolicy, listener1) -> {}), - listener.delegateFailure( + responseHeaderPreservingListener.delegateFailure( (l, dataExtractorFactory) -> isDateNanos( previewDatafeedConfig, job.getDataDescription().getTimeField(), - listener.delegateFailure((l2, isDateNanos) -> { - final QueryBuilder hotOnly = QueryBuilders.boolQuery() - .mustNot(QueryBuilders.termsQuery(DataTierFieldMapper.NAME, "data_frozen", "data_cold")); + l.delegateFailure((l2, isDateNanos) -> { final long start = request.getStartTime().orElse(0); final long end = request.getEndTime() .orElse(isDateNanos ? 
DateUtils.MAX_NANOSECOND_INSTANT.toEpochMilli() : Long.MAX_VALUE); DataExtractor dataExtractor = dataExtractorFactory.newExtractor(start, end); - threadPool.executor(UTILITY_THREAD_POOL_NAME).execute(() -> previewDatafeed(dataExtractor, l)); + threadPool.executor(UTILITY_THREAD_POOL_NAME).execute(() -> previewDatafeed(dataExtractor, l2)); }) ) ) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java index d71e99040177f..8cdb8050bd257 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java @@ -71,7 +71,7 @@ protected void masterOperation( ClusterState state, ActionListener listener ) { - datafeedManager.putDatafeed(request, state, licenseState, securityContext, threadPool, listener); + datafeedManager.putDatafeed(request, state, securityContext, threadPool, listener); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutJobAction.java index 767ec08078b42..55f89a993ce61 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutJobAction.java @@ -99,7 +99,6 @@ protected void masterOperation( new PutDatafeedAction.Request(jobCreated.getResponse().getDatafeedConfig().get()), // Use newer state from cluster service as the job creation may have created shared indexes clusterService.state(), - licenseState, securityContext, threadPool, ActionListener.wrap(createdDatafeed -> { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java index 7462b6cd918aa..d6e52b6de1fd4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java @@ -6,8 +6,6 @@ */ package org.elasticsearch.xpack.ml.action; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceNotFoundException; @@ -29,10 +27,10 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.HeaderWarning; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -42,26 +40,24 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.transport.TransportService; -import org.elasticsearch.xcontent.DeprecationHandler; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.MlConfigVersion; -import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; import org.elasticsearch.xpack.core.ml.action.PutTrainedModelAction; import org.elasticsearch.xpack.core.ml.action.PutTrainedModelAction.Request; import org.elasticsearch.xpack.core.ml.action.PutTrainedModelAction.Response; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LenientlyParsedInferenceConfig; @@ -72,8 +68,6 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.MlPlatformArchitecturesUtil; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; import org.elasticsearch.xpack.ml.utils.TaskRetriever; @@ -87,22 +81,18 @@ import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; -import static org.elasticsearch.xpack.core.ml.MlTasks.downloadModelTaskDescription; public class TransportPutTrainedModelAction extends TransportMasterNodeAction { private static final ByteSizeValue MAX_NATIVE_DEFINITION_INDEX_SIZE = ByteSizeValue.ofGb(50); - private static final Logger logger = LogManager.getLogger(TransportPutTrainedModelAction.class); private final TrainedModelProvider trainedModelProvider; private final XPackLicenseState licenseState; private final NamedXContentRegistry xContentRegistry; private final OriginSettingClient client; - private final Settings settings; @Inject public TransportPutTrainedModelAction( - Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, @@ -128,7 +118,6 @@ public TransportPutTrainedModelAction( this.trainedModelProvider = trainedModelProvider; this.xContentRegistry = xContentRegistry; this.client = new OriginSettingClient(client, ML_ORIGIN); - this.settings = settings; } @Override @@ -248,13 +237,15 @@ protected void masterOperation( return; } - ActionListener finalResponseAction = ActionListener.wrap((configToReturn) -> { - finalResponseListener.onResponse(new PutTrainedModelAction.Response(configToReturn)); - }, finalResponseListener::onFailure); + ActionListener finalResponseAction = ActionListener.wrap( + (configToReturn) -> finalResponseListener.onResponse(new 
Response(configToReturn)), + finalResponseListener::onFailure + ); - ActionListener verifyClusterAndModelArchitectures = ActionListener.wrap((configToReturn) -> { - verifyMlNodesAndModelArchitectures(configToReturn, client, threadPool, finalResponseAction); - }, finalResponseListener::onFailure); + ActionListener verifyClusterAndModelArchitectures = ActionListener.wrap( + (configToReturn) -> verifyMlNodesAndModelArchitectures(configToReturn, client, threadPool, finalResponseAction), + finalResponseListener::onFailure + ); ActionListener finishedStoringListener = ActionListener.wrap(bool -> { TrainedModelConfig configToReturn = trainedModelConfig.clearDefinition().build(); @@ -289,7 +280,7 @@ protected void masterOperation( .execute(ActionListener.wrap(stats -> { IndexStats indexStats = stats.getIndices().get(InferenceIndexConstants.nativeDefinitionStore()); if (indexStats != null - && indexStats.getTotal().getStore().getSizeInBytes() > MAX_NATIVE_DEFINITION_INDEX_SIZE.getBytes()) { + && indexStats.getTotal().getStore().sizeInBytes() > MAX_NATIVE_DEFINITION_INDEX_SIZE.getBytes()) { finalResponseListener.onFailure( new ElasticsearchStatusException( "Native model store has exceeded the maximum acceptable size of {}, " @@ -361,7 +352,7 @@ void verifyMlNodesAndModelArchitectures( ThreadPool threadPool, ActionListener configToReturnListener ) { - ActionListener addWarningHeaderOnFailureListener = new ActionListener() { + ActionListener addWarningHeaderOnFailureListener = new ActionListener<>() { @Override public void onResponse(TrainedModelConfig config) { assert Objects.equals(config, configToReturn); @@ -413,38 +404,9 @@ static void checkForExistingTask( }, sendResponseListener::onFailure), timeout); } - private static void getExistingTaskInfo(Client client, String modelId, boolean waitForCompletion, ActionListener listener) { - client.admin() - .cluster() - .prepareListTasks() - .setActions(MlTasks.MODEL_IMPORT_TASK_ACTION) - .setDetailed(true) - .setWaitForCompletion(waitForCompletion) - .setDescriptions(downloadModelTaskDescription(modelId)) - .execute(ActionListener.wrap((response) -> { - var tasks = response.getTasks(); - - if (tasks.size() > 0) { - // there really shouldn't be more than a single task but if there is we'll just use the first one - listener.onResponse(tasks.get(0)); - } else { - listener.onResponse(null); - } - }, e -> { - listener.onFailure( - new ElasticsearchStatusException( - "Unable to retrieve task information for model id [{}]", - RestStatus.INTERNAL_SERVER_ERROR, - e, - modelId - ) - ); - })); - } - private static void getModelInformation(Client client, String modelId, ActionListener listener) { client.execute(GetTrainedModelsAction.INSTANCE, new GetTrainedModelsAction.Request(modelId), ActionListener.wrap(models -> { - if (models.getResources().results().size() == 0) { + if (models.getResources().results().isEmpty()) { listener.onFailure( new ElasticsearchStatusException( "No model information found for a concurrent create model execution for model id [{}]", @@ -563,11 +525,7 @@ static void setTrainedModelConfigFieldsFromPackagedModel( trainedModelConfig.setPlatformArchitecture(resolvedModelPackageConfig.getPlatformArchitecture()); trainedModelConfig.setMetadata(resolvedModelPackageConfig.getMetadata()); trainedModelConfig.setInferenceConfig( - parseInferenceConfigFromModelPackage( - resolvedModelPackageConfig.getInferenceConfigSource(), - xContentRegistry, - LoggingDeprecationHandler.INSTANCE - ) + 
parseInferenceConfigFromModelPackage(resolvedModelPackageConfig.getInferenceConfigSource(), xContentRegistry) ); trainedModelConfig.setTags(resolvedModelPackageConfig.getTags()); trainedModelConfig.setPrefixStrings(resolvedModelPackageConfig.getPrefixStrings()); @@ -578,29 +536,29 @@ static void setTrainedModelConfigFieldsFromPackagedModel( trainedModelConfig.setLocation(trainedModelConfig.getModelType().getDefaultLocation(trainedModelConfig.getModelId())); } - static InferenceConfig parseInferenceConfigFromModelPackage( - Map source, - NamedXContentRegistry namedXContentRegistry, - DeprecationHandler deprecationHandler - ) throws IOException { - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(source); - XContentParser sourceParser = XContentType.JSON.xContent() - .createParser( - XContentParserConfiguration.EMPTY.withRegistry(namedXContentRegistry).withDeprecationHandler(deprecationHandler), - BytesReference.bytes(xContentBuilder).streamInput() - ); - - XContentParser.Token token = sourceParser.nextToken(); - assert token == XContentParser.Token.START_OBJECT; - token = sourceParser.nextToken(); - assert token == XContentParser.Token.FIELD_NAME; - String currentName = sourceParser.currentName(); - - InferenceConfig inferenceConfig = sourceParser.namedObject(LenientlyParsedInferenceConfig.class, currentName, null); - // consume the end object token - token = sourceParser.nextToken(); - assert token == XContentParser.Token.END_OBJECT; - return inferenceConfig; + static InferenceConfig parseInferenceConfigFromModelPackage(Map source, NamedXContentRegistry namedXContentRegistry) + throws IOException { + try ( + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(source); + XContentParser sourceParser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG.withRegistry(namedXContentRegistry), + BytesReference.bytes(xContentBuilder), + XContentType.JSON + ) + ) { + + XContentParser.Token token = sourceParser.nextToken(); + assert token == XContentParser.Token.START_OBJECT; + token = sourceParser.nextToken(); + assert token == XContentParser.Token.FIELD_NAME; + String currentName = sourceParser.currentName(); + + InferenceConfig inferenceConfig = sourceParser.namedObject(LenientlyParsedInferenceConfig.class, currentName, null); + // consume the end object token + token = sourceParser.nextToken(); + assert token == XContentParser.Token.END_OBJECT; + return inferenceConfig; + } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAliasAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAliasAction.java index de760d8fa17ed..79560b8b8e94e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAliasAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAliasAction.java @@ -36,14 +36,14 @@ import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; import org.elasticsearch.xpack.core.ml.action.PutTrainedModelAliasAction; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus; import 
org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; import org.elasticsearch.xpack.ml.notifications.InferenceAuditor; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java index 5450b2752ab97..c01c1f46b3d13 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java @@ -272,7 +272,7 @@ private static void getModelSnapshot( return; } - provider.getModelSnapshot(request.getJobId(), request.getSnapshotId(), modelSnapshot -> { + provider.getModelSnapshot(request.getJobId(), request.getSnapshotId(), true, modelSnapshot -> { if (modelSnapshot == null) { throw missingSnapshotException(request); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetResetModeAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetResetModeAction.java index 2cb8fc847bb62..7c52e086ec43c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetResetModeAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetResetModeAction.java @@ -18,7 +18,7 @@ import org.elasticsearch.xpack.core.action.SetResetModeActionRequest; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.SetResetModeAction; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; public class TransportSetResetModeAction extends AbstractTransportSetResetModeAction { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index 65ef493f664f9..f8b2179eadb31 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -13,6 +13,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.ParentTaskAssigningClient; @@ -190,6 +191,13 @@ protected void masterOperation( return; } + // The datafeed will run its search with the configured headers, + // so preserve the response headers.
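The comment above states the intent of the wrapping call that follows: the datafeed search runs under the headers stored in the datafeed config, and response headers collected along the way (for example deprecation warnings) must flow back to the caller's thread context. A minimal sketch of the wrapper's behavior, assuming only an Elasticsearch `ThreadContext`; the `trace` header name is invented for illustration:

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ContextPreservingActionListener;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;

public class ContextPreservingListenerSketch {
    public static void main(String[] args) {
        ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
        ActionListener<Void> listener = ActionListener.wrap(
            r -> System.out.println("header seen by listener: " + threadContext.getHeader("trace")),
            e -> {}
        );
        ActionListener<Void> preserving;
        try (ThreadContext.StoredContext ignored = threadContext.stashContext()) {
            // The caller's context is captured at wrap time.
            threadContext.putHeader("trace", "caller");
            preserving = ContextPreservingActionListener.wrapPreservingContext(listener, threadContext);
        }
        try (ThreadContext.StoredContext ignored = threadContext.stashContext()) {
            // Respond while running under different headers (think: the datafeed's).
            threadContext.putHeader("trace", "datafeed");
            preserving.onResponse(null); // prints "caller": the captured context is restored first
        }
    }
}
```

The `wrapPreservingContext` call in the hunk below applies this pattern to the start-datafeed listener.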
+ var responseHeaderPreservingListener = ContextPreservingActionListener.wrapPreservingContext( + listener, + threadPool.getThreadContext() + ); + AtomicReference datafeedConfigHolder = new AtomicReference<>(); PersistentTasksCustomMetadata tasks = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); @@ -197,7 +205,7 @@ protected void masterOperation( new ActionListener<>() { @Override public void onResponse(PersistentTasksCustomMetadata.PersistentTask persistentTask) { - waitForDatafeedStarted(persistentTask.getId(), params, listener); + waitForDatafeedStarted(persistentTask.getId(), params, responseHeaderPreservingListener); } @Override @@ -209,7 +217,7 @@ public void onFailure(Exception e) { RestStatus.CONFLICT ); } - listener.onFailure(e); + responseHeaderPreservingListener.onFailure(e); } }; @@ -228,9 +236,9 @@ public void onFailure(Exception e) { ), ActionListener.wrap(response -> { if (response.isSuccess() == false) { - listener.onFailure(createUnlicensedError(params.getDatafeedId(), response)); + responseHeaderPreservingListener.onFailure(createUnlicensedError(params.getDatafeedId(), response)); } else if (remoteClusterClient == false) { - listener.onFailure( + responseHeaderPreservingListener.onFailure( ExceptionsHelper.badRequestException( Messages.getMessage( Messages.DATAFEED_NEEDS_REMOTE_CLUSTER_SEARCH, @@ -254,7 +262,7 @@ public void onFailure(Exception e) { createDataExtractor(task, job, datafeedConfigHolder.get(), params, waitForTaskListener); } }, - e -> listener.onFailure( + e -> responseHeaderPreservingListener.onFailure( createUnknownLicenseError( params.getDatafeedId(), RemoteClusterLicenseChecker.remoteIndices(params.getDatafeedIndices()), @@ -273,7 +281,7 @@ public void onFailure(Exception e) { validate(job, datafeedConfigHolder.get(), tasks, xContentRegistry); auditDeprecations(datafeedConfigHolder.get(), job, auditor, xContentRegistry); createDataExtractor.accept(job); - }, listener::onFailure); + }, responseHeaderPreservingListener::onFailure); ActionListener datafeedListener = ActionListener.wrap(datafeedBuilder -> { DatafeedConfig datafeedConfig = datafeedBuilder.build(); @@ -283,7 +291,7 @@ public void onFailure(Exception e) { datafeedConfigHolder.set(datafeedConfig); jobConfigProvider.getJob(datafeedConfig.getJobId(), null, jobListener); - }, listener::onFailure); + }, responseHeaderPreservingListener::onFailure); datafeedConfigProvider.getDatafeedConfig(params.getDatafeedId(), null, datafeedListener); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java index 4a569b374582a..0a5641836df4a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java @@ -33,6 +33,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.inference.TaskType; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; @@ -43,8 +44,8 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import 
org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.inference.action.GetInferenceModelAction; import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; @@ -56,13 +57,13 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.IndexLocation; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.TransportVersionUtils; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentService; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelDefinitionDoc; import org.elasticsearch.xpack.ml.notifications.InferenceAuditor; @@ -108,7 +109,6 @@ public TransportStartTrainedModelDeploymentAction( XPackLicenseState licenseState, IndexNameExpressionResolver indexNameExpressionResolver, TrainedModelAssignmentService trainedModelAssignmentService, - NamedXContentRegistry xContentRegistry, MlMemoryTracker memoryTracker, InferenceAuditor auditor ) { @@ -222,6 +222,8 @@ protected void masterOperation( ); }, listener::onFailure); + GetTrainedModelsAction.Request getModelWithDeploymentId = new GetTrainedModelsAction.Request(request.getDeploymentId()); + ActionListener getModelListener = ActionListener.wrap(getModelResponse -> { if (getModelResponse.getResources().results().size() > 1) { listener.onFailure( @@ -254,44 +256,60 @@ protected void masterOperation( return; } + ActionListener checkDeploymentIdDoesntAlreadyExist = ActionListener.wrap( + response -> listener.onFailure( + ExceptionsHelper.badRequestException( + "Deployment id [{}] is the same as another model which is not the model being deployed. 
" + + "Deployment id can be the same as the model being deployed but cannot match a different model", + request.getDeploymentId(), + request.getModelId() + ) + ), + error -> { + if (ExceptionsHelper.unwrapCause(error) instanceof ResourceNotFoundException) { + // no name clash, continue with the deployment + checkFullModelDefinitionIsPresent(client, trainedModelConfig, true, request.getTimeout(), modelSizeListener); + } else { + listener.onFailure(error); + } + } + ); + // If the model id isn't the same id as the deployment id - // check there isn't another model with deployment id + // check there isn't another model with that deployment id if (request.getModelId().equals(request.getDeploymentId()) == false) { - GetTrainedModelsAction.Request getModelWithDeploymentId = new GetTrainedModelsAction.Request(request.getDeploymentId()); - client.execute( - GetTrainedModelsAction.INSTANCE, - getModelWithDeploymentId, - ActionListener.wrap( - response -> listener.onFailure( - ExceptionsHelper.badRequestException( - "Deployment id [{}] is the same as an another model which is not the model being deployed. " - + "Deployment id can be the same as the model being deployed but cannot match a different model", - request.getDeploymentId(), - request.getModelId() - ) - ), - error -> { - if (ExceptionsHelper.unwrapCause(error) instanceof ResourceNotFoundException) { - // no name clash, continue with the deployment - checkFullModelDefinitionIsPresent( - client, - trainedModelConfig, - true, - request.getTimeout(), - modelSizeListener - ); - } else { - listener.onFailure(error); - } - } - ) - ); + client.execute(GetTrainedModelsAction.INSTANCE, getModelWithDeploymentId, checkDeploymentIdDoesntAlreadyExist); } else { checkFullModelDefinitionIsPresent(client, trainedModelConfig, true, request.getTimeout(), modelSizeListener); } }, listener::onFailure); + ActionListener getInferenceModelListener = ActionListener.wrap((getInferenceModelResponse) -> { + if (getInferenceModelResponse.getModels().isEmpty() == false) { + listener.onFailure( + ExceptionsHelper.badRequestException(Messages.MODEL_ID_MATCHES_EXISTING_MODEL_IDS_BUT_MUST_NOT, request.getModelId()) + ); + } else { + getTrainedModelRequestExecution(request, getModelListener); + } + }, error -> { + if (ExceptionsHelper.unwrapCause(error) instanceof ResourceNotFoundException) { + // no name clash, continue with the deployment + getTrainedModelRequestExecution(request, getModelListener); + } else { + listener.onFailure(error); + } + }); + + GetInferenceModelAction.Request getModelRequest = new GetInferenceModelAction.Request(request.getModelId(), TaskType.ANY); + client.execute(GetInferenceModelAction.INSTANCE, getModelRequest, getInferenceModelListener); + } + + private void getTrainedModelRequestExecution( + StartTrainedModelDeploymentAction.Request request, + ActionListener getModelListener + ) { GetTrainedModelsAction.Request getModelRequest = new GetTrainedModelsAction.Request(request.getModelId()); client.execute(GetTrainedModelsAction.INSTANCE, getModelRequest, getModelListener); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java index 6e90d097d1e9f..a3eb15a372d2a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java @@ -17,8 +17,6 @@ import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; -import org.elasticsearch.client.internal.Client; -import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -35,10 +33,10 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.StopTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentClusterService; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.deployment.TrainedModelDeploymentTask; import org.elasticsearch.xpack.ml.notifications.InferenceAuditor; @@ -48,7 +46,6 @@ import java.util.Set; import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.ml.action.TransportDeleteTrainedModelAction.getModelAliases; import static org.elasticsearch.xpack.ml.action.TransportDeleteTrainedModelAction.getReferencedModelKeys; @@ -66,7 +63,6 @@ public class TransportStopTrainedModelDeploymentAction extends TransportTasksAct private static final Logger logger = LogManager.getLogger(TransportStopTrainedModelDeploymentAction.class); - private final Client client; private final IngestService ingestService; private final TrainedModelAssignmentClusterService trainedModelAssignmentClusterService; private final InferenceAuditor auditor; @@ -76,7 +72,6 @@ public TransportStopTrainedModelDeploymentAction( ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, - Client client, IngestService ingestService, TrainedModelAssignmentClusterService trainedModelAssignmentClusterService, InferenceAuditor auditor @@ -91,7 +86,6 @@ public TransportStopTrainedModelDeploymentAction( StopTrainedModelDeploymentAction.Response::new, EsExecutors.DIRECT_EXECUTOR_SERVICE ); - this.client = new OriginSettingClient(client, ML_ORIGIN); this.ingestService = ingestService; this.trainedModelAssignmentClusterService = trainedModelAssignmentClusterService; this.auditor = Objects.requireNonNull(auditor); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java index c1e77d953ab54..22d22cb9d0f73 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java @@ -18,15 +18,13 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.service.ClusterService; -import 
org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -42,7 +40,6 @@ import org.elasticsearch.xpack.ml.job.JobManager; import java.io.IOException; -import java.io.InputStream; import java.util.Collections; import java.util.SortedSet; import java.util.TreeSet; @@ -60,8 +57,7 @@ public TransportUpdateFilterAction( TransportService transportService, ActionFilters actionFilters, Client client, - JobManager jobManager, - ClusterService clusterService + JobManager jobManager ) { super( UpdateFilterAction.NAME, @@ -167,11 +163,12 @@ private void getFilterWithVersion(String filterId, ActionListener { try { if (getDocResponse.isExists()) { - BytesReference docSource = getDocResponse.getSourceAsBytesRef(); try ( - InputStream stream = docSource.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + getDocResponse.getSourceAsBytesRef(), + XContentType.JSON + ) ) { MlFilter filter = MlFilter.LENIENT_PARSER.apply(parser, null).build(); l.onResponse(new FilterWithSeqNo(filter, getDocResponse)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateModelSnapshotAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateModelSnapshotAction.java index f0872bccc8378..097be745996ab 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateModelSnapshotAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateModelSnapshotAction.java @@ -71,7 +71,8 @@ protected void doExecute( ActionListener listener ) { logger.debug("Received request to update model snapshot [{}] for job [{}]", request.getSnapshotId(), request.getJobId()); - jobResultsProvider.getModelSnapshot(request.getJobId(), request.getSnapshotId(), modelSnapshot -> { + // Even though the quantiles can be large we have to fetch them initially so that the updated document is complete + jobResultsProvider.getModelSnapshot(request.getJobId(), request.getSnapshotId(), true, modelSnapshot -> { if (modelSnapshot == null) { listener.onFailure( new ResourceNotFoundException( @@ -81,8 +82,7 @@ protected void doExecute( } else { Result updatedSnapshot = applyUpdate(request, modelSnapshot); indexModelSnapshot(updatedSnapshot, b -> { - // The quantiles can be large, and totally dominate the output - - // it's clearer to remove them + // The quantiles can be large, and totally dominate the output - it's clearer to remove them at this stage listener.onResponse( new UpdateModelSnapshotAction.Response(new ModelSnapshot.Builder(updatedSnapshot.result).setQuantiles(null).build()) ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpgradeJobModelSnapshotAction.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpgradeJobModelSnapshotAction.java index 3f6193c124a9a..15c1d53f7bdf8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpgradeJobModelSnapshotAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpgradeJobModelSnapshotAction.java @@ -223,6 +223,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A jobResultsProvider.getModelSnapshot( request.getJobId(), request.getSnapshotId(), + false, getSnapshotHandler::onResponse, getSnapshotHandler::onFailure ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregationBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregationBuilder.java index 6fce8aa20ed16..9262ac65b5cfd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregationBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregationBuilder.java @@ -23,7 +23,6 @@ import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.job.config.CategorizationAnalyzerConfig; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -85,10 +84,6 @@ public class CategorizeTextAggregationBuilder extends AbstractAggregationBuilder PARSER.declareInt(CategorizeTextAggregationBuilder::size, REQUIRED_SIZE_FIELD_NAME); } - public static CategorizeTextAggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException { - return PARSER.parse(parser, new CategorizeTextAggregationBuilder(aggregationName), null); - } - private TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds( DEFAULT_BUCKET_COUNT_THRESHOLDS ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregator.java index 613b36882f919..eebe4e49776e5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregator.java @@ -7,13 +7,15 @@ package org.elasticsearch.xpack.ml.aggs.changepoint; +import org.apache.commons.math3.distribution.UniformRealDistribution; import org.apache.commons.math3.exception.NotStrictlyPositiveException; +import org.apache.commons.math3.random.RandomGeneratorFactory; import org.apache.commons.math3.special.Beta; import org.apache.commons.math3.stat.inference.KolmogorovSmirnovTest; import org.apache.commons.math3.stat.regression.SimpleRegression; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.core.Tuple; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -21,11 +23,12 @@ import org.elasticsearch.search.aggregations.pipeline.BucketHelpers; import 
org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; import org.elasticsearch.xpack.ml.aggs.MlAggsHelper; +import org.elasticsearch.xpack.ml.aggs.changepoint.ChangeType.Indeterminable; import java.util.Arrays; -import java.util.HashSet; import java.util.Map; import java.util.Optional; +import java.util.Random; import java.util.Set; import java.util.function.IntToDoubleFunction; import java.util.stream.IntStream; @@ -37,18 +40,51 @@ public class ChangePointAggregator extends SiblingPipelineAggregator { private static final Logger logger = LogManager.getLogger(ChangePointAggregator.class); - static final double P_VALUE_THRESHOLD = 0.025; + static final double P_VALUE_THRESHOLD = 0.01; private static final int MINIMUM_BUCKETS = 10; private static final int MAXIMUM_CANDIDATE_CHANGE_POINTS = 1000; + private static final int MAXIMUM_SAMPLE_SIZE_FOR_KS_TEST = 500; private static final KolmogorovSmirnovTest KOLMOGOROV_SMIRNOV_TEST = new KolmogorovSmirnovTest(); - static Tuple candidateChangePoints(double[] values) { + private static double changePValueThreshold(int nValues) { + // This was obtained by simulating the test power for a fixed effect size as a + // function of the bucket value count. + return P_VALUE_THRESHOLD * Math.exp(-0.04 * (double) (nValues - 2 * (MINIMUM_BUCKETS + 1))); + } + + private static int lowerBound(int[] x, int start, int end, int xs) { + int retVal = Arrays.binarySearch(x, start, end, xs); + if (retVal < 0) { + retVal = -1 - retVal; + } + return retVal; + } + + private record SampleData(double[] values, double[] weights, Integer[] changePoints) {} + + private record DataStats(double nValues, double mean, double var, int nCandidateChangePoints) { + boolean varianceZeroToWorkingPrecision() { + // Our variance calculation is only accurate to ulp(length * mean)^(1/2), + // i.e. we compute it using the difference of squares method and don't use + // the Kahan correction. We treat anything that is zero to working precision as + // zero. We should at some point switch to a more numerically stable approach + // for computing data statistics. 
+ return var < Math.sqrt(Math.ulp(2.0 * nValues * mean)); + } + + @Override + public String toString() { + return "DataStats{nValues=" + nValues + ", mean=" + mean + ", var=" + var + ", nCandidates=" + nCandidateChangePoints + "}"; + } + } + + static int[] computeCandidateChangePoints(double[] values) { int minValues = Math.max((int) (0.1 * values.length + 0.5), MINIMUM_BUCKETS); if (values.length - 2 * minValues <= MAXIMUM_CANDIDATE_CHANGE_POINTS) { - return Tuple.tuple(IntStream.range(minValues, values.length - minValues).toArray(), 1); + return IntStream.range(minValues, values.length - minValues).toArray(); } else { int step = (int) Math.ceil((double) (values.length - 2 * minValues) / MAXIMUM_CANDIDATE_CHANGE_POINTS); - return Tuple.tuple(IntStream.range(minValues, values.length - minValues).filter(i -> i % step == 0).toArray(), step); + return IntStream.range(minValues, values.length - minValues).filter(i -> i % step == 0).toArray(); } } @@ -87,190 +123,150 @@ public InternalAggregation doReduce(Aggregations aggregations, AggregationReduce ) ); } - Tuple candidatePoints = candidateChangePoints(bucketValues.getValues()); - ChangeType changeType = changePValue(bucketValues, candidatePoints, P_VALUE_THRESHOLD); - if (changeType.pValue() > P_VALUE_THRESHOLD) { - try { - SpikeAndDipDetector detect = new SpikeAndDipDetector(bucketValues.getValues()); - changeType = detect.at(P_VALUE_THRESHOLD); - } catch (NotStrictlyPositiveException nspe) { - logger.debug("failure calculating spikes", nspe); - } + + ChangeType spikeOrDip = testForSpikeOrDip(bucketValues, P_VALUE_THRESHOLD); + + // Test for change step, trend and distribution changes. + ChangeType change = testForChange(bucketValues, changePValueThreshold(bucketValues.getValues().length)); + logger.trace("change p-value: [{}]", change.pValue()); + + if (spikeOrDip.pValue() < change.pValue()) { + change = spikeOrDip; } + ChangePointBucket changePointBucket = null; - if (changeType.changePoint() >= 0) { - changePointBucket = extractBucket(bucketsPaths()[0], aggregations, changeType.changePoint()).map( + if (change.changePoint() >= 0) { + changePointBucket = extractBucket(bucketsPaths()[0], aggregations, change.changePoint()).map( b -> new ChangePointBucket(b.getKey(), b.getDocCount(), (InternalAggregations) b.getAggregations()) ).orElse(null); } - return new InternalChangePointAggregation(name(), metadata(), changePointBucket, changeType); + return new InternalChangePointAggregation(name(), metadata(), changePointBucket, change); } - static ChangeType changePValue( - MlAggsHelper.DoubleBucketValues bucketValues, - Tuple candidateChangePointsAndStep, - double pValueThreshold - ) { - double[] timeWindow = bucketValues.getValues(); - double totalUnweightedVariance = RunningStats.from(timeWindow, i -> 1.0).variance(); - ChangeType changeType = new ChangeType.Stationary(); - if (totalUnweightedVariance == 0.0) { - return changeType; + static ChangeType testForSpikeOrDip(MlAggsHelper.DoubleBucketValues bucketValues, double pValueThreshold) { + try { + SpikeAndDipDetector detect = new SpikeAndDipDetector(bucketValues.getValues()); + ChangeType result = detect.at(pValueThreshold); + logger.trace("spike or dip p-value: [{}]", result.pValue()); + return result; + } catch (NotStrictlyPositiveException nspe) { + logger.debug("failure testing for dips and spikes", nspe); } + return new Indeterminable("failure testing for dips and spikes"); + } + + static ChangeType testForChange(MlAggsHelper.DoubleBucketValues bucketValues, double pValueThreshold) { + 
double[] timeWindow = bucketValues.getValues(); + return testForChange(timeWindow, pValueThreshold).changeType(bucketValues, slope(timeWindow)); + } + + static TestStats testForChange(double[] timeWindow, double pValueThreshold) { + + int[] candidateChangePoints = computeCandidateChangePoints(timeWindow); + logger.trace("candidatePoints: [{}]", Arrays.toString(candidateChangePoints)); + double[] timeWindowWeights = outlierWeights(timeWindow); - int[] candidateChangePoints = candidateChangePointsAndStep.v1(); - int step = candidateChangePointsAndStep.v2(); - double totalVariance = RunningStats.from(timeWindow, i -> timeWindowWeights[i]).variance(); - double vNull = totalVariance; - if (totalVariance == 0.0) { - return changeType; - } - double n = timeWindow.length; - double dfNull = n - 1; - LeastSquaresOnlineRegression allLeastSquares = new LeastSquaresOnlineRegression(2); - for (int i = 0; i < timeWindow.length; i++) { - allLeastSquares.add(i, timeWindow[i], timeWindowWeights[i]); - } - double rValue = allLeastSquares.rSquared(); - - double vAlt = totalVariance * (1 - Math.abs(rValue)); - double dfAlt = n - 3; - double pValueVsNull = fTestPValue(vNull, dfNull, vAlt, dfAlt); - if (pValueVsNull < pValueThreshold && Math.abs(rValue) >= 0.5) { - double pValueVsStationary = fTestPValue(totalVariance, n - 1, vAlt, dfAlt); - SimpleRegression regression = new SimpleRegression(); - for (int i = 0; i < timeWindow.length; i++) { - regression.addData(i, timeWindow[i]); - } - double slope = regression.getSlope(); - changeType = new ChangeType.NonStationary(pValueVsStationary, rValue, slope < 0 ? "decreasing" : "increasing"); - vNull = vAlt; - dfNull = dfAlt; - } - RunningStats lowerRange = new RunningStats(); - RunningStats upperRange = new RunningStats(); - // Initialize running stats so that they are only missing the individual changepoint values - upperRange.addValues(timeWindow, i -> timeWindowWeights[i], candidateChangePoints[0], timeWindow.length); - lowerRange.addValues(timeWindow, i -> timeWindowWeights[i], 0, candidateChangePoints[0]); - vAlt = Double.MAX_VALUE; - Set discoveredChangePoints = new HashSet<>(3, 1.0f); - int changePoint = candidateChangePoints[candidateChangePoints.length - 1] + 1; - for (int cp : candidateChangePoints) { - double maybeVAlt = (cp * lowerRange.variance() + (n - cp) * upperRange.variance()) / n; - if (maybeVAlt < vAlt) { - vAlt = maybeVAlt; - changePoint = cp; - } - lowerRange.addValues(timeWindow, i -> timeWindowWeights[i], cp, cp + step); - upperRange.removeValues(timeWindow, i -> timeWindowWeights[i], cp, cp + step); - } - discoveredChangePoints.add(changePoint); - dfAlt = n - 2; + logger.trace("timeWindow: [{}]", Arrays.toString(timeWindow)); + logger.trace("timeWindowWeights: [{}]", Arrays.toString(timeWindowWeights)); + RunningStats dataRunningStats = RunningStats.from(timeWindow, i -> timeWindowWeights[i]); + DataStats dataStats = new DataStats( + dataRunningStats.count(), + dataRunningStats.mean(), + dataRunningStats.variance(), + candidateChangePoints.length + ); + logger.trace("dataStats: [{}]", dataStats); + TestStats stationary = new TestStats(Type.STATIONARY, 1.0, dataStats.var(), 1.0, dataStats); - pValueVsNull = independentTrialsPValue(fTestPValue(vNull, dfNull, vAlt, dfAlt), candidateChangePoints.length); - if (pValueVsNull < pValueThreshold) { - changeType = new ChangeType.StepChange(pValueVsNull, bucketValues.getBucketIndex(changePoint)); - vNull = vAlt; - dfNull = dfAlt; + if (dataStats.varianceZeroToWorkingPrecision()) { + return stationary; } 
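The `varianceZeroToWorkingPrecision` guard used above exists because `RunningStats` computes variance by the difference-of-squares formula, which cancels catastrophically when the mean dwarfs the spread. A self-contained sketch (illustrative numbers only) contrasting that formula with Welford's algorithm, the kind of numerically stable approach the comment in `DataStats` alludes to:

```java
public class VarianceSketch {
    public static void main(String[] args) {
        // Constant series with a huge offset: the true variance is exactly 0.
        double[] values = new double[1000];
        java.util.Arrays.fill(values, 1e9 + 0.1);

        // Difference of squares, E[x^2] - E[x]^2, as RunningStats computes it.
        double sum = 0, sumOfSqrs = 0;
        for (double v : values) {
            sum += v;
            sumOfSqrs += v * v;
        }
        double naive = sumOfSqrs / values.length - (sum / values.length) * (sum / values.length);

        // Welford's online algorithm: numerically stable.
        double mean = 0, m2 = 0;
        int n = 0;
        for (double v : values) {
            n++;
            double delta = v - mean;
            mean += delta / n;
            m2 += delta * (v - mean);
        }
        double welford = m2 / n;

        System.out.println("difference of squares: " + naive);   // rounding garbage, can even be negative pre-clamp
        System.out.println("welford:               " + welford); // ~0.0
    }
}
```

This is why anything below sqrt(ulp(2 * n * mean)) is treated as zero rather than trusted as a real signal.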
- VarianceAndRValue vAndR = new VarianceAndRValue(Double.MAX_VALUE, Double.MAX_VALUE); - changePoint = candidateChangePoints[candidateChangePoints.length - 1] + 1; - lowerRange = new RunningStats(); - upperRange = new RunningStats(); - // Initialize running stats so that they are only missing the individual changepoint values - upperRange.addValues(timeWindow, i -> timeWindowWeights[i], candidateChangePoints[0], timeWindow.length); - lowerRange.addValues(timeWindow, i -> timeWindowWeights[i], 0, candidateChangePoints[0]); - LeastSquaresOnlineRegression lowerLeastSquares = new LeastSquaresOnlineRegression(2); - LeastSquaresOnlineRegression upperLeastSquares = new LeastSquaresOnlineRegression(2); - for (int i = 0; i < candidateChangePoints[0]; i++) { - lowerLeastSquares.add(i, timeWindow[i], timeWindowWeights[i]); - } - for (int i = candidateChangePoints[0], x = 0; i < timeWindow.length; i++, x++) { - upperLeastSquares.add(x, timeWindow[i], timeWindowWeights[i]); - } - int upperMovingWindow = 0; - for (int cp : candidateChangePoints) { - double lowerRangeVar = lowerRange.variance(); - double upperRangeVar = upperRange.variance(); - double rv1 = lowerLeastSquares.rSquared(); - double rv2 = upperLeastSquares.rSquared(); - double v1 = lowerRangeVar * (1 - Math.abs(rv1)); - double v2 = upperRangeVar * (1 - Math.abs(rv2)); - VarianceAndRValue varianceAndRValue = new VarianceAndRValue((cp * v1 + (n - cp) * v2) / n, (cp * rv1 + (n - cp) * rv2) / n); - if (varianceAndRValue.compareTo(vAndR) < 0) { - vAndR = varianceAndRValue; - changePoint = cp; - } - for (int i = 0; i < step; i++) { - lowerRange.addValue(timeWindow[i + cp], timeWindowWeights[i + cp]); - upperRange.removeValue(timeWindow[i + cp], timeWindowWeights[i + cp]); - lowerLeastSquares.add(i + cp, timeWindow[i + cp], timeWindowWeights[i + cp]); - upperLeastSquares.remove(i + upperMovingWindow, timeWindow[i + cp], timeWindowWeights[i + cp]); - upperMovingWindow++; + TestStats trendVsStationary = testTrendVs(stationary, timeWindow, timeWindowWeights); + logger.trace("trend vs stationary: [{}]", trendVsStationary); + + TestStats best = stationary; + Set discoveredChangePoints = Sets.newHashSetWithExpectedSize(4); + if (trendVsStationary.accept(pValueThreshold)) { + // Check if there is a change in the trend. + TestStats trendChangeVsTrend = testTrendChangeVs(trendVsStationary, timeWindow, timeWindowWeights, candidateChangePoints); + discoveredChangePoints.add(trendChangeVsTrend.changePoint()); + logger.trace("trend change vs trend: [{}]", trendChangeVsTrend); + + if (trendChangeVsTrend.accept(pValueThreshold)) { + // Check if modeling a trend change adds much over modeling a step change. 
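The accept-and-compare cascade in this method, including the `testVsStepChange` call that follows, bottoms out in `fTestNestedPValue` (defined later in this hunk): the classical F-test for nested least-squares models, F = (vNull - vAlt)/(pAlt - pNull) * (n - pAlt)/vAlt. A small numeric illustration with made-up residual variances, using the same commons-math dependency the aggregator already imports:

```java
import org.apache.commons.math3.distribution.FDistribution;

public class NestedFTestSketch {
    public static void main(String[] args) {
        // Hypothetical fit: n = 100 buckets; the 1-parameter stationary model leaves
        // residual variance 4.0, the 3-parameter trend model leaves 2.0.
        double n = 100, vNull = 4.0, pNull = 1.0, vAlt = 2.0, pAlt = 3.0;
        // The same statistic fTestNestedPValue computes.
        double f = (vNull - vAlt) / (pAlt - pNull) * (n - pAlt) / vAlt;
        FDistribution fDist = new FDistribution(pAlt - pNull, n - pAlt);
        double pValue = 1.0 - fDist.cumulativeProbability(f); // survival function
        System.out.println("F = " + f + ", p-value ~ " + pValue);
        // F = 48.5 on (2, 97) degrees of freedom: the extra trend parameters earn their keep.
    }
}
```

Note that `fTestNestedPValue` in the patch additionally doubles the tail probability and caps it at 1.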
+ best = testVsStepChange(trendChangeVsTrend, timeWindow, timeWindowWeights, candidateChangePoints, pValueThreshold); + } else { + best = trendVsStationary; } - } - discoveredChangePoints.add(changePoint); - dfAlt = n - 6; - pValueVsNull = independentTrialsPValue(fTestPValue(vNull, dfNull, vAndR.variance, dfAlt), candidateChangePoints.length); - if (pValueVsNull < pValueThreshold && Math.abs(vAndR.rValue) >= 0.5) { - double pValueVsStationary = independentTrialsPValue( - fTestPValue(totalVariance, n - 1, vAndR.variance, dfAlt), - candidateChangePoints.length - ); - changeType = new ChangeType.TrendChange(pValueVsStationary, vAndR.rValue, bucketValues.getBucketIndex(changePoint)); - } - - if (changeType.pValue() > 1e-5) { - double diff = 0.0; - changePoint = -1; - lowerRange = new RunningStats(); - upperRange = new RunningStats(); - // Initialize running stats so that they are only missing the individual changepoint values - upperRange.addValues(timeWindow, i -> timeWindowWeights[i], candidateChangePoints[0], timeWindow.length); - lowerRange.addValues(timeWindow, i -> timeWindowWeights[i], 0, candidateChangePoints[0]); - for (int cp : candidateChangePoints) { - double otherDiff = Math.min(cp, timeWindow.length - cp) * (0.9 * Math.abs(lowerRange.mean() - upperRange.mean())) + 0.1 - * Math.abs(lowerRange.std() - upperRange.std()); - if (otherDiff >= diff) { - changePoint = cp; - diff = otherDiff; + } else { + // Check if there is a step change. + TestStats stepChangeVsStationary = testStepChangeVs(stationary, timeWindow, timeWindowWeights, candidateChangePoints); + discoveredChangePoints.add(stepChangeVsStationary.changePoint()); + logger.trace("step change vs stationary: [{}]", stepChangeVsStationary); + + if (stepChangeVsStationary.accept(pValueThreshold)) { + // Check if modeling a trend change adds much over modeling a step change. + TestStats trendChangeVsStepChange = testTrendChangeVs( + stepChangeVsStationary, + timeWindow, + timeWindowWeights, + candidateChangePoints + ); + discoveredChangePoints.add(trendChangeVsStepChange.changePoint()); + logger.trace("trend change vs step change: [{}]", trendChangeVsStepChange); + if (trendChangeVsStepChange.accept(pValueThreshold)) { + best = trendChangeVsStepChange; + } else { + best = stepChangeVsStationary; }
+ TestStats trendChangeVsStationary = testTrendChangeVs(stationary, timeWindow, timeWindowWeights, candidateChangePoints); + discoveredChangePoints.add(trendChangeVsStationary.changePoint()); + logger.trace("trend change vs stationary: [{}]", trendChangeVsStationary); + if (trendChangeVsStationary.accept(pValueThreshold)) { + best = trendChangeVsStationary; + } + } + } + + logger.trace("best: [{}]", best.pValueVsStationary()); + + // We're not very confident in the change point, so check if a distribution change + // fits the data better. + if (best.pValueVsStationary() > 1e-5) { + TestStats distChange = testDistributionChange( + dataStats, + timeWindow, + timeWindowWeights, + candidateChangePoints, + discoveredChangePoints + ); + logger.trace("distribution change: [{}]", distChange); + if (distChange.pValue() < Math.min(pValueThreshold, 0.1 * best.pValueVsStationary())) { + best = distChange; } } - return changeType; + + return best; } static double[] outlierWeights(double[] values) { int i = (int) Math.ceil(0.025 * values.length); double[] weights = Arrays.copyOf(values, values.length); Arrays.sort(weights); + // We have to be careful here if we have a lot of duplicate values. To avoid marking + // runs of duplicates as outliers we define outliers to be the smallest (largest) + // value strictly less (greater) than the value at i (values.length - i - 1). This + // means if i lands in a run of duplicates the entire run will be marked as inliers. double a = weights[i]; - double b = weights[values.length - i]; + double b = weights[values.length - i - 1]; for (int j = 0; j < values.length; j++) { - if (values[j] < b && values[j] >= a) { + if (values[j] <= b && values[j] >= a) { weights[j] = 1.0; } else { weights[j] = 0.01; @@ -279,22 +275,303 @@ static double[] outlierWeights(double[] values) { return weights; } + static double slope(double[] values) { + SimpleRegression regression = new SimpleRegression(); + for (int i = 0; i < values.length; i++) { + regression.addData(i, values[i]); + } + return regression.getSlope(); + } + static double independentTrialsPValue(double pValue, int nTrials) { return pValue > 1e-10 ? 
1.0 - Math.pow(1.0 - pValue, nTrials) : nTrials * pValue; } - static double fTestPValue(double vNull, double dfNull, double varianceAlt, double dfAlt) { - if (varianceAlt == vNull) { + static TestStats testTrendVs(TestStats H0, double[] values, double[] weights) { + LeastSquaresOnlineRegression allLeastSquares = new LeastSquaresOnlineRegression(2); + for (int i = 0; i < values.length; i++) { + allLeastSquares.add(i, values[i], weights[i]); + } + double vTrend = H0.dataStats().var() * (1.0 - allLeastSquares.rSquared()); + double pValue = fTestNestedPValue(H0.dataStats().nValues(), H0.var(), H0.nParams(), vTrend, 3.0); + return new TestStats(Type.NON_STATIONARY, pValue, vTrend, 3.0, H0.dataStats()); + } + + static TestStats testStepChangeVs(TestStats H0, double[] values, double[] weights, int[] candidateChangePoints) { + + double vStep = Double.MAX_VALUE; + int changePoint = -1; + + // Initialize running stats so that they are only missing the individual changepoint values + RunningStats lowerRange = new RunningStats(); + RunningStats upperRange = new RunningStats(); + upperRange.addValues(values, i -> weights[i], candidateChangePoints[0], values.length); + lowerRange.addValues(values, i -> weights[i], 0, candidateChangePoints[0]); + double mean = H0.dataStats().mean(); + int last = candidateChangePoints[0]; + for (int cp : candidateChangePoints) { + lowerRange.addValues(values, i -> weights[i], last, cp); + upperRange.removeValues(values, i -> weights[i], last, cp); + last = cp; + double nl = lowerRange.count(); + double nu = upperRange.count(); + double ml = lowerRange.mean(); + double mu = upperRange.mean(); + double vl = lowerRange.variance(); + double vu = upperRange.variance(); + double v = (nl * vl + nu * vu) / (nl + nu); + if (v < vStep) { + vStep = v; + changePoint = cp; + } + } + + double pValue = independentTrialsPValue( + fTestNestedPValue(H0.dataStats().nValues(), H0.var(), H0.nParams(), vStep, 2.0), + candidateChangePoints.length + ); + + return new TestStats(Type.STEP_CHANGE, pValue, vStep, 2.0, changePoint, H0.dataStats()); + } + + static TestStats testTrendChangeVs(TestStats H0, double[] values, double[] weights, int[] candidateChangePoints) { + + double vChange = Double.MAX_VALUE; + int changePoint = -1; + + // Initialize running stats so that they are only missing the individual changepoint values + RunningStats lowerRange = new RunningStats(); + RunningStats upperRange = new RunningStats(); + lowerRange.addValues(values, i -> weights[i], 0, candidateChangePoints[0]); + upperRange.addValues(values, i -> weights[i], candidateChangePoints[0], values.length); + LeastSquaresOnlineRegression lowerLeastSquares = new LeastSquaresOnlineRegression(2); + LeastSquaresOnlineRegression upperLeastSquares = new LeastSquaresOnlineRegression(2); + int first = candidateChangePoints[0]; + int last = candidateChangePoints[0]; + for (int i = 0; i < candidateChangePoints[0]; i++) { + lowerLeastSquares.add(i, values[i], weights[i]); + } + for (int i = candidateChangePoints[0]; i < values.length; i++) { + upperLeastSquares.add(i - first, values[i], weights[i]); + } + for (int cp : candidateChangePoints) { + for (int i = last; i < cp; i++) { + lowerRange.addValue(values[i], weights[i]); + upperRange.removeValue(values[i], weights[i]); + lowerLeastSquares.add(i, values[i], weights[i]); + upperLeastSquares.remove(i - first, values[i], weights[i]); + } + last = cp; + double nl = lowerRange.count(); + double nu = upperRange.count(); + double rl = lowerLeastSquares.rSquared(); + double ru = 
upperLeastSquares.rSquared(); + double vl = lowerRange.variance() * (1.0 - rl); + double vu = upperRange.variance() * (1.0 - ru); + double v = (nl * vl + nu * vu) / (nl + nu); + if (v < vChange) { + vChange = v; + changePoint = cp; + } + } + + double pValue = independentTrialsPValue( + fTestNestedPValue(H0.dataStats().nValues(), H0.var(), H0.nParams(), vChange, 6.0), + candidateChangePoints.length + ); + + return new TestStats(Type.TREND_CHANGE, pValue, vChange, 6.0, changePoint, H0.dataStats()); + } + + static TestStats testVsStepChange( + TestStats trendChange, + double[] values, + double[] weights, + int[] candidateChangePoints, + double pValueThreshold + ) { + DataStats dataStats = trendChange.dataStats(); + TestStats stationary = new TestStats(Type.STATIONARY, 1.0, dataStats.var(), 1.0, dataStats); + TestStats stepChange = testStepChangeVs(stationary, values, weights, candidateChangePoints); + double n = dataStats.nValues(); + double pValue = fTestNestedPValue(n, stepChange.var(), 2.0, trendChange.var(), 6.0); + return pValue < pValueThreshold ? trendChange : stepChange; + } + + static double fTestNestedPValue(double n, double vNull, double pNull, double vAlt, double pAlt) { + if (vAlt == vNull) { return 1.0; } - if (varianceAlt == 0.0) { + if (vAlt == 0.0) { return 0.0; } - double F = dfAlt / dfNull * vNull / varianceAlt; - double sf = fDistribSf(dfNull, dfAlt, F); + double F = (vNull - vAlt) / (pAlt - pNull) * (n - pAlt) / vAlt; + double sf = fDistribSf(pAlt - pNull, n - pAlt, F); return Math.min(2 * sf, 1.0); } + static SampleData sample(double[] values, double[] weights, Set changePoints) { + Integer[] adjChangePoints = changePoints.toArray(new Integer[changePoints.size()]); + if (values.length <= MAXIMUM_SAMPLE_SIZE_FOR_KS_TEST) { + return new SampleData(values, weights, adjChangePoints); + } + + // Just want repeatable random numbers. + Random rng = new Random(126832678); + UniformRealDistribution uniform = new UniformRealDistribution(RandomGeneratorFactory.createRandomGenerator(rng), 0.0, 0.99999); + + // Fisher–Yates shuffle (why isn't this in Arrays?). 
+ int[] choice = IntStream.range(0, values.length).toArray(); + for (int i = 0; i < MAXIMUM_SAMPLE_SIZE_FOR_KS_TEST; ++i) { + int index = i + (int) Math.floor(uniform.sample() * (values.length - i)); + int tmp = choice[i]; + choice[i] = choice[index]; + choice[index] = tmp; + } + + double[] sample = new double[MAXIMUM_SAMPLE_SIZE_FOR_KS_TEST]; + double[] sampleWeights = new double[MAXIMUM_SAMPLE_SIZE_FOR_KS_TEST]; + Arrays.sort(choice, 0, MAXIMUM_SAMPLE_SIZE_FOR_KS_TEST); + for (int i = 0; i < MAXIMUM_SAMPLE_SIZE_FOR_KS_TEST; ++i) { + sample[i] = values[choice[i]]; + sampleWeights[i] = weights[choice[i]]; + } + for (int i = 0; i < adjChangePoints.length; ++i) { + adjChangePoints[i] = lowerBound(choice, 0, MAXIMUM_SAMPLE_SIZE_FOR_KS_TEST, adjChangePoints[i].intValue()); + } + + return new SampleData(sample, sampleWeights, adjChangePoints); + } + + static TestStats testDistributionChange( + DataStats stats, + double[] values, + double[] weights, + int[] candidateChangePoints, + Set discoveredChangePoints + ) { + + double maxDiff = 0.0; + int changePoint = -1; + + // Initialize running stats so that they are only missing the individual changepoint values + RunningStats lowerRange = new RunningStats(); + RunningStats upperRange = new RunningStats(); + upperRange.addValues(values, i -> weights[i], candidateChangePoints[0], values.length); + lowerRange.addValues(values, i -> weights[i], 0, candidateChangePoints[0]); + int last = candidateChangePoints[0]; + for (int cp : candidateChangePoints) { + lowerRange.addValues(values, i -> weights[i], last, cp); + upperRange.removeValues(values, i -> weights[i], last, cp); + last = cp; + double scale = Math.min(cp, values.length - cp); + double meanDiff = Math.abs(lowerRange.mean() - upperRange.mean()); + double stdDiff = Math.abs(lowerRange.std() - upperRange.std()); + double diff = scale * (meanDiff + stdDiff); + if (diff >= maxDiff) { + maxDiff = diff; + changePoint = cp; + } + } + discoveredChangePoints.add(changePoint); + + // Note that statistical tests become increasingly powerful as the number of samples + // increases. We are not interested in detecting visually small distribution changes + // in splits of long windows so we randomly downsample the data if it is too large + // before we run the tests. + SampleData sampleData = sample(values, weights, discoveredChangePoints); + final double[] sampleValues = sampleData.values(); + final double[] sampleWeights = sampleData.weights(); + + double pValue = 1; + for (int cp : sampleData.changePoints()) { + double[] x = Arrays.copyOfRange(sampleValues, 0, cp); + double[] y = Arrays.copyOfRange(sampleValues, cp, sampleValues.length); + double statistic = KOLMOGOROV_SMIRNOV_TEST.kolmogorovSmirnovStatistic(x, y); + double ksTestPValue = KOLMOGOROV_SMIRNOV_TEST.exactP(statistic, x.length, y.length, false); + if (ksTestPValue < pValue) { + changePoint = cp; + pValue = ksTestPValue; + } + } + + // We start to get false positives if we have too many candidate change points. This + // is the classic p-value hacking problem. However, the Sidak style correction we use + // elsewhere is too conservative because test statistics for different split positions + // are strongly correlated. We assume that we have some effective number of independent + // trials equal to f * n for f < 1. Simulation shows that f = 1/50 yields low Type I + // error rates. 
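To make the correction applied in the statement that follows concrete: a raw p-value p over m effectively independent trials becomes 1 - (1 - p)^m, which `independentTrialsPValue` approximates as m * p for tiny p to avoid it rounding to zero. A sketch with invented numbers showing why the f = 1/50 deflation matters:

```java
public class IndependentTrialsSketch {
    // Mirrors independentTrialsPValue in this patch.
    static double independentTrialsPValue(double pValue, int nTrials) {
        return pValue > 1e-10 ? 1.0 - Math.pow(1.0 - pValue, nTrials) : nTrials * pValue;
    }

    public static void main(String[] args) {
        double rawP = 0.001;        // hypothetical best KS-test p-value over all splits
        int sampleLength = 500;     // MAXIMUM_SAMPLE_SIZE_FOR_KS_TEST
        int naiveTrials = sampleLength;                 // treats every split as independent
        int effectiveTrials = (sampleLength + 49) / 50; // f = 1/50, as in the patch

        System.out.println(independentTrialsPValue(rawP, naiveTrials));     // ~0.39: far too conservative
        System.out.println(independentTrialsPValue(rawP, effectiveTrials)); // ~0.01: usable evidence
    }
}
```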
+ pValue = independentTrialsPValue(pValue, (sampleValues.length + 49) / 50); + logger.trace("distribution change p-value: [{}]", pValue); + + return new TestStats(Type.DISTRIBUTION_CHANGE, pValue, changePoint, stats); + } + + enum Type { + STATIONARY, + NON_STATIONARY, + STEP_CHANGE, + TREND_CHANGE, + DISTRIBUTION_CHANGE + } + + record TestStats(Type type, double pValue, double var, double nParams, int changePoint, DataStats dataStats) { + TestStats(Type type, double pValue, int changePoint, DataStats dataStats) { + this(type, pValue, 0.0, 0.0, changePoint, dataStats); + } + + TestStats(Type type, double pValue, double var, double nParams, DataStats dataStats) { + this(type, pValue, var, nParams, -1, dataStats); + } + + boolean accept(double pValueThreshold) { + // Check the change is: + // 1. Statistically significant. + // 2. That we explain enough of the data variance overall. + return pValue < pValueThreshold && rSquared() >= 0.5; + } + + double rSquared() { + return 1.0 - var / dataStats.var(); + } + + double pValueVsStationary() { + return independentTrialsPValue( + fTestNestedPValue(dataStats.nValues(), dataStats.var(), 1.0, var, nParams), + dataStats.nCandidateChangePoints() + ); + } + + ChangeType changeType(MlAggsHelper.DoubleBucketValues bucketValues, double slope) { + switch (type) { + case STATIONARY: + return new ChangeType.Stationary(); + case NON_STATIONARY: + return new ChangeType.NonStationary(pValueVsStationary(), rSquared(), slope < 0.0 ? "decreasing" : "increasing"); + case STEP_CHANGE: + return new ChangeType.StepChange(pValueVsStationary(), bucketValues.getBucketIndex(changePoint)); + case TREND_CHANGE: + return new ChangeType.TrendChange(pValueVsStationary(), rSquared(), bucketValues.getBucketIndex(changePoint)); + case DISTRIBUTION_CHANGE: + return new ChangeType.DistributionChange(pValue, changePoint); + } + throw new RuntimeException("Unknown change type [" + type + "]."); + } + + @Override + public String toString() { + return "TestStats{" + + ("type=" + type) + + (", dataStats=" + dataStats) + + (", var=" + var) + + (", rSquared=" + rSquared()) + + (", pValue=" + pValue) + + (", nParams=" + nParams) + + (", changePoint=" + changePoint) + + '}'; + } + } + static class RunningStats { double sumOfSqrs; double sum; @@ -306,14 +583,18 @@ static RunningStats from(double[] values, IntToDoubleFunction weightFunction) { RunningStats() {} - double variance() { - return Math.max((sumOfSqrs - ((sum * sum) / count)) / count, 0.0); + double count() { + return count; } double mean() { return sum / count; } + double variance() { + return Math.max((sumOfSqrs - ((sum * sum) / count)) / count, 0.0); + } + double std() { return Math.sqrt(variance()); } @@ -347,28 +628,11 @@ RunningStats removeValues(double[] value, IntToDoubleFunction weightFunction, in } } - record VarianceAndRValue(double variance, double rValue) implements Comparable { - @Override - public int compareTo(VarianceAndRValue o) { - int v = Double.compare(variance, o.variance); - if (v == 0) { - return Double.compare(rValue, o.rValue); - } - return v; - } - - public VarianceAndRValue min(VarianceAndRValue other) { - if (this.compareTo(other) <= 0) { - return this; - } - return other; - } - } - static double fDistribSf(double numeratorDegreesOfFreedom, double denominatorDegreesOfFreedom, double x) { if (x <= 0) { return 1; - } else if (Double.isInfinite(x) || Double.isNaN(x)) { + } + if (Double.isInfinite(x) || Double.isNaN(x)) { return 0; } @@ -378,5 +642,4 @@ static double fDistribSf(double 
numeratorDegreesOfFreedom, double denominatorDeg 0.5 * numeratorDegreesOfFreedom ); } - } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/LeastSquaresOnlineRegression.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/LeastSquaresOnlineRegression.java index 077ffa3ba58b5..0cd74b6395b8e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/LeastSquaresOnlineRegression.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/LeastSquaresOnlineRegression.java @@ -16,7 +16,7 @@ class LeastSquaresOnlineRegression { - private static final double SINGLE_VALUE_DECOMPOSITION_EPS = 1e+15; + private static final double SINGLE_VALUE_DECOMPOSITION_MAX_COND = 1e+15; private final RunningStatistics statistics; private final Array2DRowRealMatrix Nx; @@ -33,9 +33,8 @@ class LeastSquaresOnlineRegression { } double rSquared() { - double result = 0; if (statistics.count <= 0.0) { - return result; + return 0.0; } double var = statistics.stats[3 * N - 1] - statistics.stats[2 * N - 1] * statistics.stats[2 * N - 1]; double residualVariance = var; @@ -43,7 +42,7 @@ class LeastSquaresOnlineRegression { boolean done = false; while (--n > 0 && done == false) { if (n == 1) { - return result; + return 0.0; } else if (n == this.N) { OptionalDouble maybeResidualVar = residualVariance(N, Nx, Ny, Nz); if (maybeResidualVar.isPresent()) { @@ -54,7 +53,7 @@ class LeastSquaresOnlineRegression { Array2DRowRealMatrix x = new Array2DRowRealMatrix(n, n); Array2DRowRealMatrix y = new Array2DRowRealMatrix(n, 1); Array2DRowRealMatrix z = new Array2DRowRealMatrix(n, 1); - OptionalDouble maybeResidualVar = residualVariance(N, Nx, Ny, Nz); + OptionalDouble maybeResidualVar = residualVariance(n, x, y, z); if (maybeResidualVar.isPresent()) { residualVariance = maybeResidualVar.getAsDouble(); done = true; @@ -71,7 +70,7 @@ private double[] statisticAdj(double x, double y) { d[i] = xi; d[i + 2 * N - 1] = xi * y; } - for (int i = 3; i < 2 * N - 1; ++i, xi *= x) { + for (int i = N; i < 2 * N - 1; ++i, xi *= x) { d[i] = xi; } d[3 * N - 1] = y * y; @@ -90,6 +89,7 @@ private OptionalDouble residualVariance(int n, Array2DRowRealMatrix x, Array2DRo if (n == 1) { return OptionalDouble.of(statistics.stats[3 * N - 1] - statistics.stats[2 * N - 1] * statistics.stats[2 * N - 1]); } + for (int i = 0; i < n; ++i) { x.setEntry(i, i, statistics.stats[i + i]); y.setEntry(i, 0, statistics.stats[i + 2 * N - 1]); @@ -102,7 +102,7 @@ private OptionalDouble residualVariance(int n, Array2DRowRealMatrix x, Array2DRo SingularValueDecomposition svd = new SingularValueDecomposition(x); double[] singularValues = svd.getSingularValues(); - if (singularValues[0] > SINGLE_VALUE_DECOMPOSITION_EPS * singularValues[n - 1]) { + if (singularValues[0] > SINGLE_VALUE_DECOMPOSITION_MAX_COND * singularValues[n - 1]) { return OptionalDouble.empty(); } RealMatrix r = svd.getSolver().solve(y); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/CountingItemSetTraverser.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/CountingItemSetTraverser.java index 35a0bd9e4c43f..a651b0c85eb40 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/CountingItemSetTraverser.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/CountingItemSetTraverser.java @@ -7,8 +7,6 @@ package 
org.elasticsearch.xpack.ml.aggs.frequentitemsets; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.xpack.ml.aggs.frequentitemsets.TransactionStore.TopItemIds; @@ -31,7 +29,6 @@ * if [a, b] is not in T, [a, b, c] cannot be in T either */ final class CountingItemSetTraverser implements Releasable { - private static final Logger logger = LogManager.getLogger(CountingItemSetTraverser.class); // start size and size increment for the occurrences stack private static final int OCCURENCES_SIZE_INCREMENT = 10; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/heuristic/PValueScore.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/heuristic/PValueScore.java index 324da870b1a40..8fe9f1ccd5415 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/heuristic/PValueScore.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/heuristic/PValueScore.java @@ -11,11 +11,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.bucket.terms.heuristic.NXYSignificanceHeuristic; -import org.elasticsearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; @@ -104,10 +102,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static SignificanceHeuristic parse(XContentParser parser) throws IOException { - return PARSER.apply(parser, null); - } - /** * This finds the p-value that the frequency of a category is unchanged on set subset assuming * we observe subsetFreq out of subset values in total relative to set supersetFreq where it accounts @@ -200,28 +194,4 @@ private static double eps(double value) { return Math.max(0.05 * value + 0.5, 1.0); } - public static class PValueScoreBuilder extends NXYBuilder { - private final long normalizeAbove; - - public PValueScoreBuilder(boolean backgroundIsSuperset, Long normalizeAbove) { - super(true, backgroundIsSuperset); - this.normalizeAbove = normalizeAbove == null ?
0L : normalizeAbove; - if (normalizeAbove != null && normalizeAbove <= 0) { - throw new IllegalArgumentException( - "[" + NORMALIZE_ABOVE.getPreferredName() + "] must be a positive value, provided [" + normalizeAbove + "]" - ); - } - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(NAME); - builder.field(BACKGROUND_IS_SUPERSET.getPreferredName(), backgroundIsSuperset); - if (normalizeAbove > 0) { - builder.field(NORMALIZE_ABOVE.getPreferredName(), normalizeAbove); - } - builder.endObject(); - return builder; - } - } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingContext.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingContext.java index 57d0084065fa5..cca59f27d5c76 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingContext.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingContext.java @@ -17,11 +17,11 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus; import org.elasticsearch.xpack.core.ml.inference.assignment.AssignmentState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeState; import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeTaskParams; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import java.util.Collection; import java.util.List; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDecider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDecider.java index 5605a80a7454c..44cf1188b09a2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDecider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDecider.java @@ -17,7 +17,7 @@ import org.elasticsearch.xpack.autoscaling.capacity.AutoscalingDeciderContext; import org.elasticsearch.xpack.core.ml.inference.assignment.Priority; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.utils.MlProcessors; import java.time.Instant; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/NodeFakeAvailabilityZoneMapper.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/NodeFakeAvailabilityZoneMapper.java index df2f66f6c5a42..1eb6fe3d03cd2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/NodeFakeAvailabilityZoneMapper.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/NodeFakeAvailabilityZoneMapper.java @@ -7,8 +7,6 @@ package org.elasticsearch.xpack.ml.autoscaling; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterState; import 
org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; @@ -27,8 +25,6 @@ */ public class NodeFakeAvailabilityZoneMapper extends AbstractNodeAvailabilityZoneMapper { - private static final Logger logger = LogManager.getLogger(NodeFakeAvailabilityZoneMapper.class); - public NodeFakeAvailabilityZoneMapper(Settings settings, ClusterSettings clusterSettings) { this(settings, clusterSettings, null); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java index 999d85b6dd549..ede57764a0813 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java @@ -20,7 +20,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.license.RemoteClusterLicenseChecker; -import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -100,7 +99,6 @@ public DatafeedManager( public void putDatafeed( PutDatafeedAction.Request request, ClusterState state, - XPackLicenseState licenseState, SecurityContext securityContext, ThreadPool threadPool, ActionListener listener diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java index 56c34aa590fb9..e226056217351 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexNotFoundException; @@ -62,7 +63,6 @@ import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; @@ -575,12 +575,8 @@ private static QueryBuilder buildDatafeedJobIdsQuery(Collection jobIds) } private void parseLenientlyFromSource(BytesReference source, ActionListener datafeedConfigListener) { - try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream) - ) { - datafeedConfigListener.onResponse(DatafeedConfig.LENIENT_PARSER.apply(parser, null)); + try { + datafeedConfigListener.onResponse(parseLenientlyFromSource(source)); } catch (Exception e) { datafeedConfigListener.onFailure(e); } @@ -588,11 +584,14 @@ private void parseLenientlyFromSource(BytesReference source, ActionListener memoryLimitById; + public DataFrameAnalyticsManager( Settings settings, NodeClient client, @@ -84,6 +93,37 @@ public DataFrameAnalyticsManager( ResultsPersisterService resultsPersisterService, ModelLoadingService modelLoadingService, String[] 
destIndexAllowedSettings + ) { + this( + settings, + client, + threadPool, + clusterService, + configProvider, + processManager, + auditor, + expressionResolver, + resultsPersisterService, + modelLoadingService, + destIndexAllowedSettings, + new ConcurrentHashMap<>() + ); + } + + // For testing only + public DataFrameAnalyticsManager( + Settings settings, + NodeClient client, + ThreadPool threadPool, + ClusterService clusterService, + DataFrameAnalyticsConfigProvider configProvider, + AnalyticsProcessManager processManager, + DataFrameAnalyticsAuditor auditor, + IndexNameExpressionResolver expressionResolver, + ResultsPersisterService resultsPersisterService, + ModelLoadingService modelLoadingService, + String[] destIndexAllowedSettings, + Map<String, ByteSizeValue> memoryLimitById ) { this.settings = Objects.requireNonNull(settings); this.client = Objects.requireNonNull(client); @@ -96,11 +136,13 @@ public DataFrameAnalyticsManager( this.resultsPersisterService = Objects.requireNonNull(resultsPersisterService); this.modelLoadingService = Objects.requireNonNull(modelLoadingService); this.destIndexAllowedSettings = Objects.requireNonNull(destIndexAllowedSettings); + this.memoryLimitById = Objects.requireNonNull(memoryLimitById); } public void execute(DataFrameAnalyticsTask task, ClusterState clusterState, TimeValue masterNodeTimeout) { // With config in hand, determine action to take ActionListener<DataFrameAnalyticsConfig> configListener = ActionListener.wrap(config -> { + memoryLimitById.put(config.getId(), config.getModelMemoryLimit()); // Check if existing destination index is incompatible. // If it is, we delete it and start from reindexing. IndexMetadata destIndex = clusterState.getMetadata().index(config.getDest().getIndex()); @@ -224,6 +266,7 @@ private void executeStep(DataFrameAnalyticsTask task, DataFrameAnalyticsConfig c case FINAL -> { LOGGER.info("[{}] Marking task completed", config.getId()); task.markAsCompleted(); + memoryLimitById.remove(config.getId()); } default -> task.markAsFailed(ExceptionsHelper.serverError("Unknown step [{}]", step)); } @@ -291,4 +334,34 @@ public boolean isNodeShuttingDown() { public void markNodeAsShuttingDown() { nodeShuttingDown.set(true); } + + /** + * Get the memory limit for a data frame analytics job if known. + * The memory limit will only be known if the job is running on the + * current node, or has been running there very recently. + * @param id Data frame analytics job ID. + * @return The {@link ByteSizeValue} representing the memory limit, if known, otherwise {@link Optional#empty}. + */ + public Optional<ByteSizeValue> getMemoryLimitIfKnown(String id) { + return Optional.ofNullable(memoryLimitById.get(id)); + } + + /** + * Finds the memory used by data frame analytics jobs that are active on the current node. + * This includes jobs that are in the reindexing state, even though they don't have a running + * process, because we want to ensure that when they get as far as needing to run a process + * there'll be space for it. + * @param tasks Persistent tasks metadata. + * @return Memory used by data frame analytics jobs that are active on the current node.
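+     * Each qualifying job (one whose {@code DataFrameAnalyticsState} still consumes memory) contributes
+     * {@code modelMemoryLimit + DataFrameAnalyticsConfig.PROCESS_MEMORY_OVERHEAD} bytes to this total,
+     * as computed in the loop below.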
+ */ + public ByteSizeValue getActiveTaskMemoryUsage(PersistentTasksCustomMetadata tasks) { + long memoryUsedBytes = 0; + for (Map.Entry entry : memoryLimitById.entrySet()) { + DataFrameAnalyticsState state = MlTasks.getDataFrameAnalyticsState(entry.getKey(), tasks); + if (state.consumesMemory()) { + memoryUsedBytes += entry.getValue().getBytes() + DataFrameAnalyticsConfig.PROCESS_MEMORY_OVERHEAD.getBytes(); + } + } + return ByteSizeValue.ofBytes(memoryUsedBytes); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndex.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndex.java index 8c6b78f41285c..81de8add4ae2e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndex.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndex.java @@ -13,8 +13,8 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; @@ -370,7 +370,7 @@ public static void updateMappingsToDestIndex( config.getHeaders(), ML_ORIGIN, client, - PutMappingAction.INSTANCE, + TransportPutMappingAction.TYPE, putMappingRequest, listener ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java index 618cbc075bd99..5469c6a7a7d87 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; @@ -57,7 +58,6 @@ import org.elasticsearch.xpack.ml.notifications.DataFrameAnalyticsAuditor; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; @@ -176,14 +176,8 @@ public void update( // Parse the original config DataFrameAnalyticsConfig originalConfig; - try { - try ( - InputStream stream = getResponse.getSourceAsBytesRef().streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream) - ) { - originalConfig = DataFrameAnalyticsConfig.LENIENT_PARSER.apply(parser, null).build(); - } + try (XContentParser parser = createParser(getResponse.getSourceAsBytesRef())) { + originalConfig = 
DataFrameAnalyticsConfig.LENIENT_PARSER.apply(parser, null).build(); } catch (IOException e) { listener.onFailure(new ElasticsearchParseException("Failed to parse data frame analytics configuration [" + id + "]", e)); return; @@ -332,12 +326,7 @@ public void onResponse(SearchResponse searchResponse) { SearchHit[] hits = searchResponse.getHits().getHits(); List configs = new ArrayList<>(hits.length); for (SearchHit hit : hits) { - BytesReference sourceBytes = hit.getSourceRef(); - try ( - InputStream stream = sourceBytes.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream) - ) { + try (XContentParser parser = createParser(hit.getSourceRef())) { configs.add(DataFrameAnalyticsConfig.LENIENT_PARSER.apply(parser, null).build()); } catch (IOException e) { delegate.onFailure(e); @@ -355,4 +344,12 @@ public void onResponse(SearchResponse searchResponse) { client::search ); } + + private XContentParser createParser(BytesReference sourceBytes) throws IOException { + return XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG.withRegistry(xContentRegistry), + sourceBytes, + XContentType.JSON + ); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java index fe4462d6556ee..637ad9d7bbbb2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java @@ -44,6 +44,7 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.MlPlatformArchitecturesUtil; @@ -67,8 +68,8 @@ import java.util.stream.Collectors; import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentUtils.NODES_CHANGED_REASON; -import static org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentUtils.createShuttingDownRoute; +import static org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentUtils.NODES_CHANGED_REASON; +import static org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentUtils.createShuttingDownRoute; public class TrainedModelAssignmentClusterService implements ClusterStateListener { @@ -77,8 +78,6 @@ public class TrainedModelAssignmentClusterService implements ClusterStateListene private static final TransportVersion RENAME_ALLOCATION_TO_ASSIGNMENT_TRANSPORT_VERSION = TransportVersions.V_8_3_0; public static final TransportVersion DISTRIBUTED_MODEL_ALLOCATION_TRANSPORT_VERSION = TransportVersions.V_8_4_0; - private static final TransportVersion NEW_ALLOCATION_MEMORY_VERSION = TransportVersions.V_8_500_064; - private final ClusterService clusterService; private final ThreadPool threadPool; private final 
NodeLoadDetector nodeLoadDetector; @@ -1099,31 +1098,61 @@ static boolean haveMlNodesChanged(ClusterChangedEvent event, TrainedModelAssignm // it may get re-allocated to that node when another node is added/removed... boolean nodesShutdownChanged = event.changedCustomMetadataSet().contains(NodesShutdownMetadata.TYPE); if (event.nodesChanged() || nodesShutdownChanged) { + // Tag the log messages emitted by this method with a shared identity so that related entries + // can be correlated later: when messages from several nodes get interlaced in the logs, + // debugging is very difficult without a way to tell which entries belong together + var eventIdentity = Long.toHexString(System.nanoTime()); + Set<String> shuttingDownNodes = nodesShuttingDown(event.state()); DiscoveryNodes.Delta nodesDelta = event.nodesDelta(); Set<String> removedNodes = nodesDelta.removedNodes().stream().map(DiscoveryNode::getId).collect(Collectors.toSet()); Set<String> addedNodes = nodesDelta.addedNodes().stream().map(DiscoveryNode::getId).collect(Collectors.toSet()); + logger.debug( + () -> format( + "Initial node change info; identity: %s; removed nodes: %s; added nodes: %s; shutting down nodes: %s", + eventIdentity, + removedNodes, + addedNodes, + shuttingDownNodes + ) + ); + Set<String> exitingShutDownNodes; if (nodesShutdownChanged) { Set<String> previousShuttingDownNodes = nodesShuttingDown(event.previousState()); + Set<String> presentNodes = event.state().nodes().stream().map(DiscoveryNode::getId).collect(Collectors.toSet()); // Add nodes that were marked for shutdown in the previous state // but are no longer marked as shutdown in the current state. - Set<String> returningShutDownNodes = Sets.difference(previousShuttingDownNodes, shuttingDownNodes); + // The intersection is to only include the nodes that actually exist + Set<String> returningShutDownNodes = Sets.intersection( + presentNodes, + Sets.difference(previousShuttingDownNodes, shuttingDownNodes) + ); addedNodes.addAll(returningShutDownNodes); // and nodes that are marked for shutdown in this event only exitingShutDownNodes = Sets.difference(shuttingDownNodes, previousShuttingDownNodes); removedNodes.addAll(exitingShutDownNodes); + + logger.debug( + () -> format( + "Shutting down nodes were changed; identity: %s; previous shutting down nodes: %s; returning nodes: %s", + eventIdentity, + previousShuttingDownNodes, + returningShutDownNodes + ) + ); } else { exitingShutDownNodes = Collections.emptySet(); } logger.debug( () -> format( - "added nodes %s; removed nodes %s; shutting down nodes %s; exiting shutdown nodes %s", + "identity: %s; added nodes %s; removed nodes %s; shutting down nodes %s; exiting shutdown nodes %s", + eventIdentity, addedNodes, removedNodes, shuttingDownNodes, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java index fdb007862cfdc..3fac7c387b12e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java @@ -41,6 +41,7 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingStateAndReason; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import
org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -66,8 +67,8 @@ import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.core.ml.MlTasks.TRAINED_MODEL_ASSIGNMENT_TASK_ACTION; import static org.elasticsearch.xpack.core.ml.MlTasks.TRAINED_MODEL_ASSIGNMENT_TASK_TYPE; +import static org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentUtils.NODE_IS_SHUTTING_DOWN; import static org.elasticsearch.xpack.ml.MachineLearning.ML_PYTORCH_MODEL_INFERENCE_FEATURE; -import static org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentUtils.NODE_IS_SHUTTING_DOWN; public class TrainedModelAssignmentNodeService implements ClusterStateListener { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java index 6e6b447fcea3d..a1142796558f4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlan; import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlanner; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentService.java index 1a5b5481704a4..0609e0e6ff916 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentService.java @@ -33,6 +33,7 @@ import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.action.UpdateTrainedModelAssignmentRoutingInfoAction; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import java.util.Objects; import java.util.function.Predicate; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java index 1dce7f0bb46ba..d9cb0f08a6cd0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java @@ -437,10 +437,6 @@ public 
Builder assignModelToNode(Deployment deployment, Node node, int allocatio return this; } - private boolean isAlreadyAssigned(Deployment deployment, Node node) { - return deployment.currentAllocationsByNodeId().containsKey(node.id()) || assignments.get(deployment).get(node) > 0; - } - private int getAssignedAllocations(Deployment deployment, Node node) { int currentAllocations = getCurrentAllocations(deployment, node); int assignmentAllocations = assignments.get(deployment).get(node); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java index f48e67f377817..597a97134a1e7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java @@ -17,8 +17,10 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.IdsQueryBuilder; import org.elasticsearch.inference.InferenceResults; @@ -26,9 +28,7 @@ import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; @@ -53,7 +53,6 @@ import org.elasticsearch.xpack.ml.inference.pytorch.results.ThreadSettings; import java.io.IOException; -import java.io.InputStream; import java.time.Instant; import java.util.Objects; import java.util.Optional; @@ -200,9 +199,18 @@ public void startDeployment(TrainedModelDeploymentTask task, ActionListener processContext.startAndLoad(modelConfig.getLocation(), modelLoadedListener) - ); + executorServiceForDeployment.execute(new AbstractRunnable() { + + @Override + public void onFailure(Exception e) { + failedDeploymentListener.onFailure(e); + } + + @Override + protected void doRun() { + processContext.startAndLoad(modelConfig.getLocation(), modelLoadedListener); + } + }); }, failedDeploymentListener::onFailure) ); } else { @@ -284,13 +292,11 @@ private SearchRequest vocabSearchRequest(VocabularyConfig vocabularyConfig, Stri Vocabulary parseVocabularyDocLeniently(SearchHit hit) throws IOException { try ( - InputStream stream = hit.getSourceRef().streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser( - XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry) - .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), - stream - ) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG.withRegistry(xContentRegistry), + hit.getSourceRef(), + XContentType.JSON + ) ) { return Vocabulary.PARSER.apply(parser, 
null); } catch (IOException e) { @@ -496,7 +502,14 @@ synchronized void startAndLoad(TrainedModelLocation modelLocation, ActionListene } logger.debug("[{}] start and load", task.getDeploymentId()); - process.set(pyTorchProcessFactory.createProcess(task, executorServiceForProcess, this::onProcessCrash)); + process.set( + pyTorchProcessFactory.createProcess( + task, + executorServiceForProcess, + () -> resultProcessor.awaitCompletion(COMPLETION_TIMEOUT.getMinutes(), TimeUnit.MINUTES), + this::onProcessCrash + ) + ); startTime = Instant.now(); logger.debug("[{}] process started", task.getDeploymentId()); try { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java index e9b7a1a3e137b..5994c61f46297 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java @@ -37,6 +37,7 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ClassificationConfig; @@ -46,7 +47,6 @@ import org.elasticsearch.xpack.core.ml.inference.trainedmodel.inference.InferenceDefinition; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.ml.inference.TrainedModelStatsService; import org.elasticsearch.xpack.ml.inference.ingest.InferenceProcessor; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java index 038f3fb08adbf..11676cc4a1599 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java @@ -89,7 +89,7 @@ public static LearningToRankRescorerBuilder fromXContent(XContentParser parser, public LearningToRankRescorerBuilder(StreamInput input, LearningToRankService learningToRankService) throws IOException { super(input); this.modelId = input.readString(); - this.params = input.readMap(); + this.params = input.readGenericMap(); this.learningToRankConfig = (LearningToRankConfig) input.readOptionalNamedWriteable(InferenceConfig.class); this.learningToRankService = learningToRankService; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankService.java index 177099801e0a5..bec162d141eba 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankService.java 
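The DeploymentManager hunk above replaces a bare lambda with an AbstractRunnable when forking the start-and-load step onto the executor. The point of the pattern is that an exception thrown while running the task is routed to onFailure, and a rejected execution lands there too by default, so the deployment listener always hears about the failure instead of it dying inside the executor. A minimal standalone sketch; the forkAndReport helper and its listener wiring are illustrative, not part of the change:

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;

import java.util.concurrent.ExecutorService;

class ForkSketch {
    // Fork work onto an executor without losing failures: if doRun() throws,
    // AbstractRunnable routes the exception to onFailure, and onRejection
    // delegates there as well, so the listener is always completed.
    static void forkAndReport(ExecutorService executor, Runnable work, ActionListener<Void> listener) {
        executor.execute(new AbstractRunnable() {
            @Override
            public void onFailure(Exception e) {
                listener.onFailure(e);
            }

            @Override
            protected void doRun() {
                work.run();
                listener.onResponse(null);
            }
        });
    }
}
```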
@@ -189,13 +189,13 @@ private QueryExtractorBuilder applyParams(QueryExtractorBuilder queryExtractorBu try { Script script = new Script(ScriptType.INLINE, DEFAULT_TEMPLATE_LANG, templateSource, SCRIPT_OPTIONS, Collections.emptyMap()); String parsedTemplate = scriptService.compile(script, TemplateScript.CONTEXT).newInstance(params).execute(); - XContentParser parser = XContentType.JSON.xContent().createParser(parserConfiguration, parsedTemplate); - - return new QueryExtractorBuilder( - queryExtractorBuilder.featureName(), - QueryProvider.fromXContent(parser, false, INFERENCE_CONFIG_QUERY_BAD_FORMAT), - queryExtractorBuilder.defaultScore() - ); + try (XContentParser parser = XContentType.JSON.xContent().createParser(parserConfiguration, parsedTemplate)) { + return new QueryExtractorBuilder( + queryExtractorBuilder.featureName(), + QueryProvider.fromXContent(parser, false, INFERENCE_CONFIG_QUERY_BAD_FORMAT), + queryExtractorBuilder.defaultScore() + ); + } } catch (GeneralScriptException e) { if (e.getRootCause().getClass().getName().equals(MustacheInvalidParameterException.class.getName())) { // Can't use instanceof since it returns an unexpected result. diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorer.java index 0a34915083982..f56c589aea19a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorer.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; @@ -30,7 +31,6 @@ import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; @@ -39,7 +39,6 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.io.IOException; -import java.io.InputStream; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; @@ -298,9 +297,11 @@ public static TrainedModelDefinitionDoc parseModelDefinitionDocLenientlyFromSour ) throws IOException { try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG.withRegistry(xContentRegistry), + source, + XContentType.JSON + ) ) { return TrainedModelDefinitionDoc.fromXContent(parser, true).build(); } catch (IOException e) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java
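A cleanup that recurs throughout this patch, in the LearningToRankService hunk above and again in the TrainedModelProvider diff below: hand-rolled InputStream-plus-XContentFactory parser construction is replaced by XContentHelper.createParserNotCompressed, and the parser is held in try-with-resources so it is closed even when parsing throws. A minimal sketch of the consolidated shape (class and method names here are illustrative):

```java
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.xcontent.NamedXContentRegistry;
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentType;
import org.elasticsearch.xpack.core.ml.job.config.Job;

import java.io.IOException;

class LenientParsingSketch {

    // One place to build a lenient JSON parser over uncompressed source bytes,
    // mirroring the private createParser(...) helpers introduced in this patch.
    static XContentParser lenientJsonParser(NamedXContentRegistry registry, BytesReference source) throws IOException {
        return XContentHelper.createParserNotCompressed(
            LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG.withRegistry(registry),
            source,
            XContentType.JSON
        );
    }

    // The caller owns the parser, so try-with-resources guarantees it is closed
    // even if the lenient parser throws part-way through the document.
    static Job.Builder parseJobLeniently(NamedXContentRegistry registry, BytesReference source) throws IOException {
        try (XContentParser parser = lenientJsonParser(registry, source)) {
            return Job.LENIENT_PARSER.apply(parser, null);
        }
    }
}
```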
index 2be4fe12884b0..d267966a1d795 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexNotFoundException; @@ -64,7 +65,6 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.action.util.ExpandedIdsMatcher; @@ -72,6 +72,7 @@ import org.elasticsearch.xpack.core.ml.MlStatsIndex; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; import org.elasticsearch.xpack.core.ml.inference.InferenceToXContentCompressor; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; @@ -85,11 +86,9 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.ml.inference.nlp.Vocabulary; import java.io.IOException; -import java.io.InputStream; import java.net.URL; import java.time.Instant; import java.util.ArrayList; @@ -1202,7 +1201,7 @@ TrainedModelConfig.Builder loadModelFromResource(String modelId, boolean nullOut } try ( XContentParser parser = JsonXContent.jsonXContent.createParser( - XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry).withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG.withRegistry(xContentRegistry), getClass().getResourceAsStream(MODEL_RESOURCE_PATH + modelId + MODEL_RESOURCE_FILE_EXT) ) ) { @@ -1322,15 +1321,7 @@ public static List chunkDefinitionWithSize(BytesReference defini } private TrainedModelConfig.Builder parseModelConfigLenientlyFromSource(BytesReference source, String modelId) throws IOException { - try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser( - XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry) - .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), - stream - ) - ) { + try (XContentParser parser = createParser(source)) { TrainedModelConfig.Builder builder = TrainedModelConfig.fromXContent(parser, true); if (builder.getModelType() == null) { @@ -1348,15 +1339,7 @@ private TrainedModelConfig.Builder parseModelConfigLenientlyFromSource(BytesRefe } private TrainedModelMetadata parseMetadataLenientlyFromSource(BytesReference source, String modelId) throws IOException { - try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser( - 
XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry) - .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), - stream - ) - ) { + try (XContentParser parser = createParser(source)) { return TrainedModelMetadata.fromXContent(parser, true); } catch (IOException e) { logger.error(() -> "[" + modelId + "] failed to parse model metadata", e); @@ -1364,6 +1347,14 @@ private TrainedModelMetadata parseMetadataLenientlyFromSource(BytesReference sou } } + private XContentParser createParser(BytesReference source) throws IOException { + return XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG.withRegistry(xContentRegistry), + source, + XContentType.JSON + ); + } + private static IndexRequest createRequest(String docId, String index, ToXContentObject body, boolean allowOverwriting) { return createRequest(new IndexRequest(index), docId, body, allowOverwriting); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/NativePyTorchProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/NativePyTorchProcess.java index 5908c550d318f..d2e5369ef4bd3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/NativePyTorchProcess.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/NativePyTorchProcess.java @@ -20,6 +20,7 @@ import java.nio.file.Path; import java.util.Iterator; import java.util.List; +import java.util.concurrent.TimeoutException; import java.util.function.Consumer; public class NativePyTorchProcess extends AbstractNativeProcess implements PyTorchProcess { @@ -27,6 +28,7 @@ public class NativePyTorchProcess extends AbstractNativeProcess implements PyTor private static final String NAME = "pytorch_inference"; private final ProcessResultsParser resultsParser; + private final PyTorchProcessFactory.TimeoutRunnable afterInStreamClose; protected NativePyTorchProcess( String jobId, @@ -34,9 +36,11 @@ protected NativePyTorchProcess( ProcessPipes processPipes, int numberOfFields, List filesToDelete, + PyTorchProcessFactory.TimeoutRunnable afterInStreamClose, Consumer onProcessCrash ) { super(jobId, nativeController, processPipes, numberOfFields, filesToDelete, onProcessCrash); + this.afterInStreamClose = afterInStreamClose; this.resultsParser = new ProcessResultsParser<>(PyTorchResult.PARSER, NamedXContentRegistry.EMPTY); } @@ -71,4 +75,9 @@ public void writeInferenceRequest(BytesReference jsonRequest) throws IOException processInStream().write('\n'); processInStream().flush(); } + + @Override + protected void afterProcessInStreamClose() throws TimeoutException { + afterInStreamClose.run(); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/NativePyTorchProcessFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/NativePyTorchProcessFactory.java index 4585ca29e8d14..e538a6c686881 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/NativePyTorchProcessFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/NativePyTorchProcessFactory.java @@ -56,6 +56,7 @@ void setProcessConnectTimeout(TimeValue processConnectTimeout) { public NativePyTorchProcess createProcess( TrainedModelDeploymentTask task, ExecutorService executorService, + TimeoutRunnable afterInStreamClose, Consumer onProcessCrash ) { 
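        // afterInStreamClose is a TimeoutRunnable (it may throw TimeoutException) that the process runs
        // once its input stream has been closed (see NativePyTorchProcess#afterProcessInStreamClose).
        // DeploymentManager wires it to () -> resultProcessor.awaitCompletion(COMPLETION_TIMEOUT.getMinutes(), TimeUnit.MINUTES)
        // so that closing the process blocks until in-flight inference results have been handled.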
ProcessPipes processPipes = new ProcessPipes( @@ -80,6 +81,7 @@ public NativePyTorchProcess createProcess( processPipes, 0, Collections.emptyList(), + afterInStreamClose, onProcessCrash ); @@ -87,7 +89,7 @@ public NativePyTorchProcess createProcess( process.start(executorService); } catch (IOException | EsRejectedExecutionException e) { String msg = "Failed to connect to pytorch process for job " + task.getDeploymentId(); - logger.error(msg); + logger.error(msg, e); try { IOUtils.close(process); } catch (IOException ioe) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchProcessFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchProcessFactory.java index 07d9e8faa22ea..507c6115a392d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchProcessFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchProcessFactory.java @@ -10,9 +10,19 @@ import org.elasticsearch.xpack.ml.inference.deployment.TrainedModelDeploymentTask; import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeoutException; import java.util.function.Consumer; public interface PyTorchProcessFactory { - PyTorchProcess createProcess(TrainedModelDeploymentTask task, ExecutorService executorService, Consumer onProcessCrash); + interface TimeoutRunnable { + void run() throws TimeoutException; + } + + PyTorchProcess createProcess( + TrainedModelDeploymentTask task, + ExecutorService executorService, + TimeoutRunnable afterInStreamClose, + Consumer onProcessCrash + ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchProcessManager.java deleted file mode 100644 index c812e490217ed..0000000000000 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchProcessManager.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.ml.inference.pytorch.process; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -public class PyTorchProcessManager { - - private static final Logger logger = LogManager.getLogger(PyTorchProcessManager.class); - - public PyTorchProcessManager() { - - } - - public void start(String taskId) { - - } -} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java index 035f4864ebace..7532ae4317830 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java @@ -478,24 +478,27 @@ private void validate(Job job, JobUpdate jobUpdate, ActionListener handler private void validateModelSnapshotIdUpdate(Job job, String modelSnapshotId, VoidChainTaskExecutor voidChainTaskExecutor) { if (modelSnapshotId != null && ModelSnapshot.isTheEmptySnapshot(modelSnapshotId) == false) { - voidChainTaskExecutor.add(listener -> jobResultsProvider.getModelSnapshot(job.getId(), modelSnapshotId, newModelSnapshot -> { - if (newModelSnapshot == null) { - String message = Messages.getMessage(Messages.REST_NO_SUCH_MODEL_SNAPSHOT, modelSnapshotId, job.getId()); - listener.onFailure(new ResourceNotFoundException(message)); - return; - } - jobResultsProvider.getModelSnapshot(job.getId(), job.getModelSnapshotId(), oldModelSnapshot -> { - if (oldModelSnapshot != null && newModelSnapshot.result.getTimestamp().before(oldModelSnapshot.result.getTimestamp())) { - String message = "Job [" - + job.getId() - + "] has a more recent model snapshot [" - + oldModelSnapshot.result.getSnapshotId() - + "]"; - listener.onFailure(new IllegalArgumentException(message)); + voidChainTaskExecutor.add( + listener -> jobResultsProvider.getModelSnapshot(job.getId(), modelSnapshotId, false, newModelSnapshot -> { + if (newModelSnapshot == null) { + String message = Messages.getMessage(Messages.REST_NO_SUCH_MODEL_SNAPSHOT, modelSnapshotId, job.getId()); + listener.onFailure(new ResourceNotFoundException(message)); + return; } - listener.onResponse(null); - }, listener::onFailure); - }, listener::onFailure)); + jobResultsProvider.getModelSnapshot(job.getId(), job.getModelSnapshotId(), false, oldModelSnapshot -> { + if (oldModelSnapshot != null + && newModelSnapshot.result.getTimestamp().before(oldModelSnapshot.result.getTimestamp())) { + String message = "Job [" + + job.getId() + + "] has a more recent model snapshot [" + + oldModelSnapshot.result.getSnapshotId() + + "]"; + listener.onFailure(new IllegalArgumentException(message)); + } + listener.onResponse(null); + }, listener::onFailure); + }, listener::onFailure) + ); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/NodeLoadDetector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/NodeLoadDetector.java index 548c95d1ddd50..f2bf180943b82 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/NodeLoadDetector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/NodeLoadDetector.java @@ -16,10 +16,10 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import 
org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.utils.MemoryTrackedTaskState; import org.elasticsearch.xpack.core.ml.utils.MlTaskParams; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import org.elasticsearch.xpack.ml.utils.NativeMemoryCalculator; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedBucketsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedBucketsIterator.java index 9ae361a9b18c5..1423881a80877 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedBucketsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedBucketsIterator.java @@ -8,18 +8,15 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.client.internal.OriginSettingClient; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.elasticsearch.xpack.core.ml.job.results.Result; import java.io.IOException; -import java.io.InputStream; class BatchedBucketsIterator extends BatchedResultsIterator { @@ -29,14 +26,17 @@ class BatchedBucketsIterator extends BatchedResultsIterator { @Override protected Result map(SearchHit hit) { - BytesReference source = hit.getSourceRef(); - try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) - ) { - Bucket bucket = Bucket.LENIENT_PARSER.apply(parser, null); - return new Result<>(hit.getIndex(), bucket); + try { + try ( + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + hit.getSourceRef(), + XContentType.JSON + ) + ) { + Bucket bucket = Bucket.LENIENT_PARSER.apply(parser, null); + return new Result<>(hit.getIndex(), bucket); + } } catch (IOException e) { throw new ElasticsearchParseException("failed to parse bucket", e); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedInfluencersIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedInfluencersIterator.java index b4f12e35b7a24..94d85bd9b7178 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedInfluencersIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedInfluencersIterator.java @@ -8,18 +8,15 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.client.internal.OriginSettingClient; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.search.SearchHit; -import 
org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.job.results.Influencer; import org.elasticsearch.xpack.core.ml.job.results.Result; import java.io.IOException; -import java.io.InputStream; class BatchedInfluencersIterator extends BatchedResultsIterator { BatchedInfluencersIterator(OriginSettingClient client, String jobId) { @@ -28,11 +25,12 @@ class BatchedInfluencersIterator extends BatchedResultsIterator { @Override protected Result map(SearchHit hit) { - BytesReference source = hit.getSourceRef(); try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + hit.getSourceRef(), + XContentType.JSON + ) ) { Influencer influencer = Influencer.LENIENT_PARSER.apply(parser, null); return new Result<>(hit.getIndex(), influencer); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedRecordsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedRecordsIterator.java index f5fe290661b88..2562217c44a22 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedRecordsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedRecordsIterator.java @@ -8,18 +8,15 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.client.internal.OriginSettingClient; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; import org.elasticsearch.xpack.core.ml.job.results.Result; import java.io.IOException; -import java.io.InputStream; class BatchedRecordsIterator extends BatchedResultsIterator { @@ -29,11 +26,12 @@ class BatchedRecordsIterator extends BatchedResultsIterator { @Override protected Result map(SearchHit hit) { - BytesReference source = hit.getSourceRef(); try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + hit.getSourceRef(), + XContentType.JSON + ) ) { AnomalyRecord record = AnomalyRecord.LENIENT_PARSER.apply(parser, null); return new Result<>(hit.getIndex(), record); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java index b77401f06f507..8493513f40bd6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.engine.VersionConflictEngineException; @@ -53,7 +54,6 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.action.util.ExpandedIdsMatcher; import org.elasticsearch.xpack.core.ml.MlConfigIndex; @@ -73,7 +73,6 @@ import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -760,12 +759,8 @@ static Collection matchingJobIdsWithTasks(String[] jobIdPatterns, Persis } private static void parseJobLenientlyFromSource(BytesReference source, ActionListener jobListener) { - try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), stream) - ) { - jobListener.onResponse(Job.LENIENT_PARSER.apply(parser, null)); + try { + jobListener.onResponse(parseJobLenientlyFromSource(source)); } catch (Exception e) { jobListener.onFailure(e); } @@ -773,9 +768,11 @@ private static void parseJobLenientlyFromSource(BytesReference source, ActionLis private static Job.Builder parseJobLenientlyFromSource(BytesReference source) throws IOException { try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + source, + XContentType.JSON + ) ) { return Job.LENIENT_PARSER.apply(parser, null); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java index 7b41f3e055874..becbffefff8c8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java @@ -53,6 +53,7 @@ import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.mapper.MapperService; @@ -88,7 +89,6 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; import 
org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.action.util.QueryPage; @@ -133,7 +133,6 @@ import org.elasticsearch.xpack.ml.utils.persistence.MlParserUtils; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -859,14 +858,7 @@ public void buckets( List results = new ArrayList<>(); for (SearchHit hit : hits.getHits()) { BytesReference source = hit.getSourceRef(); - try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser( - XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), - stream - ) - ) { + try (XContentParser parser = createParser(source)) { Bucket bucket = Bucket.LENIENT_PARSER.apply(parser, null); results.add(bucket); } catch (IOException e) { @@ -1075,14 +1067,7 @@ public void categoryDefinitions( List results = new ArrayList<>(hits.length); for (SearchHit hit : hits) { BytesReference source = hit.getSourceRef(); - try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser( - XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), - stream - ) - ) { + try (XContentParser parser = createParser(source)) { CategoryDefinition categoryDefinition = CategoryDefinition.LENIENT_PARSER.apply(parser, null); // Check if parent task is cancelled as augmentation of many categories is a non-trivial task if (parentTask != null && parentTask.isCancelled()) { @@ -1150,14 +1135,7 @@ public void records( List results = new ArrayList<>(); for (SearchHit hit : searchResponse.getHits().getHits()) { BytesReference source = hit.getSourceRef(); - try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser( - XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), - stream - ) - ) { + try (XContentParser parser = createParser(source)) { results.add(AnomalyRecord.LENIENT_PARSER.apply(parser, null)); } catch (IOException e) { throw new ElasticsearchParseException("failed to parse records", e); @@ -1221,14 +1199,7 @@ public void influencers( List influencers = new ArrayList<>(); for (SearchHit hit : response.getHits().getHits()) { BytesReference source = hit.getSourceRef(); - try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser( - XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), - stream - ) - ) { + try (XContentParser parser = createParser(source)) { influencers.add(Influencer.LENIENT_PARSER.apply(parser, null)); } catch (IOException e) { throw new ElasticsearchParseException("failed to parse influencer", e); @@ -1257,11 +1228,13 @@ public BatchedResultsIterator newBatchedInfluencersIterator(String j } /** - * Get a job's model snapshot by its id + * Get a job's model snapshot by its id. + * Quantiles should only be included when strictly required, because they can be very large and consume a lot of heap. 
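+     * When {@code includeQuantiles} is false the quantiles field is excluded from the fetched document source, so this potentially very large field never reaches the heap.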
*/ public void getModelSnapshot( String jobId, @Nullable String modelSnapshotId, + boolean includeQuantiles, Consumer> handler, Consumer errorHandler ) { @@ -1271,6 +1244,9 @@ public void getModelSnapshot( } String resultsIndex = AnomalyDetectorsIndex.jobResultsAliasedName(jobId); SearchRequestBuilder search = createDocIdSearch(resultsIndex, ModelSnapshot.documentId(jobId, modelSnapshotId)); + if (includeQuantiles == false) { + search.setFetchSource(null, ModelSnapshot.QUANTILES.getPreferredName()); + } searchSingleResult( jobId, ModelSnapshot.TYPE.getPreferredName(), @@ -1427,11 +1403,7 @@ public QueryPage modelPlot(String jobId, int from, int size) { for (SearchHit hit : searchResponse.getHits().getHits()) { BytesReference source = hit.getSourceRef(); - try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), stream) - ) { + try (XContentParser parser = createParser(source)) { ModelPlot modelPlot = ModelPlot.LENIENT_PARSER.apply(parser, null); results.add(modelPlot); } catch (IOException e) { @@ -1464,11 +1436,7 @@ public QueryPage categorizerStats(String jobId, int from, int for (SearchHit hit : searchResponse.getHits().getHits()) { BytesReference source = hit.getSourceRef(); - try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), stream) - ) { + try (XContentParser parser = createParser(source)) { CategorizerStats categorizerStats = CategorizerStats.LENIENT_PARSER.apply(parser, null).build(); results.add(categorizerStats); } catch (IOException e) { @@ -1990,14 +1958,7 @@ public void onResponse(GetResponse getDocResponse) { try { if (getDocResponse.isExists()) { BytesReference docSource = getDocResponse.getSourceAsBytesRef(); - try ( - InputStream stream = docSource.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser( - XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), - stream - ) - ) { + try (XContentParser parser = createParser(docSource)) { Calendar calendar = Calendar.LENIENT_PARSER.apply(parser, null).build(); listener.onResponse(calendar); } @@ -2020,6 +1981,10 @@ public void onFailure(Exception e) { }, client::get); } + private static XContentParser createParser(BytesReference docSource) throws IOException { + return XContentHelper.createParserNotCompressed(LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, docSource, XContentType.JSON); + } + /** * Returns information needed to decide how to restart a job from a datafeed * @param jobId the job id diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/SearchAfterJobsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/SearchAfterJobsIterator.java index df66b5b256955..aab2415251713 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/SearchAfterJobsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/SearchAfterJobsIterator.java @@ -10,12 +10,11 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import 
org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.sort.FieldSortBuilder; -import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.MlConfigIndex; @@ -23,7 +22,6 @@ import org.elasticsearch.xpack.ml.utils.persistence.SearchAfterDocumentsIterator; import java.io.IOException; -import java.io.InputStream; public class SearchAfterJobsIterator extends SearchAfterDocumentsIterator<Job.Builder> { @@ -60,9 +58,11 @@ protected void extractSearchAfterFields(SearchHit lastSearchHit) { @Override protected Job.Builder map(SearchHit hit) { try ( - InputStream stream = hit.getSourceRef().streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + hit.getSourceRef(), + XContentType.JSON + ) ) { return Job.LENIENT_PARSER.apply(parser, null); } catch (IOException e) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index 8deac327c065e..658db2997485d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -1062,4 +1062,24 @@ public void clusterChanged(ClusterChangedEvent event) { resetInProgress = MlMetadata.getMlMetadata(event.state()).isResetMode(); } + /** + * Finds the memory used by open autodetect processes on the current node. + * @return Memory used by open autodetect processes on the current node, including a fixed per-process overhead. + */ + public ByteSizeValue getOpenProcessMemoryUsage() { + long memoryUsedBytes = 0; + for (ProcessContext processContext : processByAllocation.values()) { + if (processContext.getState() == ProcessContext.ProcessStateName.RUNNING) { + ModelSizeStats modelSizeStats = processContext.getAutodetectCommunicator().getModelSizeStats(); + ModelSizeStats.AssignmentMemoryBasis basis = modelSizeStats.getAssignmentMemoryBasis(); + memoryUsedBytes += switch (basis != null ? 
basis : ModelSizeStats.AssignmentMemoryBasis.MODEL_MEMORY_LIMIT) { + case MODEL_MEMORY_LIMIT -> modelSizeStats.getModelBytesMemoryLimit(); + case CURRENT_MODEL_BYTES -> modelSizeStats.getModelBytes(); + case PEAK_MODEL_BYTES -> Optional.ofNullable(modelSizeStats.getPeakModelBytes()).orElse(modelSizeStats.getModelBytes()); + }; + memoryUsedBytes += Job.PROCESS_MEMORY_OVERHEAD.getBytes(); + } + } + return ByteSizeValue.ofBytes(memoryUsedBytes); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java index 459e6f6dee4bd..ae6e21156fdcb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java @@ -58,8 +58,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; import static org.elasticsearch.xpack.core.ml.job.messages.Messages.JOB_FORECAST_NATIVE_PROCESS_KILLED; /** @@ -506,19 +504,27 @@ protected void updateModelSnapshotOnJob(ModelSnapshot modelSnapshot) { return; } - executeAsyncWithOrigin(client, ML_ORIGIN, UpdateJobAction.INSTANCE, updateRequest, new ActionListener() { - @Override - public void onResponse(PutJobAction.Response response) { - updateModelSnapshotSemaphore.release(); - logger.debug("[{}] Updated job with model snapshot id [{}]", jobId, modelSnapshot.getSnapshotId()); - } + RetryableUpdateModelSnapshotAction updateModelSnapshotAction = new RetryableUpdateModelSnapshotAction( + client, + updateRequest, + new ActionListener<>() { + @Override + public void onResponse(PutJobAction.Response response) { + updateModelSnapshotSemaphore.release(); + logger.debug("[{}] Updated job with model snapshot id [{}]", jobId, modelSnapshot.getSnapshotId()); + } - @Override - public void onFailure(Exception e) { - updateModelSnapshotSemaphore.release(); - logger.error("[" + jobId + "] Failed to update job with new model snapshot id [" + modelSnapshot.getSnapshotId() + "]", e); + @Override + public void onFailure(Exception e) { + updateModelSnapshotSemaphore.release(); + logger.error( + "[" + jobId + "] Failed to update job with new model snapshot id [" + modelSnapshot.getSnapshotId() + "]", + e + ); + } } - }); + ); + updateModelSnapshotAction.run(); } public void awaitCompletion() throws TimeoutException { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/RetryableUpdateModelSnapshotAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/RetryableUpdateModelSnapshotAction.java new file mode 100644 index 0000000000000..8823eaaa2032f --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/RetryableUpdateModelSnapshotAction.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.job.process.autodetect.output; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.RetryableAction; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xpack.core.ml.action.PutJobAction; +import org.elasticsearch.xpack.core.ml.action.UpdateJobAction; +import org.elasticsearch.xpack.ml.MachineLearning; + +import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; + +/** + * Class to retry updates to the model snapshot ID on the job config after a new model snapshot result + * is seen. Prior to the introduction of this functionality we saw cases where this particular job config + * update would fail, so that the job would have persisted a perfectly valid model snapshot and yet it + * would not be used if the job failed over to another node, leading to wasted work rerunning from an + * older snapshot. + */ +public class RetryableUpdateModelSnapshotAction extends RetryableAction<PutJobAction.Response> { + + private static final Logger logger = LogManager.getLogger(RetryableUpdateModelSnapshotAction.class); + + private final Client client; + private final UpdateJobAction.Request updateRequest; + private volatile boolean hasFailedAtLeastOnce; + + public RetryableUpdateModelSnapshotAction( + Client client, + UpdateJobAction.Request updateRequest, + ActionListener<PutJobAction.Response> listener + ) { + super( + logger, + client.threadPool(), + // First retry after 15 seconds + TimeValue.timeValueSeconds(15), + // Never wait more than 2 minutes between retries + TimeValue.timeValueMinutes(2), + // Retry for 5 minutes in total. If the node is shutting down then we cannot wait longer than 10 + // minutes, and there is other work to do as well. If the node is not shutting down then this update + // is less critical, because the job will eventually persist a newer snapshot and the update will be + // attempted again then. Therefore, 5 minutes is a reasonable compromise between preventing excess + // rework on failover and delaying processing unnecessarily. 
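+            // With the settings above the first retry comes after 15 seconds and subsequent waits grow
+            // exponentially up to the 2 minute cap, so only a handful of attempts fit in the 5 minute budget below.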
+ TimeValue.timeValueMinutes(5), + listener, + client.threadPool().executor(MachineLearning.UTILITY_THREAD_POOL_NAME) + ); + this.client = client; + this.updateRequest = updateRequest; + } + + @Override + public void tryAction(ActionListener<PutJobAction.Response> listener) { + executeAsyncWithOrigin(client, ML_ORIGIN, UpdateJobAction.INSTANCE, updateRequest, listener); + } + + @Override + public boolean shouldRetry(Exception e) { + if (hasFailedAtLeastOnce == false) { + hasFailedAtLeastOnce = true; + logger.warn(() -> "[" + updateRequest.getJobId() + "] Failed to update job with new model snapshot id; attempting retry", e); + } + return true; + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandler.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandler.java index 7c3ac8d6fab79..0b4f6765431da 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandler.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandler.java @@ -10,7 +10,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.CompositeBytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; @@ -79,8 +79,11 @@ private BytesReference parseResults(XContent xContent, BytesReference bytesRef) private void parseResult(XContent xContent, BytesReference bytesRef) throws IOException { try ( - InputStream stream = bytesRef.streamInput(); - XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + bytesRef, + xContent.type() + ) ) { NormalizerResult result = NormalizerResult.PARSER.apply(parser, null); normalizedResults.add(result); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java index db712def11eac..654ce87fc5e30 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -31,8 +32,6 @@ import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.job.config.Job; 
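The same parser swap recurs throughout the remainder of this patch, so a compact stand-alone use of the new idiom may help as a reference. The demo class below is hypothetical and assumes the Elasticsearch server artifact is on the classpath:

```java
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentType;

import java.io.IOException;

// Hypothetical demo of XContentHelper.createParserNotCompressed, which replaces the old
// XContentFactory/streamInput plumbing in the classes touched by this patch.
public class ParserIdiomDemo {
    public static void main(String[] args) throws IOException {
        BytesArray source = new BytesArray("{\"a\":1}");
        try (
            XContentParser parser = XContentHelper.createParserNotCompressed(
                LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, // registry-free config that logs deprecated fields
                source,
                XContentType.JSON
            )
        ) {
            System.out.println(parser.map()); // {a=1}
        }
    }
}
```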
@@ -49,7 +48,6 @@ import org.elasticsearch.xpack.ml.utils.MlIndicesUtils; import java.io.IOException; -import java.io.InputStream; import java.time.Instant; import java.time.ZoneOffset; import java.time.ZonedDateTime; @@ -205,9 +203,11 @@ static void latestBucketTime(OriginSettingClient client, TaskId parentTaskId, St } else { try ( - InputStream stream = hits[0].getSourceRef().streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + hits[0].getSourceRef(), + XContentType.JSON + ) ) { Bucket bucket = Bucket.LENIENT_PARSER.apply(parser, null); listener.onResponse(bucket.getTimestamp().getTime()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradeTaskExecutor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradeTaskExecutor.java index 69b926876302a..cc3f8f0dd1e67 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradeTaskExecutor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradeTaskExecutor.java @@ -329,6 +329,6 @@ private void deleteSnapshotAndFailTask(AllocatedPersistentTask task, String jobI ); }); - jobResultsProvider.getModelSnapshot(jobId, snapshotId, modelSnapshotListener::onResponse, modelSnapshotListener::onFailure); + jobResultsProvider.getModelSnapshot(jobId, snapshotId, false, modelSnapshotListener::onResponse, modelSnapshotListener::onFailure); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/AbstractNativeProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/AbstractNativeProcess.java index 8ea85208a2de8..dd71800bd4f90 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/AbstractNativeProcess.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/AbstractNativeProcess.java @@ -223,7 +223,7 @@ public void close() throws IOException { * Implementations can override this if they need to perform extra processing * immediately after the native process's input stream is closed. 
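+     * Since that processing may involve waiting on the native process, implementations are permitted to throw {@code TimeoutException}.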
*/ - protected void afterProcessInStreamClose() { + protected void afterProcessInStreamClose() throws TimeoutException { // no-op by default } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java index 5ba577eb90ab7..3f502c4d95cc9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java @@ -26,12 +26,12 @@ import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.job.config.AnalysisLimits; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeTaskParams; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.dataframe.persistence.DataFrameAnalyticsConfigProvider; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.job.JobManager; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/StateToProcessWriterHelper.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/StateToProcessWriterHelper.java index baac0aa301087..4c42609c44833 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/StateToProcessWriterHelper.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/StateToProcessWriterHelper.java @@ -6,8 +6,6 @@ */ package org.elasticsearch.xpack.ml.process; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.common.bytes.BytesReference; import java.io.IOException; @@ -23,17 +21,12 @@ private StateToProcessWriterHelper() {} public static void writeStateToStream(BytesReference source, OutputStream stream) throws IOException { // The source bytes are already UTF-8. The C++ process wants UTF-8, so we // can avoid converting to a Java String only to convert back again. - BytesRefIterator iterator = source.iterator(); - for (BytesRef ref = iterator.next(); ref != null; ref = iterator.next()) { - // There's a complication that the source can already have trailing 0 bytes - int length = ref.bytes.length; - while (length > 0 && ref.bytes[length - 1] == 0) { - --length; - } - if (length > 0) { - stream.write(ref.bytes, 0, length); - } + int length = source.length(); + // There's a complication that the source can already have trailing 0 bytes + while (length > 0 && source.get(length - 1) == 0) { + --length; } + source.slice(0, length).writeTo(stream); // This is dictated by RapidJSON on the C++ side; it treats a '\0' as end-of-file // even when it's not really end-of-file, and this is what we need because we're // sending multiple JSON documents via the same named pipe. 
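The trimming logic in the hunk above can be illustrated stand-alone with plain JDK types; the class below is a hypothetical sketch, not part of this patch:

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

final class StateWriterSketch {
    /** Trims trailing 0 bytes, then appends the single '\0' terminator the C++ side expects. */
    static void writeStateToStream(byte[] source, OutputStream stream) throws IOException {
        int length = source.length;
        // The persisted state can already carry trailing 0 bytes; drop them so that
        // exactly one end-of-document marker is ever sent.
        while (length > 0 && source[length - 1] == 0) {
            --length;
        }
        stream.write(source, 0, length);
        stream.write(0); // end-of-document marker for RapidJSON on the C++ side
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        writeStateToStream(new byte[] { '{', '}', 0, 0 }, out);
        System.out.println(out.size()); // 3: "{}" plus exactly one terminator
    }
}
```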
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandler.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandler.java index e73be1839d3e6..0416f4740f6ee 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandler.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandler.java @@ -17,7 +17,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParseException; @@ -264,8 +264,11 @@ private BytesReference parseMessages(XContent xContent, BytesReference bytesRef) private void parseMessage(XContent xContent, BytesReference bytesRef) { try ( - InputStream stream = bytesRef.streamInput(); - XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + bytesRef, + xContent.type() + ) ) { CppLogMessage msg = CppLogMessage.PARSER.apply(parser, null); Level level = Level.getLevel(msg.getLevel()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java index 91f57637ab092..197132084b855 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java @@ -144,7 +144,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws if (weightedTokensSupplier.get() == null) { return this; } - return weightedTokensToQuery(fieldName, weightedTokensSupplier.get(), queryRewriteContext); + return weightedTokensToQuery(fieldName, weightedTokensSupplier.get()); } CoordinatedInferenceAction.Request inferRequest = CoordinatedInferenceAction.Request.forTextInput( @@ -196,11 +196,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws return new TextExpansionQueryBuilder(this, textExpansionResultsSupplier); } - private QueryBuilder weightedTokensToQuery( - String fieldName, - TextExpansionResults textExpansionResults, - QueryRewriteContext queryRewriteContext - ) { + private QueryBuilder weightedTokensToQuery(String fieldName, TextExpansionResults textExpansionResults) { if (tokenPruningConfig != null) { WeightedTokensQueryBuilder weightedTokensQueryBuilder = new WeightedTokensQueryBuilder( fieldName, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TokenPruningConfig.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TokenPruningConfig.java index d789a645fd9c4..90fb9291b3b82 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TokenPruningConfig.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TokenPruningConfig.java @@ -47,8 +47,8 @@ public TokenPruningConfig(float tokensFreqRatioThreshold, float tokensWeightThre throw new 
IllegalArgumentException( "[" + TOKENS_FREQ_RATIO_THRESHOLD.getPreferredName() - + "] must be between [1.0] and [" - + String.format(Locale.ROOT, "%.1f", MAX_TOKENS_FREQ_RATIO_THRESHOLD) + + "] must be between [1] and [" + + String.format(Locale.ROOT, "%d", (int) MAX_TOKENS_FREQ_RATIO_THRESHOLD) + "], got " + tokensFreqRatioThreshold ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestDeleteExpiredDataAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestDeleteExpiredDataAction.java index 1fb45c07c5818..37731fcbfb10b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestDeleteExpiredDataAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestDeleteExpiredDataAction.java @@ -49,7 +49,9 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient DeleteExpiredDataAction.Request request; if (restRequest.hasContent()) { - request = DeleteExpiredDataAction.Request.parseRequest(jobId, restRequest.contentParser()); + try (var parser = restRequest.contentParser()) { + request = DeleteExpiredDataAction.Request.parseRequest(jobId, parser); + } } else { request = new DeleteExpiredDataAction.Request(); request.setJobId(jobId); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestPutDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestPutDatafeedAction.java index a5f98763d3245..64981805717a1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestPutDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestPutDatafeedAction.java @@ -47,8 +47,10 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { String datafeedId = restRequest.param(DatafeedConfig.ID.getPreferredName()); IndicesOptions indicesOptions = IndicesOptions.fromRequest(restRequest, SearchRequest.DEFAULT_INDICES_OPTIONS); - XContentParser parser = restRequest.contentParser(); - PutDatafeedAction.Request putDatafeedRequest = PutDatafeedAction.Request.parseRequest(datafeedId, indicesOptions, parser); + PutDatafeedAction.Request putDatafeedRequest; + try (XContentParser parser = restRequest.contentParser()) { + putDatafeedRequest = PutDatafeedAction.Request.parseRequest(datafeedId, indicesOptions, parser); + } putDatafeedRequest.timeout(restRequest.paramAsTime("timeout", putDatafeedRequest.timeout())); putDatafeedRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", putDatafeedRequest.masterNodeTimeout())); return channel -> client.execute(PutDatafeedAction.INSTANCE, putDatafeedRequest, new RestToXContentListener<>(channel)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestUpdateDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestUpdateDatafeedAction.java index f0260a9301edc..97e1514441441 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestUpdateDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestUpdateDatafeedAction.java @@ -53,8 +53,10 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient || restRequest.hasParam("ignore_throttled")) { indicesOptions = IndicesOptions.fromRequest(restRequest, SearchRequest.DEFAULT_INDICES_OPTIONS); } - XContentParser 
parser = restRequest.contentParser(); - UpdateDatafeedAction.Request updateDatafeedRequest = UpdateDatafeedAction.Request.parseRequest(datafeedId, indicesOptions, parser); + UpdateDatafeedAction.Request updateDatafeedRequest; + try (XContentParser parser = restRequest.contentParser()) { + updateDatafeedRequest = UpdateDatafeedAction.Request.parseRequest(datafeedId, indicesOptions, parser); + } updateDatafeedRequest.timeout(restRequest.paramAsTime("timeout", updateDatafeedRequest.timeout())); updateDatafeedRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", updateDatafeedRequest.masterNodeTimeout())); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestPostDataFrameAnalyticsUpdateAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestPostDataFrameAnalyticsUpdateAction.java index 48a820360e61b..52a3d83eeb11a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestPostDataFrameAnalyticsUpdateAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestPostDataFrameAnalyticsUpdateAction.java @@ -48,8 +48,10 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient } String id = restRequest.param(DataFrameAnalyticsConfig.ID.getPreferredName()); - XContentParser parser = restRequest.contentParser(); - UpdateDataFrameAnalyticsAction.Request updateRequest = UpdateDataFrameAnalyticsAction.Request.parseRequest(id, parser); + UpdateDataFrameAnalyticsAction.Request updateRequest; + try (XContentParser parser = restRequest.contentParser()) { + updateRequest = UpdateDataFrameAnalyticsAction.Request.parseRequest(id, parser); + } updateRequest.timeout(restRequest.paramAsTime("timeout", updateRequest.timeout())); return channel -> client.execute(UpdateDataFrameAnalyticsAction.INSTANCE, updateRequest, new RestToXContentListener<>(channel)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestPutDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestPutDataFrameAnalyticsAction.java index 9a3d958bd3a09..896b1dfdb6df2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestPutDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestPutDataFrameAnalyticsAction.java @@ -57,8 +57,10 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient } String id = restRequest.param(DataFrameAnalyticsConfig.ID.getPreferredName()); - XContentParser parser = restRequest.contentParser(); - PutDataFrameAnalyticsAction.Request putRequest = PutDataFrameAnalyticsAction.Request.parseRequest(id, parser); + PutDataFrameAnalyticsAction.Request putRequest; + try (XContentParser parser = restRequest.contentParser()) { + putRequest = PutDataFrameAnalyticsAction.Request.parseRequest(id, parser); + } putRequest.timeout(restRequest.paramAsTime("timeout", putRequest.timeout())); return channel -> client.execute(PutDataFrameAnalyticsAction.INSTANCE, putRequest, new RestToXContentListener<>(channel)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestInferTrainedModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestInferTrainedModelAction.java index 4afd07479a3eb..78b02871c3c57 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestInferTrainedModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestInferTrainedModelAction.java @@ -47,7 +47,10 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient if (restRequest.hasContent() == false) { throw ExceptionsHelper.badRequestException("requires body"); } - InferModelAction.Request.Builder request = InferModelAction.Request.parseRequest(modelId, restRequest.contentParser()); + InferModelAction.Request.Builder request; + try (var parser = restRequest.contentParser()) { + request = InferModelAction.Request.parseRequest(modelId, parser); + } if (restRequest.hasParam(InferModelAction.Request.TIMEOUT.getPreferredName())) { TimeValue inferTimeout = restRequest.paramAsTime( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/MlParserUtils.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/MlParserUtils.java index 8661497593815..36607e894edef 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/MlParserUtils.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/MlParserUtils.java @@ -10,14 +10,14 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.search.SearchHit; import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; -import java.io.InputStream; import java.util.function.BiFunction; public final class MlParserUtils { @@ -33,9 +33,12 @@ private MlParserUtils() {} public static T parse(SearchHit hit, BiFunction objectParser) { BytesReference source = hit.getSourceRef(); try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY.withRegistry(NamedXContentRegistry.EMPTY) + .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), + source, + XContentType.JSON + ) ) { return objectParser.apply(parser, null); } catch (IOException e) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlIndexTemplateRegistryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlIndexTemplateRegistryTests.java index e29f1cbe2064a..eafe568d09da5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlIndexTemplateRegistryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlIndexTemplateRegistryTests.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.ml; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.AdminClient; import 
org.elasticsearch.client.internal.Client; @@ -55,7 +55,7 @@ public class MlIndexTemplateRegistryTests extends ESTestCase { private ClusterService clusterService; private ThreadPool threadPool; private Client client; - private ArgumentCaptor putIndexTemplateRequestCaptor; + private ArgumentCaptor putIndexTemplateRequestCaptor; @Before public void setUpMocks() { @@ -81,7 +81,7 @@ public void setUpMocks() { ) ); - putIndexTemplateRequestCaptor = ArgumentCaptor.forClass(PutComposableIndexTemplateAction.Request.class); + putIndexTemplateRequestCaptor = ArgumentCaptor.forClass(TransportPutComposableIndexTemplateAction.Request.class); } public void testStateTemplate() { @@ -96,9 +96,13 @@ public void testStateTemplate() { registry.clusterChanged(createClusterChangedEvent(nodes)); - verify(client, times(4)).execute(same(PutComposableIndexTemplateAction.INSTANCE), putIndexTemplateRequestCaptor.capture(), any()); + verify(client, times(4)).execute( + same(TransportPutComposableIndexTemplateAction.TYPE), + putIndexTemplateRequestCaptor.capture(), + any() + ); - PutComposableIndexTemplateAction.Request req = putIndexTemplateRequestCaptor.getAllValues() + TransportPutComposableIndexTemplateAction.Request req = putIndexTemplateRequestCaptor.getAllValues() .stream() .filter(r -> r.name().equals(AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX)) .findFirst() @@ -120,9 +124,13 @@ public void testStatsTemplate() { registry.clusterChanged(createClusterChangedEvent(nodes)); - verify(client, times(4)).execute(same(PutComposableIndexTemplateAction.INSTANCE), putIndexTemplateRequestCaptor.capture(), any()); + verify(client, times(4)).execute( + same(TransportPutComposableIndexTemplateAction.TYPE), + putIndexTemplateRequestCaptor.capture(), + any() + ); - PutComposableIndexTemplateAction.Request req = putIndexTemplateRequestCaptor.getAllValues() + TransportPutComposableIndexTemplateAction.Request req = putIndexTemplateRequestCaptor.getAllValues() .stream() .filter(r -> r.name().equals(MlStatsIndex.TEMPLATE_NAME)) .findFirst() diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java index a7a9122c96606..2b206de4cf42f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeState; @@ -35,7 +36,6 @@ import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeTaskState; import org.elasticsearch.xpack.ml.datafeed.DatafeedRunner; import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsManager; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; import org.elasticsearch.xpack.ml.process.MlController; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; diff --git 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetricsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetricsTests.java new file mode 100644 index 0000000000000..2262c21070e75 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetricsTests.java @@ -0,0 +1,183 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; +import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; +import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; +import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; +import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; +import org.elasticsearch.xpack.core.ml.job.config.JobState; +import org.elasticsearch.xpack.ml.autoscaling.MlMemoryAutoscalingDeciderTests; +import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsManager; +import org.elasticsearch.xpack.ml.dataframe.persistence.DataFrameAnalyticsConfigProvider; +import org.elasticsearch.xpack.ml.dataframe.process.AnalyticsProcessManager; +import org.elasticsearch.xpack.ml.inference.loadingservice.ModelLoadingService; +import org.elasticsearch.xpack.ml.notifications.DataFrameAnalyticsAuditor; +import org.elasticsearch.xpack.ml.utils.persistence.ResultsPersisterService; + +import java.util.Map; + +import static org.elasticsearch.xpack.ml.MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD; +import static org.elasticsearch.xpack.ml.job.JobNodeSelector.AWAITING_LAZY_ASSIGNMENT; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; + +public class MlMetricsTests extends ESTestCase { + + public void testFindTaskStatuses() { + + PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + MlMemoryAutoscalingDeciderTests.addJobTask("job1", "node1", JobState.OPENED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addJobTask("job2", "node1", JobState.OPENED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addJobTask("job3", "node2", JobState.FAILED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addJobTask("job4", null, JobState.OPENING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addJobTask("job5", "node1", JobState.CLOSING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addJobTask("job6", "node2", JobState.OPENED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addJobTask("job7", "node2", 
JobState.OPENING, tasksBuilder); + addDatafeedTask("datafeed1", "node1", DatafeedState.STARTED, tasksBuilder); + addDatafeedTask("datafeed2", "node1", DatafeedState.STARTED, tasksBuilder); + addDatafeedTask("datafeed5", "node1", DatafeedState.STOPPING, tasksBuilder); + addDatafeedTask("datafeed6", "node2", DatafeedState.STARTED, tasksBuilder); + addDatafeedTask("datafeed7", "node2", DatafeedState.STARTING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa1", "node1", DataFrameAnalyticsState.STARTED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa2", "node2", DataFrameAnalyticsState.STARTED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa3", "node1", DataFrameAnalyticsState.FAILED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa4", "node2", DataFrameAnalyticsState.REINDEXING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa5", null, DataFrameAnalyticsState.STARTING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa6", "node1", DataFrameAnalyticsState.ANALYZING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa7", "node1", DataFrameAnalyticsState.STOPPING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa8", "node1", DataFrameAnalyticsState.STARTED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa9", null, DataFrameAnalyticsState.FAILED, tasksBuilder); + + MlMetrics.MlTaskStatusCounts counts = MlMetrics.findTaskStatuses(tasksBuilder.build()); + assertThat(counts.adOpeningCount(), is(2)); + assertThat(counts.adOpenedCount(), is(3)); + assertThat(counts.adClosingCount(), is(1)); + assertThat(counts.adFailedCount(), is(1)); + assertThat(counts.datafeedStartingCount(), is(1)); + assertThat(counts.datafeedStartedCount(), is(3)); + assertThat(counts.datafeedStoppingCount(), is(1)); + assertThat(counts.dfaStartingCount(), is(1)); + assertThat(counts.dfaStartedCount(), is(3)); + assertThat(counts.dfaReindexingCount(), is(1)); + assertThat(counts.dfaAnalyzingCount(), is(1)); + assertThat(counts.dfaStoppingCount(), is(1)); + assertThat(counts.dfaFailedCount(), is(2)); + } + + public void testFindDfaMemoryUsage() { + + PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa1", "node1", DataFrameAnalyticsState.STARTED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa2", "node2", DataFrameAnalyticsState.STARTED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa3", "node1", DataFrameAnalyticsState.FAILED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa4", "node2", DataFrameAnalyticsState.REINDEXING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa5", null, DataFrameAnalyticsState.STARTING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa6", "node1", DataFrameAnalyticsState.ANALYZING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa7", "node1", DataFrameAnalyticsState.STOPPING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa8", "node1", DataFrameAnalyticsState.STARTED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa9", null, DataFrameAnalyticsState.FAILED, tasksBuilder); + + DataFrameAnalyticsManager dfaManager = new DataFrameAnalyticsManager( + Settings.EMPTY, + mock(NodeClient.class), + mock(ThreadPool.class), + mock(ClusterService.class), + 
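+            // The remaining collaborators are mocks; only the per-task memory-limit map below matters for this test.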
mock(DataFrameAnalyticsConfigProvider.class), + mock(AnalyticsProcessManager.class), + mock(DataFrameAnalyticsAuditor.class), + mock(IndexNameExpressionResolver.class), + mock(ResultsPersisterService.class), + mock(ModelLoadingService.class), + new String[] {}, + Map.of( + "dfa1", + ByteSizeValue.ofGb(1), + "dfa3", + ByteSizeValue.ofGb(2), + "dfa6", + ByteSizeValue.ofGb(4), + "dfa7", + ByteSizeValue.ofGb(8), + "dfa8", + ByteSizeValue.ofGb(16) + ) + ); + + long bytesUsed = MlMetrics.findDfaMemoryUsage(dfaManager, tasksBuilder.build()); + assertThat(bytesUsed, is(ByteSizeValue.ofGb(29).getBytes() + 4 * DataFrameAnalyticsConfig.PROCESS_MEMORY_OVERHEAD.getBytes())); + } + + public void testFindTrainedModelAllocationCounts() { + + TrainedModelAssignmentMetadata.Builder metadataBuilder = TrainedModelAssignmentMetadata.Builder.empty(); + metadataBuilder.addNewAssignment( + "model1", + TrainedModelAssignment.Builder.empty(mock(StartTrainedModelDeploymentAction.TaskParams.class)) + .addRoutingEntry("node1", new RoutingInfo(1, 1, RoutingState.STARTED, "")) + .addRoutingEntry("node2", new RoutingInfo(0, 1, RoutingState.FAILED, "")) + ); + metadataBuilder.addNewAssignment( + "model2", + TrainedModelAssignment.Builder.empty(mock(StartTrainedModelDeploymentAction.TaskParams.class)) + .addRoutingEntry("node1", new RoutingInfo(2, 2, RoutingState.STARTED, "")) + ); + metadataBuilder.addNewAssignment( + "model3", + TrainedModelAssignment.Builder.empty(mock(StartTrainedModelDeploymentAction.TaskParams.class)) + .addRoutingEntry("node2", new RoutingInfo(0, 1, RoutingState.STARTING, "")) + ); + + MlMetrics.TrainedModelAllocationCounts counts = MlMetrics.findTrainedModelAllocationCounts(metadataBuilder.build()); + assertThat(counts.trainedModelsTargetAllocations(), is(5)); + assertThat(counts.trainedModelsCurrentAllocations(), is(3)); + assertThat(counts.trainedModelsFailedAllocations(), is(1)); + } + + public void testFindNativeMemoryFree() { + + long bytesFree = MlMetrics.findNativeMemoryFree( + ByteSizeValue.ofMb(4000).getBytes(), + ByteSizeValue.ofMb(500).getBytes(), + ByteSizeValue.ofMb(1000).getBytes(), + ByteSizeValue.ofMb(2000).getBytes() + ); + assertThat(bytesFree, is(ByteSizeValue.ofMb(500).getBytes() - NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes())); + } + + public static void addDatafeedTask( + String datafeedId, + String nodeId, + DatafeedState datafeedState, + PersistentTasksCustomMetadata.Builder builder + ) { + builder.addTask( + MlTasks.datafeedTaskId(datafeedId), + MlTasks.DATAFEED_TASK_NAME, + new StartDatafeedAction.DatafeedParams(datafeedId, System.currentTimeMillis()), + nodeId == null ? 
AWAITING_LAZY_ASSIGNMENT : new PersistentTasksCustomMetadata.Assignment(nodeId, "test assignment") ); if (datafeedState != null) { builder.updateTaskState(MlTasks.datafeedTaskId(datafeedId), datafeedState); } } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java index 9151a88ef482d..18cbf1728b0e4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java @@ -9,7 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.snapshots.features.ResetFeatureStateAction; import org.elasticsearch.action.admin.cluster.snapshots.features.ResetFeatureStateRequest; -import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; @@ -27,6 +27,7 @@ import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -119,10 +120,11 @@ public void tearDown() throws Exception { protected void waitForMlTemplates() throws Exception { // block until the templates are installed - assertBusy(() -> { - ClusterState state = clusterAdmin().prepareState().get().getState(); - assertTrue("Timed out waiting for the ML templates to be installed", MachineLearning.criticalTemplatesInstalled(state)); - }); + ClusterServiceUtils.awaitClusterState( + logger, + MachineLearning::criticalTemplatesInstalled, + getInstanceFromNode(ClusterService.class) + ); } protected <T> void blockingCall(Consumer<ActionListener<T>> function, AtomicReference<T> response, AtomicReference<Exception> error) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastActionTests.java index e9129c450d56f..92ceb536cfd43 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastActionTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.util.Maps; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.results.ForecastRequestStats; @@ -34,7 +35,7 @@ public void testValidateForecastStateWithAllFailedFinished() { // This should not throw. 
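+        // SearchHits.EMPTY is a shared zero-length SearchHit[], so no new empty array is allocated per call.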
TransportDeleteForecastAction.extractForecastIds( - forecastRequestStatsHits.toArray(new SearchHit[0]), + forecastRequestStatsHits.toArray(SearchHits.EMPTY), randomFrom(JobState.values()), randomAlphaOfLength(10) ); @@ -53,7 +54,7 @@ public void testValidateForecastStateWithSomeFailedFinished() { JobState jobState = randomFrom(JobState.CLOSED, JobState.CLOSING, JobState.FAILED); try { TransportDeleteForecastAction.extractForecastIds( - forecastRequestStatsHits.toArray(new SearchHit[0]), + forecastRequestStatsHits.toArray(SearchHits.EMPTY), jobState, randomAlphaOfLength(10) ); @@ -66,7 +67,7 @@ public void testValidateForecastStateWithSomeFailedFinished() { expectThrows( ElasticsearchStatusException.class, () -> TransportDeleteForecastAction.extractForecastIds( - forecastRequestStatsHits.toArray(new SearchHit[0]), + forecastRequestStatsHits.toArray(SearchHits.EMPTY), jobState, randomAlphaOfLength(10) ) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java index c54ac8ba3b84d..bf6d13ada0f94 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java @@ -28,7 +28,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.MachineLearningField; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.ml.inference.ingest.InferenceProcessor; import org.junit.Before; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelActionTests.java index 84c49ba95b522..6ed7a3311c94a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelActionTests.java @@ -16,8 +16,6 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestStatus; @@ -115,8 +113,7 @@ public void testParseInferenceConfigFromModelPackage() throws IOException { assertNotNull(inferenceConfigMap); InferenceConfig parsedInferenceConfig = TransportPutTrainedModelAction.parseInferenceConfigFromModelPackage( Collections.singletonMap(inferenceConfig.getWriteableName(), inferenceConfigMap), - xContentRegistry(), - LoggingDeprecationHandler.INSTANCE + xContentRegistry() ); assertEquals(inferenceConfig, parsedInferenceConfig); @@ -278,7 +275,6 @@ private TransportPutTrainedModelAction createTransportPutTrainedModelAction() { doReturn(threadPool).when(mockClient).threadPool(); return new TransportPutTrainedModelAction( - Settings.EMPTY, mockTransportService, mockClusterService, threadPool, diff --git 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregatorTests.java index 442641db8c4ed..e12baeab68f7b 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregatorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregatorTests.java @@ -7,8 +7,11 @@ package org.elasticsearch.xpack.ml.aggs.changepoint; +import org.apache.commons.math3.distribution.GammaDistribution; import org.apache.commons.math3.distribution.NormalDistribution; import org.apache.commons.math3.random.RandomGeneratorFactory; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.tests.index.RandomIndexWriter; @@ -34,10 +37,15 @@ import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThan; +import static org.junit.Assert.assertThat; public class ChangePointAggregatorTests extends AggregatorTestCase { + private static final Logger logger = LogManager.getLogger(ChangePointAggregator.class); + @Override protected List getSearchPlugins() { return List.of(MachineLearningTests.createTrialLicensedMachineLearning(Settings.EMPTY)); @@ -47,7 +55,197 @@ protected List getSearchPlugins() { private static final String NUMERIC_FIELD_NAME = "value"; private static final String TIME_FIELD_NAME = "timestamp"; - public void testNoChange() throws IOException { + public void testStationaryFalsePositiveRate() throws IOException { + NormalDistribution normal = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 0, 2); + int fp = 0; + for (int i = 0; i < 100; i++) { + double[] bucketValues = DoubleStream.generate(() -> 10 + normal.sample()).limit(40).toArray(); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, 1e-3); + fp += test.type() == ChangePointAggregator.Type.STATIONARY ? 0 : 1; + } + assertThat(fp, lessThan(5)); + + fp = 0; + GammaDistribution gamma = new GammaDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 1, 2); + for (int i = 0; i < 100; i++) { + double[] bucketValues = DoubleStream.generate(() -> gamma.sample()).limit(40).toArray(); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, 1e-3); + fp += test.type() == ChangePointAggregator.Type.STATIONARY ? 0 : 1; + } + assertThat(fp, lessThan(5)); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103848") + public void testSampledDistributionTestFalsePositiveRate() throws IOException { + NormalDistribution normal = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 0.0, 1.0); + int fp = 0; + for (int i = 0; i < 100; i++) { + double[] bucketValues = DoubleStream.generate(() -> 10 + normal.sample()).limit(5000).toArray(); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, 0.05); + fp += test.type() == ChangePointAggregator.Type.STATIONARY ? 
0 : 1; + } + assertThat(fp, lessThan(5)); + } + + public void testNonStationaryFalsePositiveRate() throws IOException { + NormalDistribution normal = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 0, 2); + int fp = 0; + for (int i = 0; i < 100; i++) { + AtomicInteger j = new AtomicInteger(); + double[] bucketValues = DoubleStream.generate(() -> j.incrementAndGet() + normal.sample()).limit(40).toArray(); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, 1e-3); + fp += test.type() == ChangePointAggregator.Type.NON_STATIONARY ? 0 : 1; + } + assertThat(fp, lessThan(5)); + + fp = 0; + GammaDistribution gamma = new GammaDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 1, 2); + for (int i = 0; i < 100; i++) { + AtomicInteger j = new AtomicInteger(); + double[] bucketValues = DoubleStream.generate(() -> j.incrementAndGet() + gamma.sample()).limit(40).toArray(); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, 1e-3); + fp += test.type() == ChangePointAggregator.Type.NON_STATIONARY ? 0 : 1; + } + assertThat(fp, lessThan(5)); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103847") + public void testStepChangePower() throws IOException { + NormalDistribution normal = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 0, 2); + int tp = 0; + for (int i = 0; i < 100; i++) { + double[] bucketValues = DoubleStream.concat( + DoubleStream.generate(() -> normal.sample()).limit(20), + DoubleStream.generate(() -> 10 + normal.sample()).limit(20) + ).toArray(); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, 0.05); + tp += test.type() == ChangePointAggregator.Type.STEP_CHANGE ? 1 : 0; + } + assertThat(tp, greaterThan(90)); + + tp = 0; + GammaDistribution gamma = new GammaDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 1, 2); + for (int i = 0; i < 100; i++) { + double[] bucketValues = DoubleStream.concat( + DoubleStream.generate(() -> gamma.sample()).limit(20), + DoubleStream.generate(() -> 10 + gamma.sample()).limit(20) + ).toArray(); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, 0.05); + tp += test.type() == ChangePointAggregator.Type.STEP_CHANGE ? 1 : 0; + } + assertThat(tp, greaterThan(90)); + } + + public void testTrendChangePower() throws IOException { + NormalDistribution normal = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 0, 2); + int tp = 0; + for (int i = 0; i < 100; i++) { + AtomicInteger j = new AtomicInteger(); + double[] bucketValues = DoubleStream.concat( + DoubleStream.generate(() -> j.incrementAndGet() + normal.sample()).limit(20), + DoubleStream.generate(() -> 2.0 * j.incrementAndGet() + normal.sample()).limit(20) + ).toArray(); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, 0.05); + tp += test.type() == ChangePointAggregator.Type.TREND_CHANGE ? 
1 : 0; + } + assertThat(tp, greaterThan(90)); + + tp = 0; + GammaDistribution gamma = new GammaDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 1, 2); + for (int i = 0; i < 100; i++) { + AtomicInteger j = new AtomicInteger(); + double[] bucketValues = DoubleStream.concat( + DoubleStream.generate(() -> j.incrementAndGet() + gamma.sample()).limit(20), + DoubleStream.generate(() -> 2.0 * j.incrementAndGet() + gamma.sample()).limit(20) + ).toArray(); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, 0.05); + tp += test.type() == ChangePointAggregator.Type.TREND_CHANGE ? 1 : 0; + } + assertThat(tp, greaterThan(90)); + } + + public void testDistributionChangeTestPower() throws IOException { + NormalDistribution normal1 = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 0.0, 1.0); + NormalDistribution normal2 = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 0.0, 10.0); + int tp = 0; + for (int i = 0; i < 100; i++) { + double[] bucketValues = DoubleStream.concat( + DoubleStream.generate(() -> 10 + normal1.sample()).limit(50), + DoubleStream.generate(() -> 10 + normal2.sample()).limit(50) + ).toArray(); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, 0.05); + tp += test.type() == ChangePointAggregator.Type.DISTRIBUTION_CHANGE ? 1 : 0; + } + assertThat(tp, greaterThan(90)); + } + + public void testMultipleChanges() throws IOException { + NormalDistribution normal1 = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 78.0, 3.0); + NormalDistribution normal2 = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 40.0, 6.0); + NormalDistribution normal3 = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 1.0, 0.3); + int tp = 0; + for (int i = 0; i < 100; i++) { + double[] bucketValues = DoubleStream.concat( + DoubleStream.concat( + DoubleStream.generate(() -> normal1.sample()).limit(7), + DoubleStream.generate(() -> normal2.sample()).limit(6) + ), + DoubleStream.generate(() -> normal3.sample()).limit(23) + ).toArray(); + ChangePointAggregator.TestStats result = ChangePointAggregator.testForChange(bucketValues, 0.05); + tp += result.type() == ChangePointAggregator.Type.TREND_CHANGE ? 
1 : 0; + } + assertThat(tp, greaterThan(90)); + } + + public void testProblemDistributionChange() throws IOException { + double[] bucketValues = new double[] { + 546.3651753325270, + 550.872738079514, + 551.1312487618040, + 550.3323904749380, + 549.2652495378930, + 548.9761274963630, + 549.3433969743010, + 549.0935313531350, + 551.1762550747600, + 551.3772184469220, + 548.6163495094490, + 548.5866591594080, + 546.9364791288570, + 548.1167839989470, + 549.3484016149320, + 550.4242803917040, + 551.2316023050940, + 548.4713993534340, + 546.0254901960780, + 548.4376996805110, + 561.1920529801320, + 557.3930041152260, + 565.8497217068650, + 566.787072243346, + 546.6094890510950, + 530.5905797101450, + 556.7340823970040, + 557.3857677902620, + 543.0754716981130, + 574.3297101449280, + 559.2962962962960, + 549.5202952029520, + 531.7217741935480, + 551.4333333333330, + 557.637168141593, + 545.1880733944950, + 564.6893203883500, + 543.0204081632650, + 571.820809248555, + 541.2589928057550, + 520.4387755102040 }; + ChangePointAggregator.TestStats result = ChangePointAggregator.testForChange(bucketValues, 0.05); + assertThat(result.type(), equalTo(ChangePointAggregator.Type.DISTRIBUTION_CHANGE)); + } + + public void testConstant() throws IOException { double[] bucketValues = DoubleStream.generate(() -> 10).limit(100).toArray(); testChangeType( bucketValues, @@ -55,6 +253,7 @@ public void testNoChange() throws IOException { ); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103926") public void testSlopeUp() throws IOException { NormalDistribution normal = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 0, 2); AtomicInteger i = new AtomicInteger(); @@ -137,14 +336,22 @@ public void testStepChange() throws IOException { DoubleStream.generate(() -> 30 + normal.sample()).limit(20) ).toArray(); testChangeType(bucketValues, changeType -> { - assertThat(Arrays.toString(bucketValues), changeType, instanceOf(ChangeType.StepChange.class)); + assertThat( + Arrays.toString(bucketValues), + changeType, + anyOf( + // Due to the random nature of the values generated, either of these could be detected + instanceOf(ChangeType.StepChange.class), + instanceOf(ChangeType.TrendChange.class) + ) + ); assertThat(changeType.changePoint(), equalTo(20)); }); } public void testDistributionChange() throws IOException { - NormalDistribution first = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 50, 1); - NormalDistribution second = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 50, 5); + NormalDistribution first = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 0, 1); + NormalDistribution second = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 0, 5); double[] bucketValues = DoubleStream.concat( DoubleStream.generate(first::sample).limit(50), DoubleStream.generate(second::sample).limit(50) @@ -168,8 +375,14 @@ public void testDistributionChange() throws IOException { } public void testZeroDeviation() throws IOException { - double[] bucketValues = DoubleStream.generate(() -> 4243.1621621621625).limit(30).toArray(); - testChangeType(bucketValues, changeType -> { assertThat(changeType, instanceOf(ChangeType.Stationary.class)); }); + { + double[] bucketValues = DoubleStream.generate(() -> 4243.1621621621625).limit(30).toArray(); + testChangeType(bucketValues, changeType -> { assertThat(changeType, 
instanceOf(ChangeType.Stationary.class)); }); + } + { + double[] bucketValues = DoubleStream.generate(() -> -4243.1621621621625).limit(30).toArray(); + testChangeType(bucketValues, changeType -> { assertThat(changeType, instanceOf(ChangeType.Stationary.class)); }); + } } public void testStepChangeEdgeCaseScenarios() throws IOException { @@ -203,6 +416,158 @@ public void testStepChangeEdgeCaseScenarios() throws IOException { }); } + public void testSpikeSelectionVsChange() throws IOException { + double[] bucketValues = new double[] { + 3443.0, + 3476.0, + 3466.0, + 3567.0, + 3658.0, + 3445.0, + 3523.0, + 3477.0, + 3585.0, + 3645.0, + 3371.0, + 3361.0, + 3542.0, + 3471.0, + 3511.0, + 3485.0, + 3400.0, + 3386.0, + 3405.0, + 3387.0, + 3523.0, + 3492.0, + 3543.0, + 3374.0, + 3327.0, + 3320.0, + 3432.0, + 3413.0, + 3439.0, + 3378.0, + 3595.0, + 3364.0, + 3461.0, + 3418.0, + 3410.0, + 3410.0, + 3429.0, + 3504.0, + 3485.0, + 3514.0, + 3413.0, + 3482.0, + 3390.0, + 3337.0, + 3548.0, + 3446.0, + 3409.0, + 3359.0, + 3358.0, + 3543.0, + 3441.0, + 3545.0, + 3491.0, + 3424.0, + 3375.0, + 3413.0, + 3403.0, + 3500.0, + 3415.0, + 3453.0, + 3404.0, + 3466.0, + 3448.0, + 3603.0, + 3479.0, + 3295.0, + 3322.0, + 3445.0, + 3482.0, + 3393.0, + 3520.0, + 3413.0, + 7568.0, + 4747.0, + 3386.0, + 3406.0, + 3444.0, + 3494.0, + 3375.0, + 3305.0, + 3434.0, + 3429.0, + 3867.0, + 5147.0, + 3560.0, + 3359.0, + 3347.0, + 3391.0, + 3338.0, + 3278.0, + 3251.0, + 3373.0, + 3450.0, + 3356.0, + 3285.0, + 3357.0, + 3338.0, + 3361.0, + 3400.0, + 3281.0, + 3346.0, + 3345.0, + 3380.0, + 3383.0, + 3405.0, + 3308.0, + 3286.0, + 3356.0, + 3384.0, + 3326.0, + 3441.0, + 3445.0, + 3377.0, + 3379.0, + 3473.0, + 3366.0, + 3317.0, + 3352.0, + 3267.0, + 3345.0, + 3465.0, + 3309.0, + 3455.0, + 3379.0, + 3305.0, + 3287.0, + 3442.0, + 3389.0, + 3365.0, + 3442.0, + 3339.0, + 3298.0, + 3348.0, + 3377.0, + 3371.0, + 3428.0, + 3460.0, + 3376.0, + 3306.0, + 3300.0, + 3404.0, + 3469.0, + 3393.0, + 3302.0 }; + testChangeType(bucketValues, changeType -> { + assertThat(changeType, instanceOf(ChangeType.Spike.class)); + assertThat(Arrays.toString(bucketValues), changeType.changePoint(), equalTo(72)); + }); + } + void testChangeType(double[] bucketValues, Consumer changeTypeAssertions) throws IOException { FilterAggregationBuilder dummy = AggregationBuilders.filter("dummy", new MatchAllQueryBuilder()) .subAggregation( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java index 0b3851012d0e8..a916900b199ce 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java @@ -61,6 +61,8 @@ import java.util.function.LongSupplier; import static java.lang.Math.min; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresent; import static org.elasticsearch.xpack.ml.MachineLearning.MACHINE_MEMORY_NODE_ATTR; import static org.elasticsearch.xpack.ml.MachineLearning.MAX_JVM_SIZE_NODE_ATTR; import static org.elasticsearch.xpack.ml.MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD; @@ -72,7 +74,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static 
org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.mockito.ArgumentMatchers.any; @@ -978,7 +979,7 @@ public void testScaleDown() { ByteSizeValue.ofGb(5).getBytes() - PER_NODE_OVERHEAD ) ); - assertThat(result.isEmpty(), is(false)); + assertThat(result, isPresent()); MlMemoryAutoscalingCapacity deciderResult = result.get(); // Four times due to 25% ML memory assertThat(deciderResult.nodeSize().getBytes(), equalTo(4 * ByteSizeValue.ofGb(1).getBytes())); @@ -1013,7 +1014,7 @@ public void testScaleDown() { ByteSizeValue.ofGb(1).getBytes() - PER_NODE_OVERHEAD ) ); - assertThat(result.isEmpty(), is(false)); + assertThat(result, isPresent()); MlMemoryAutoscalingCapacity deciderResult = result.get(); // Four times due to 25% ML memory assertThat(deciderResult.nodeSize().getBytes(), equalTo(4 * ByteSizeValue.ofMb(100).getBytes())); @@ -1048,7 +1049,7 @@ public void testScaleDown() { ByteSizeValue.ofMb(100).getBytes() - PER_NODE_OVERHEAD ) ); - assertThat(result.isEmpty(), is(true)); + assertThat(result, isEmpty()); } } @@ -1210,7 +1211,7 @@ public void testFutureAvailableCapacity() { Collection mlNodesInCluster = clusterState.getNodes().getNodes().values(); Optional nativeMemoryCapacity = decider.calculateFutureAvailableCapacity(mlNodesInCluster, clusterState); - assertThat(nativeMemoryCapacity.isEmpty(), is(false)); + assertThat(nativeMemoryCapacity, isPresent()); assertThat(nativeMemoryCapacity.get().getNodeMlNativeMemoryRequirementExcludingOverhead(), greaterThanOrEqualTo(TEST_JOB_SIZE)); assertThat( nativeMemoryCapacity.get().getNodeMlNativeMemoryRequirementExcludingOverhead(), diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDeciderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDeciderTests.java index a56ad515690cf..97fd66e284010 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDeciderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDeciderTests.java @@ -25,8 +25,8 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.junit.Before; import java.util.Map; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java index b0b391f92b527..12ce45a186d62 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java @@ -576,8 +576,7 @@ private SearchResponse createSearchResponse(long totalHits, long earliestTime, l private SearchResponse createNullSearchResponse() { SearchResponse searchResponse = mock(SearchResponse.class); 
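The `createNullSearchResponse` hunk below replaces a hand-built empty `SearchHits` with the `SearchHits.empty` factory. A hedged sketch of the equivalence, with the signatures inferred from the before/after forms shown in this patch:

```java
import org.apache.lucene.search.TotalHits;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;

class EmptyHitsSketch {
    static SearchHits manual() {
        // Before: hand-built empty hits around a throwaway zero-length array.
        return new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 1f);
    }

    static SearchHits viaFactory() {
        // After: the factory states the intent directly and pairs naturally
        // with the shared SearchHits.EMPTY array used elsewhere in this patch.
        return SearchHits.empty(new TotalHits(0, TotalHits.Relation.EQUAL_TO), 1f);
    }
}
```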
when(searchResponse.status()).thenReturn(RestStatus.OK); - SearchHit[] hits = new SearchHit[0]; - SearchHits searchHits = new SearchHits(hits, new TotalHits(0, TotalHits.Relation.EQUAL_TO), 1); + SearchHits searchHits = SearchHits.empty(new TotalHits(0, TotalHits.Relation.EQUAL_TO), 1); when(searchResponse.getHits()).thenReturn(searchHits); List aggs = new ArrayList<>(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java index 7ffb3231331a0..bf7aa465ee604 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java @@ -546,7 +546,7 @@ private SearchResponse createSearchResponse(List timestamps, List hit.addDocumentFields(fields, Map.of()); hits.add(hit); } - SearchHits searchHits = new SearchHits(hits.toArray(new SearchHit[0]), new TotalHits(hits.size(), TotalHits.Relation.EQUAL_TO), 1); + SearchHits searchHits = new SearchHits(hits.toArray(SearchHits.EMPTY), new TotalHits(hits.size(), TotalHits.Relation.EQUAL_TO), 1); when(searchResponse.getHits()).thenReturn(searchHits); when(searchResponse.getTook()).thenReturn(TimeValue.timeValueMillis(randomNonNegativeLong())); return searchResponse; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndexTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndexTests.java index d90696761e668..998edd6044bab 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndexTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndexTests.java @@ -14,8 +14,8 @@ import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; @@ -615,7 +615,7 @@ private Map testUpdateMappingsToDestIndex(DataFrameAnalysis anal ArgumentCaptor fieldCapabilitiesRequestCaptor = ArgumentCaptor.forClass(FieldCapabilitiesRequest.class); doAnswer(callListenerOnResponse(AcknowledgedResponse.TRUE)).when(client) - .execute(eq(PutMappingAction.INSTANCE), putMappingRequestCaptor.capture(), any()); + .execute(eq(TransportPutMappingAction.TYPE), putMappingRequestCaptor.capture(), any()); FieldCapabilitiesResponse fieldCapabilitiesResponse = new FieldCapabilitiesResponse(new String[0], new HashMap<>() { { @@ -638,7 +638,7 @@ private Map testUpdateMappingsToDestIndex(DataFrameAnalysis anal verify(client, atLeastOnce()).threadPool(); verify(client, atMost(1)).execute(eq(TransportFieldCapabilitiesAction.TYPE), any(), any()); - verify(client).execute(eq(PutMappingAction.INSTANCE), any(), any()); + 
verify(client).execute(eq(TransportPutMappingAction.TYPE), any(), any()); verifyNoMoreInteractions(client); PutMappingRequest putMappingRequest = putMappingRequestCaptor.getValue(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java index b9fc08349fffe..7bc3d507ecf22 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java @@ -49,6 +49,7 @@ import java.util.function.Function; import java.util.stream.Collectors; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -134,7 +135,7 @@ public void testTwoPageExtraction() throws IOException { // Third batch should return empty rows = dataExtractor.next(); - assertThat(rows.isEmpty(), is(true)); + assertThat(rows, isEmpty()); assertThat(dataExtractor.hasNext(), is(false)); // Now let's assert we're sending the expected search requests @@ -223,7 +224,7 @@ public void testRecoveryFromErrorOnSearch() throws IOException { // Next batch should return empty rows = dataExtractor.next(); - assertThat(rows.isEmpty(), is(true)); + assertThat(rows, isEmpty()); assertThat(dataExtractor.hasNext(), is(false)); // Notice we've done 4 searches @@ -267,7 +268,7 @@ public void testIncludeSourceIsFalseAndNoSourceFields() throws IOException { assertThat(rows.get().get(0).getValues(), equalTo(new String[] { "11", "21" })); assertThat(dataExtractor.hasNext(), is(true)); - assertThat(dataExtractor.next().isEmpty(), is(true)); + assertThat(dataExtractor.next(), isEmpty()); assertThat(dataExtractor.hasNext(), is(false)); assertThat(dataExtractor.capturedSearchRequests.size(), equalTo(2)); @@ -302,7 +303,7 @@ public void testIncludeSourceIsFalseAndAtLeastOneSourceField() throws IOExceptio assertThat(rows.get().get(0).getValues(), equalTo(new String[] { "11", "21" })); assertThat(dataExtractor.hasNext(), is(true)); - assertThat(dataExtractor.next().isEmpty(), is(true)); + assertThat(dataExtractor.next(), isEmpty()); assertThat(dataExtractor.hasNext(), is(false)); assertThat(dataExtractor.capturedSearchRequests.size(), equalTo(2)); @@ -380,7 +381,7 @@ public void testMissingValues_GivenSupported() throws IOException { // Third batch should return empty rows = dataExtractor.next(); - assertThat(rows.isEmpty(), is(true)); + assertThat(rows, isEmpty()); assertThat(dataExtractor.hasNext(), is(false)); } @@ -414,7 +415,7 @@ public void testMissingValues_GivenNotSupported() throws IOException { // Third batch should return empty rows = dataExtractor.next(); - assertThat(rows.isEmpty(), is(true)); + assertThat(rows, isEmpty()); assertThat(dataExtractor.hasNext(), is(false)); } @@ -652,7 +653,7 @@ private SearchResponse createSearchResponse(List field1Values, List nodes = List.of(new Node("n_1", scaleNodeSize(50), 4)); Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(51).getBytes(), 4, 1, Map.of(), 0, 0, 0); AssignmentPlan plan = new AssignmentPlanner(nodes, List.of(deployment)).computePlan(); - assertThat(plan.assignments(deployment).isEmpty(), is(true)); + assertThat(plan.assignments(deployment), isEmpty()); } { 
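The assertions above migrate from boolean `Optional` checks to the `OptionalMatchers` helpers imported earlier in these files. A small usage sketch; the matcher names come from this patch's imports, and the improved failure reporting is the assumed motivation:

```java
import java.util.Optional;

import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty;
import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresentWith;
import static org.hamcrest.MatcherAssert.assertThat;

class OptionalMatcherSketch {
    void demo() {
        Optional<String> assignment = Optional.of("node-1");

        // One assertion now covers both the presence check and the value check,
        // and a failure can report the Optional itself rather than a bare
        // "expected <true> but was <false>".
        assertThat(assignment, isPresentWith("node-1"));
        assertThat(Optional.<String>empty(), isEmpty());
    }
}
```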
// With perDeploymentMemory and perAllocationMemory specified List nodes = List.of(new Node("n_1", scaleNodeSize(55), 4)); @@ -58,7 +59,7 @@ public void testModelThatDoesNotFitInMemory() { ByteSizeValue.ofMb(51).getBytes() ); AssignmentPlan plan = new AssignmentPlanner(nodes, List.of(deployment)).computePlan(); - assertThat(plan.assignments(deployment).isEmpty(), is(true)); + assertThat(plan.assignments(deployment), isEmpty()); } } @@ -66,7 +67,7 @@ public void testModelWithThreadsPerAllocationNotFittingOnAnyNode() { List nodes = List.of(new Node("n_1", scaleNodeSize(100), 4), new Node("n_2", scaleNodeSize(100), 5)); Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(1).getBytes(), 1, 6, Map.of(), 0, 0, 0); AssignmentPlan plan = new AssignmentPlanner(nodes, List.of(deployment)).computePlan(); - assertThat(plan.assignments(deployment).isEmpty(), is(true)); + assertThat(plan.assignments(deployment), isEmpty()); } public void testSingleModelThatFitsFullyOnSingleNode() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java index c45ce36394109..7f83df5835494 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java @@ -15,9 +15,10 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresentWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; public class PreserveAllAllocationsTests extends ESTestCase { @@ -89,12 +90,12 @@ public void testGivenPreviousAssignments() { AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)) .assignModelToNode(deployment1, node1, 2) .build(); - assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 2))); - assertThat(plan.assignments(deployment2).isEmpty(), is(true)); + assertThat(plan.assignments(deployment1), isPresentWith(Map.of(node1, 2))); + assertThat(plan.assignments(deployment2), isEmpty()); plan = preserveAllAllocations.mergePreservedAllocations(plan); - assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 3))); - assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node1, 1, node2, 2))); + assertThat(plan.assignments(deployment1), isPresentWith(Map.of(node1, 3))); + assertThat(plan.assignments(deployment2), isPresentWith(Map.of(node1, 1, node2, 2))); // Node 1 already had deployments 1 and 2 assigned to it so adding more allocation doesn't change memory usage. 
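In the branches where `perDeploymentMemory` and `perAllocationMemory` are specified, the remaining-memory comments in these tests all follow one formula. A hypothetical helper spelling it out; the name and shape are illustrative, and the constants come from the comments in the surrounding hunks:

```java
class PlannerMemorySketch {
    // A deployment on a node costs its model size, plus a fixed per-deployment
    // overhead, plus a per-allocation overhead for each allocation placed there.
    static long deploymentBytesOnNode(long modelBytes, long perDeploymentBytes, long perAllocationBytes, int allocations) {
        return modelBytes + perDeploymentBytes + (long) allocations * perAllocationBytes;
    }

    // e.g. deployment 1: 30 + 300 + 3 * 10 = 360 MB with three allocations,
    //      deployment 2: 50 + 300 + 1 * 10 = 360 MB with one allocation on node 1,
    //      so a 1000 MB node has 1000 - 720 = 280 MB left, matching the
    //      "= 280" comment further below.
}
```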
assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(0L)); @@ -174,12 +175,12 @@ public void testGivenPreviousAssignments() { AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)) .assignModelToNode(deployment1, node1, 2) .build(); - assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 2))); - assertThat(plan.assignments(deployment2).isEmpty(), is(true)); + assertThat(plan.assignments(deployment1), isPresentWith(Map.of(node1, 2))); + assertThat(plan.assignments(deployment2), isEmpty()); plan = preserveAllAllocations.mergePreservedAllocations(plan); - assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 3))); - assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node1, 1, node2, 2))); + assertThat(plan.assignments(deployment1), isPresentWith(Map.of(node1, 3))); + assertThat(plan.assignments(deployment2), isPresentWith(Map.of(node1, 1, node2, 2))); // 1000 - ((30 + 300 + 3*10) + (50 + 300 + 10)) = 280 : deployments use 720 MB on the node 1 assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(ByteSizeValue.ofMb(280).getBytes())); @@ -198,11 +199,10 @@ public void testGivenModelWithPreviousAssignments_AndPlanToMergeHasNoAssignments PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations(List.of(node), List.of(deployment)); AssignmentPlan plan = AssignmentPlan.builder(List.of(node), List.of(deployment)).build(); - assertThat(plan.assignments(deployment).isEmpty(), is(true)); + assertThat(plan.assignments(deployment), isEmpty()); plan = preserveAllAllocations.mergePreservedAllocations(plan); - assertThat(plan.assignments(deployment).isPresent(), is(true)); - assertThat(plan.assignments(deployment).get(), equalTo(Map.of(node, 2))); + assertThat(plan.assignments(deployment), isPresentWith(Map.of(node, 2))); assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(ByteSizeValue.ofMb(100).getBytes())); assertThat(plan.getRemainingNodeCores("n_1"), equalTo(0)); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java index f646bf5cb2e9d..d2907eb31160b 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java @@ -15,10 +15,11 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty; +import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresentWith; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; public class PreserveOneAllocationTests extends ESTestCase { @@ -202,11 +203,10 @@ public void testGivenModelWithPreviousAssignments_AndPlanToMergeHasNoAssignments PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node), List.of(deployment)); AssignmentPlan plan = AssignmentPlan.builder(List.of(node), List.of(deployment)).build(); - assertThat(plan.assignments(deployment).isEmpty(), is(true)); + assertThat(plan.assignments(deployment), isEmpty()); plan = preserveOneAllocation.mergePreservedAllocations(plan); - assertThat(plan.assignments(deployment).isPresent(), is(true)); - 
assertThat(plan.assignments(deployment).get(), equalTo(Map.of(node, 1))); + assertThat(plan.assignments(deployment), isPresentWith(Map.of(node, 1))); // 400 - (30*2 + 240) = 100 : deployments use 300MB on the node assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(ByteSizeValue.ofMb(100).getBytes())); assertThat(plan.getRemainingNodeCores("n_1"), equalTo(2)); @@ -227,11 +227,10 @@ public void testGivenModelWithPreviousAssignments_AndPlanToMergeHasNoAssignments PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node), List.of(deployment)); AssignmentPlan plan = AssignmentPlan.builder(List.of(node), List.of(deployment)).build(); - assertThat(plan.assignments(deployment).isEmpty(), is(true)); + assertThat(plan.assignments(deployment), isEmpty()); plan = preserveOneAllocation.mergePreservedAllocations(plan); - assertThat(plan.assignments(deployment).isPresent(), is(true)); - assertThat(plan.assignments(deployment).get(), equalTo(Map.of(node, 1))); + assertThat(plan.assignments(deployment), isPresentWith(Map.of(node, 1))); // 400 - (30 + 300 + 10) = 60 : deployments use 340MB on the node assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(ByteSizeValue.ofMb(60).getBytes())); assertThat(plan.getRemainingNodeCores("n_1"), equalTo(2)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java index 2f4640cfa38dc..40b0dd519f7d8 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ClassificationConfig; @@ -46,7 +47,6 @@ import org.elasticsearch.xpack.core.ml.inference.trainedmodel.inference.InferenceDefinition; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.ml.inference.TrainedModelStatsService; import org.elasticsearch.xpack.ml.inference.ingest.InferenceProcessor; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/NodeLoadDetectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/NodeLoadDetectorTests.java index ccc7f14d2264e..fef9b07429702 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/NodeLoadDetectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/NodeLoadDetectorTests.java @@ -21,9 +21,9 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import 
org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutorTests; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import org.junit.Before; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java index 39f02f71642ed..8179a97955a57 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java @@ -927,7 +927,7 @@ private static SearchResponse createSearchResponse(List> sou list.add(hit); } - SearchHits hits = new SearchHits(list.toArray(new SearchHit[0]), new TotalHits(source.size(), TotalHits.Relation.EQUAL_TO), 1); + SearchHits hits = new SearchHits(list.toArray(SearchHits.EMPTY), new TotalHits(source.size(), TotalHits.Relation.EQUAL_TO), 1); when(response.getHits()).thenReturn(hits); return response; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java index aff3f006b1a8a..7a314b82024be 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -53,6 +54,7 @@ import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats.AssignmentMemoryBasis; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles; import org.elasticsearch.xpack.ml.MachineLearning; @@ -815,6 +817,35 @@ public void testCreate_givenNonZeroCountsAndNoModelSnapshotNorQuantiles() { verifyNoMoreInteractions(auditor); } + public void testGetOpenProcessMemoryUsage() { + modelSnapshot = null; + quantiles = null; + dataCounts = new DataCounts("foo"); + dataCounts.setLatestRecordTimeStamp(new Date(0L)); + dataCounts.incrementProcessedRecordCount(42L); + long modelMemoryLimitBytes = ByteSizeValue.ofMb(randomIntBetween(10, 1000)).getBytes(); + long peakModelBytes = randomLongBetween(100000, modelMemoryLimitBytes - 1); + long modelBytes = randomLongBetween(1, peakModelBytes - 1); + AssignmentMemoryBasis assignmentMemoryBasis = 
randomFrom(AssignmentMemoryBasis.values()); + modelSizeStats = new ModelSizeStats.Builder("foo").setModelBytesMemoryLimit(modelMemoryLimitBytes) + .setPeakModelBytes(peakModelBytes) + .setModelBytes(modelBytes) + .setAssignmentMemoryBasis(assignmentMemoryBasis) + .build(); + when(autodetectCommunicator.getModelSizeStats()).thenReturn(modelSizeStats); + AutodetectProcessManager manager = createSpyManager(); + JobTask jobTask = mock(JobTask.class); + when(jobTask.getJobId()).thenReturn("foo"); + manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + + long expectedSizeBytes = Job.PROCESS_MEMORY_OVERHEAD.getBytes() + switch (assignmentMemoryBasis) { + case MODEL_MEMORY_LIMIT -> modelMemoryLimitBytes; + case CURRENT_MODEL_BYTES -> modelBytes; + case PEAK_MODEL_BYTES -> peakModelBytes; + }; + assertThat(manager.getOpenProcessMemoryUsage(), equalTo(ByteSizeValue.ofBytes(expectedSizeBytes))); + } + private AutodetectProcessManager createNonSpyManager(String jobId) { ExecutorService executorService = mock(ExecutorService.class); when(threadPool.executor(anyString())).thenReturn(executorService); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessorTests.java index ed050a99cd16d..cc4491ff5fffc 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessorTests.java @@ -63,6 +63,7 @@ import java.util.List; import java.util.concurrent.ScheduledThreadPoolExecutor; +import static org.elasticsearch.common.util.concurrent.EsExecutors.DIRECT_EXECUTOR_SERVICE; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -102,6 +103,7 @@ public void setUpMocks() { executor = new Scheduler.SafeScheduledThreadPoolExecutor(1); client = mock(Client.class); ThreadPool threadPool = mock(ThreadPool.class); + when(threadPool.executor(any())).thenReturn(DIRECT_EXECUTOR_SERVICE); when(client.threadPool()).thenReturn(threadPool); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); auditor = mock(AnomalyDetectionAuditor.class); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/RetryableUpdateModelSnapshotActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/RetryableUpdateModelSnapshotActionTests.java new file mode 100644 index 0000000000000..a073da32f3085 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/RetryableUpdateModelSnapshotActionTests.java @@ -0,0 +1,172 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.job.process.autodetect.output; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ml.action.PutJobAction; +import org.elasticsearch.xpack.core.ml.action.UpdateJobAction; +import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; +import org.junit.Before; + +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.common.util.concurrent.EsExecutors.DIRECT_EXECUTOR_SERVICE; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class RetryableUpdateModelSnapshotActionTests extends ESTestCase { + + private static final String JOB_ID = "valid_id"; + + private Client client; + + private ThreadPool threadPool; + + @Before + public void setUpMocks() { + client = mock(Client.class); + threadPool = mock(ThreadPool.class); + when(threadPool.executor(any())).thenReturn(DIRECT_EXECUTOR_SERVICE); + doAnswer(invocationOnMock -> { + Runnable runnable = (Runnable) invocationOnMock.getArguments()[0]; + runnable.run(); + return null; + }).when(threadPool).schedule(any(), any(), any()); + when(client.threadPool()).thenReturn(threadPool); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + } + + public void testFirstTimeSuccess() { + + PutJobAction.Response response = mock(PutJobAction.Response.class); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + listener.onResponse(response); + return null; + }).when(client).execute(any(), any(), any()); + + AtomicReference storedResponse = new AtomicReference<>(); + + UpdateJobAction.Request updateRequest = new UpdateJobAction.Request(JOB_ID, new JobUpdate.Builder(JOB_ID).build()); + RetryableUpdateModelSnapshotAction updateModelSnapshotAction = new RetryableUpdateModelSnapshotAction( + client, + updateRequest, + new ActionListener<>() { + @Override + public void onResponse(PutJobAction.Response response) { + storedResponse.set(response); + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + } + ); + updateModelSnapshotAction.run(); + + verify(threadPool, never()).schedule(any(), any(), any()); + assertSame(response, storedResponse.get()); + } + + public void testRetriesNeeded() { + + int numRetries = randomIntBetween(1, 5); + + PutJobAction.Response response = mock(PutJobAction.Response.class); + AtomicInteger callCount = new AtomicInteger(0); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + if (callCount.incrementAndGet() > numRetries) { + listener.onResponse(response); + } else { + listener.onFailure(new Exception()); + } + return null; + }).when(client).execute(any(), any(), any()); + + AtomicReference storedResponse = new AtomicReference<>(); + + 
UpdateJobAction.Request updateRequest = new UpdateJobAction.Request(JOB_ID, new JobUpdate.Builder(JOB_ID).build()); + RetryableUpdateModelSnapshotAction updateModelSnapshotAction = new RetryableUpdateModelSnapshotAction( + client, + updateRequest, + new ActionListener<>() { + @Override + public void onResponse(PutJobAction.Response response) { + storedResponse.set(response); + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + } + ); + updateModelSnapshotAction.run(); + + verify(threadPool, times(numRetries)).schedule(any(), any(), any()); + assertSame(response, storedResponse.get()); + } + + public void testCompleteFailure() { + + int numRetries = randomIntBetween(1, 5); + + AtomicInteger callCount = new AtomicInteger(0); + AtomicLong relativeTimeMs = new AtomicLong(0); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + if (callCount.incrementAndGet() > numRetries) { + relativeTimeMs.set(TimeValue.timeValueMinutes(5).millis() + 1); + } + listener.onFailure(new Exception(Long.toString(relativeTimeMs.get()))); + return null; + }).when(client).execute(any(), any(), any()); + doAnswer(invocationOnMock -> relativeTimeMs.get()).when(threadPool).relativeTimeInMillis(); + + AtomicReference storedFailure = new AtomicReference<>(); + + UpdateJobAction.Request updateRequest = new UpdateJobAction.Request(JOB_ID, new JobUpdate.Builder(JOB_ID).build()); + RetryableUpdateModelSnapshotAction updateModelSnapshotAction = new RetryableUpdateModelSnapshotAction( + client, + updateRequest, + new ActionListener<>() { + @Override + public void onResponse(PutJobAction.Response response) { + fail("this should not be called"); + } + + @Override + public void onFailure(Exception e) { + storedFailure.set(e); + } + } + ); + updateModelSnapshotAction.run(); + + verify(threadPool, times(numRetries)).schedule(any(), any(), any()); + assertEquals(Long.toString(relativeTimeMs.get()), storedFailure.get().getMessage()); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/queries/WeightedTokensQueryBuilderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/queries/WeightedTokensQueryBuilderTests.java index 14880a5cf85e7..2576f3f802b98 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/queries/WeightedTokensQueryBuilderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/queries/WeightedTokensQueryBuilderTests.java @@ -231,7 +231,7 @@ public void testPruningIsAppliedCorrectly() throws IOException { WeightedTokensQueryBuilder queryThatShouldBePruned = new WeightedTokensQueryBuilder( RANK_FEATURES_FIELD, inputTokens, - new TokenPruningConfig(1.5f, 0.5f, false) + new TokenPruningConfig(2, 0.5f, false) ); query = queryThatShouldBePruned.doToQuery(context); assertCorrectLuceneQuery("queryThatShouldBePruned", query, List.of("dog", "jumped", "on", "me")); @@ -239,7 +239,7 @@ public void testPruningIsAppliedCorrectly() throws IOException { WeightedTokensQueryBuilder onlyScorePrunedTokensQuery = new WeightedTokensQueryBuilder( RANK_FEATURES_FIELD, inputTokens, - new TokenPruningConfig(1.5f, 0.5f, true) + new TokenPruningConfig(2, 0.5f, true) ); query = onlyScorePrunedTokensQuery.doToQuery(context); assertCorrectLuceneQuery("onlyScorePrunedTokensQuery", query, List.of("the", "black")); @@ -361,21 +361,21 @@ public void testIllegalValues() { { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> new 
WeightedTokensQueryBuilder("field name", weightedTokens, new TokenPruningConfig(-1f, 0.0f, false)) + () -> new WeightedTokensQueryBuilder("field name", weightedTokens, new TokenPruningConfig(-1, 0.0f, false)) ); - assertEquals("[tokens_freq_ratio_threshold] must be between [1.0] and [100.0], got -1.0", e.getMessage()); + assertEquals("[tokens_freq_ratio_threshold] must be between [1] and [100], got -1.0", e.getMessage()); } { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> new WeightedTokensQueryBuilder("field name", weightedTokens, new TokenPruningConfig(101f, 0.0f, false)) + () -> new WeightedTokensQueryBuilder("field name", weightedTokens, new TokenPruningConfig(101, 0.0f, false)) ); - assertEquals("[tokens_freq_ratio_threshold] must be between [1.0] and [100.0], got 101.0", e.getMessage()); + assertEquals("[tokens_freq_ratio_threshold] must be between [1] and [100], got 101.0", e.getMessage()); } { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> new WeightedTokensQueryBuilder("field name", weightedTokens, new TokenPruningConfig(5f, 5f, false)) + () -> new WeightedTokensQueryBuilder("field name", weightedTokens, new TokenPruningConfig(5, 5f, false)) ); assertEquals("[tokens_weight_threshold] must be between 0 and 1", e.getMessage()); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java index ed41042913421..5aaaa3ff958fd 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java @@ -26,7 +26,6 @@ import org.elasticsearch.analysis.common.CommonAnalysisPlugin; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.settings.Settings; @@ -82,6 +81,7 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.utils.MlTaskState; import org.elasticsearch.xpack.ilm.IndexLifecycle; +import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.ml.LocalStateMachineLearning; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.MlSingleNodeTestCase; @@ -103,7 +103,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.function.Function; @@ -161,7 +160,8 @@ protected Collection> nodePlugins() { DataStreamsPlugin.class, // To remove errors from parsing build in templates that contain scaled_float MapperExtrasPlugin.class, - Wildcard.class + Wildcard.class, + InferencePlugin.class ); } @@ -172,10 +172,7 @@ protected Collection> getMockPlugins() { @Before public void ensureTemplatesArePresent() throws Exception { - assertBusy(() -> { - ClusterState state = clusterAdmin().prepareState().get().getState(); - assertTrue("Timed out waiting for the ML templates to be installed", MachineLearning.criticalTemplatesInstalled(state)); - }, 20, TimeUnit.SECONDS); + 
awaitClusterState(logger, MachineLearning::criticalTemplatesInstalled); } protected Job.Builder createJob(String id) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterServiceTests.java index 9e2f14aaabd84..f8ffed0864372 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterServiceTests.java @@ -33,6 +33,8 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ClientHelper; @@ -75,23 +77,28 @@ public class ResultsPersisterServiceTests extends ESTestCase { // Constants for searchWithRetry tests private static final SearchRequest SEARCH_REQUEST = new SearchRequest("my-index"); - private static final SearchResponse SEARCH_RESPONSE_SUCCESS = new SearchResponse( - null, + public static final SearchResponse SEARCH_RESPONSE_SUCCESS = SearchResponseUtils.emptyWithTotalHits( null, 1, 1, 0, - 0, + 1L, ShardSearchFailure.EMPTY_ARRAY, null ); - private static final SearchResponse SEARCH_RESPONSE_FAILURE = new SearchResponse( + public static final SearchResponse SEARCH_RESPONSE_FAILURE = new SearchResponse( + SearchHits.EMPTY_WITHOUT_TOTAL_HITS, + null, + null, + false, null, null, 1, + null, + 1, 0, 0, - 0, + 1L, ShardSearchFailure.EMPTY_ARRAY, null ); @@ -418,4 +425,5 @@ public static ResultsPersisterService buildResultsPersisterService(OriginSetting }).when(tp).schedule(any(Runnable.class), any(TimeValue.class), any(Executor.class)); return new ResultsPersisterService(tp, client, clusterService, Settings.EMPTY); } + } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java index 5ea99aeea4092..eaec54ca9c1a3 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java @@ -77,7 +77,7 @@ public class MonitoringTemplateRegistry extends IndexTemplateRegistry { * writes monitoring data in ECS format as of 8.0. These templates define the ECS schema as well as alias fields for the old monitoring * mappings that point to the corresponding ECS fields. 
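Note on the hunk that follows: `STACK_MONITORING_REGISTRY_VERSION` moves from `8_00_00_99 + 12` to `8_00_00_99 + 13`. Bumping this constant is what makes the registry re-put its templates on upgrade. Below is a minimal sketch of that gating rule; the helper names are hypothetical (the real check lives in `IndexTemplateRegistry`):

```java
// Sketch only: why bumping the registry version forces a template reinstall.
// Helper names are hypothetical; the real logic is in IndexTemplateRegistry.
final class RegistryVersionGate {
    static boolean needsUpgrade(Long installedVersion, int registryVersion) {
        // A missing or older installed template triggers a fresh PUT with the new version.
        return installedVersion == null || installedVersion < registryVersion;
    }

    public static void main(String[] args) {
        int after = 8_00_00_99 + 13; // value introduced by this change
        System.out.println(needsUpgrade(8_00_00_99 + 12L, after)); // true: reinstall
        System.out.println(needsUpgrade((long) after, after));     // false: up to date
    }
}
```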
*/ - public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 12; + public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 13; private static final String STACK_MONITORING_REGISTRY_VERSION_VARIABLE = "xpack.stack.monitoring.template.release.version"; private static final String STACK_TEMPLATE_VERSION = "8"; private static final String STACK_TEMPLATE_VERSION_VARIABLE = "xpack.stack.monitoring.template.version"; diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/FilteredMonitoringDoc.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/FilteredMonitoringDoc.java index 9bf6c4153d5a8..fedaab44761ac 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/FilteredMonitoringDoc.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/FilteredMonitoringDoc.java @@ -9,16 +9,14 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; -import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; import java.io.IOException; -import java.io.InputStream; import java.util.Set; /** @@ -58,14 +56,16 @@ Set getFilters() { @Override public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - final XContent xContent = builder.contentType().xContent(); try (BytesStreamOutput out = new BytesStreamOutput()) { try (XContentBuilder filteredBuilder = new XContentBuilder(builder.contentType(), out, filters)) { super.toXContent(filteredBuilder, params); } try ( - InputStream stream = out.bytes().streamInput(); - XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + out.bytes(), + builder.contentType() + ) ) { return builder.copyCurrentStructure(parser); } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistryTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistryTests.java index 0d19ae1c95590..07df17a0922d6 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistryTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistryTests.java @@ -12,8 +12,8 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import 
org.elasticsearch.action.admin.indices.template.put.TransportPutIndexTemplateAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; @@ -45,7 +45,8 @@ import org.elasticsearch.xpack.core.ilm.LifecyclePolicyMetadata; import org.elasticsearch.xpack.core.ilm.OperationMode; import org.elasticsearch.xpack.core.ilm.Phase; -import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; +import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; import org.junit.After; import org.junit.Before; @@ -65,6 +66,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; @@ -159,11 +161,10 @@ public void testThatNonExistingPoliciesAreAddedImmediately() throws Exception { AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, request, listener) -> { - if (action instanceof PutLifecycleAction) { + if (action == ILMActions.PUT) { calledTimes.incrementAndGet(); - assertThat(action, instanceOf(PutLifecycleAction.class)); - assertThat(request, instanceOf(PutLifecycleAction.Request.class)); - final PutLifecycleAction.Request putRequest = (PutLifecycleAction.Request) request; + assertThat(request, instanceOf(PutLifecycleRequest.class)); + final PutLifecycleRequest putRequest = (PutLifecycleRequest) request; assertThat(putRequest.getPolicy().getName(), equalTo(MonitoringTemplateRegistry.MONITORING_POLICY_NAME)); if (putRequest.getPolicy().getName().equals(MonitoringTemplateRegistry.MONITORING_POLICY_NAME)) { Phase delete = putRequest.getPolicy().getPhases().get("delete"); @@ -174,10 +175,10 @@ public void testThatNonExistingPoliciesAreAddedImmediately() throws Exception { } else if (action instanceof PutComponentTemplateAction) { // Ignore this, it's verified in another test return new MonitoringTemplateRegistryTests.TestPutIndexTemplateResponse(true); - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutIndexTemplateAction) { + } else if (action == TransportPutIndexTemplateAction.TYPE) { return AcknowledgedResponse.TRUE; } else { fail("client called with unexpected request: " + request.toString()); @@ -220,13 +221,13 @@ public void testPolicyAlreadyExists() { if (action instanceof PutComponentTemplateAction) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutIndexTemplateAction) { + } else if (action == TransportPutIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { fail("if the policy already exists it should not be re-put"); } else { fail("client called with unexpected request: " + request.toString()); @@ -252,13 +253,13 @@ 
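A brief aside on the pattern in these test hunks: matching on `action == ILMActions.PUT` or `action == TransportPutComposableIndexTemplateAction.TYPE` works because each `ActionType` is a single shared constant, so reference identity pins down the exact action where an `instanceof` check on the (now removed) action class cannot. A toy illustration with stand-in classes, not the real Elasticsearch API:

```java
// Stand-ins only: demonstrates why the tests can compare action types with ==.
final class FakeActionType {
    static final FakeActionType PUT = new FakeActionType("cluster:admin/ilm/put");
    static final FakeActionType DELETE = new FakeActionType("cluster:admin/ilm/delete");
    final String name;
    private FakeActionType(String name) { this.name = name; }
}

final class IdentityCheckDemo {
    public static void main(String[] args) {
        FakeActionType received = FakeActionType.PUT;
        // Each action is registered exactly once, so identity is cheap and exact:
        System.out.println(received == FakeActionType.PUT);    // true
        System.out.println(received == FakeActionType.DELETE); // false
    }
}
```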
public void testPolicyAlreadyExistsButDiffers() throws IOException { if (action instanceof PutComponentTemplateAction) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutIndexTemplateAction) { + } else if (action == TransportPutIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { fail("if the policy already exists it should not be re-put"); } else { fail("client called with unexpected request: " + request.toString()); @@ -350,19 +351,20 @@ private ActionResponse verifyComposableTemplateInstalled( if (action instanceof PutComponentTemplateAction) { // Ignore this, it's verified in another test return new TestPutIndexTemplateResponse(true); - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test calledTimes.incrementAndGet(); - assertThat(action, instanceOf(PutComposableIndexTemplateAction.class)); - assertThat(request, instanceOf(PutComposableIndexTemplateAction.Request.class)); - final PutComposableIndexTemplateAction.Request putRequest = ((PutComposableIndexTemplateAction.Request) request); + assertThat(action, sameInstance(TransportPutComposableIndexTemplateAction.TYPE)); + assertThat(request, instanceOf(TransportPutComposableIndexTemplateAction.Request.class)); + final TransportPutComposableIndexTemplateAction.Request putRequest = + ((TransportPutComposableIndexTemplateAction.Request) request); assertThat(putRequest.indexTemplate().version(), equalTo((long) MonitoringTemplateRegistry.STACK_MONITORING_REGISTRY_VERSION)); assertNotNull(listener); return AcknowledgedResponse.TRUE; - } else if (action instanceof PutIndexTemplateAction) { + } else if (action == TransportPutIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; } else { diff --git a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java index 1efeda816f59f..0e752a02b5ee4 100644 --- a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java +++ b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.license.DeleteLicenseAction; import org.elasticsearch.license.License; import org.elasticsearch.license.LicensesMetadata; import org.elasticsearch.license.PostStartBasicAction; @@ -23,6 +22,7 @@ import org.elasticsearch.license.PostStartTrialAction; import org.elasticsearch.license.PostStartTrialRequest; import 
org.elasticsearch.license.PostStartTrialResponse; +import org.elasticsearch.license.TransportDeleteLicenseAction; import org.elasticsearch.protocol.xpack.XPackUsageRequest; import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest; import org.elasticsearch.snapshots.SnapshotRestoreException; @@ -57,7 +57,7 @@ public void testFeatureUsage() throws Exception { } public void testFailRestoreOnInvalidLicense() throws Exception { - assertAcked(client().execute(DeleteLicenseAction.INSTANCE, new DeleteLicenseRequest()).get()); + assertAcked(client().execute(TransportDeleteLicenseAction.TYPE, new DeleteLicenseRequest()).get()); assertAcked(client().execute(PostStartBasicAction.INSTANCE, new PostStartBasicRequest()).get()); ensureClusterSizeConsistency(); @@ -93,7 +93,7 @@ public void testShardAllocationOnInvalidLicense() throws Exception { assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); ensureGreen(indexName); - assertAcked(client().execute(DeleteLicenseAction.INSTANCE, new DeleteLicenseRequest()).get()); + assertAcked(client().execute(TransportDeleteLicenseAction.TYPE, new DeleteLicenseRequest()).get()); assertAcked(client().execute(PostStartBasicAction.INSTANCE, new PostStartBasicRequest()).get()); ensureClusterSizeConsistency(); diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java index ef5198499ff09..fa28877f5b4c1 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java @@ -10,6 +10,7 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.logging.log4j.LogManager; +import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; @@ -48,6 +49,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103809") public class CancellationIT extends ProfilingTestCase { @Override protected Collection> nodePlugins() { diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java index 8553574d39646..e0e4ef2a12985 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java @@ -7,6 +7,9 @@ package org.elasticsearch.xpack.profiling; +import org.apache.lucene.tests.util.LuceneTestCase; + +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103809") public class GetFlameGraphActionIT extends ProfilingTestCase { public void testGetStackTracesUnfiltered() throws Exception { GetStackTracesRequest request = new GetStackTracesRequest(1000, 600.0d, 1.0d, null, null, null, null, null, null, null, null); diff --git 
a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java index 289f6896ed698..9c60a6bcdfc1c 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java @@ -7,11 +7,13 @@ package org.elasticsearch.xpack.profiling; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; import java.util.List; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103809") public class GetStackTracesActionIT extends ProfilingTestCase { public void testGetStackTracesUnfiltered() throws Exception { GetStackTracesRequest request = new GetStackTracesRequest(1000, 600.0d, 1.0d, null, null, null, null, null, null, null, null); @@ -30,8 +32,8 @@ public void testGetStackTracesUnfiltered() throws Exception { assertEquals(18, stackTrace.fileIds.size()); assertEquals(18, stackTrace.frameIds.size()); assertEquals(18, stackTrace.typeIds.size()); - assertEquals(0.0000098789d, stackTrace.annualCO2Tons, 0.0000000001d); - assertEquals(0.093075d, stackTrace.annualCostsUSD, 0.000001d); + assertEquals(0.0000048475146d, stackTrace.annualCO2Tons, 0.0000000001d); + assertEquals(0.18834d, stackTrace.annualCostsUSD, 0.00001d); assertNotNull(response.getStackFrames()); StackFrame stackFrame = response.getStackFrames().get("8NlMClggx8jaziUTJXlmWAAAAAAAAIYI"); diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStatusActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStatusActionIT.java index 7cd5b08ee773f..8dbab6e8c06a5 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStatusActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStatusActionIT.java @@ -9,7 +9,10 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESIntegTestCase; +import org.junit.Before; +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) public class GetStatusActionIT extends ProfilingTestCase { @Override protected boolean requiresDataSetup() { @@ -17,6 +20,14 @@ protected boolean requiresDataSetup() { return false; } + @Before + public void setupCluster() { + // dedicated master with a data node + internalCluster().setBootstrapMasterNodeIndex(0); + internalCluster().startMasterOnlyNode(); + internalCluster().startDataOnlyNode(); + } + public void testTimeoutIfResourcesNotCreated() throws Exception { updateProfilingTemplatesEnabled(false); GetStatusAction.Request request = new GetStatusAction.Request(); @@ -27,6 +38,7 @@ public void testTimeoutIfResourcesNotCreated() throws Exception { GetStatusAction.Response response = client().execute(GetStatusAction.INSTANCE, request).get(); assertEquals(RestStatus.REQUEST_TIMEOUT, response.status()); assertFalse(response.isResourcesCreated()); + assertFalse(response.hasData()); } public void testNoTimeoutIfNotWaiting() throws Exception { @@ -37,8 +49,10 @@ public void testNoTimeoutIfNotWaiting() throws 
Exception { GetStatusAction.Response response = client().execute(GetStatusAction.INSTANCE, request).get(); assertEquals(RestStatus.OK, response.status()); assertFalse(response.isResourcesCreated()); + assertFalse(response.hasData()); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104035") public void testWaitsUntilResourcesAreCreated() throws Exception { updateProfilingTemplatesEnabled(true); GetStatusAction.Request request = new GetStatusAction.Request(); @@ -47,5 +61,17 @@ public void testWaitsUntilResourcesAreCreated() throws Exception { GetStatusAction.Response response = client().execute(GetStatusAction.INSTANCE, request).get(); assertEquals(RestStatus.OK, response.status()); assertTrue(response.isResourcesCreated()); + assertFalse(response.hasData()); + } + + public void testHasData() throws Exception { + doSetupData(); + GetStatusAction.Request request = new GetStatusAction.Request(); + request.waitForResourcesCreated(true); + + GetStatusAction.Response response = client().execute(GetStatusAction.INSTANCE, request).get(); + assertEquals(RestStatus.OK, response.status()); + assertTrue(response.isResourcesCreated()); + assertTrue(response.hasData()); } } diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java index 6a95b7c8d8573..82d6f6193505d 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java @@ -112,9 +112,12 @@ protected final void bulkIndex(String file) throws Exception { @Before public void setupData() throws Exception { - if (requiresDataSetup() == false) { - return; + if (requiresDataSetup()) { + doSetupData(); } + } + + protected final void doSetupData() throws Exception { final String apmTestIndex = "apm-test-001"; // only enable index management while setting up indices to avoid interfering with the rest of the test infrastructure updateProfilingTemplatesEnabled(true); diff --git a/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-hosts.ndjson b/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-hosts.ndjson index cd3ddc1271d2d..a830ef8da66f1 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-hosts.ndjson +++ b/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-hosts.ndjson @@ -1,2 +1,2 @@ {"create": {"_index": "profiling-hosts", "_id": "eLH27YsBj2lLi3tJYlvr"}} -{"profiling.project.id": 100, "host.id": "8457605156473051743", "@timestamp": 1700504426, "ecs.version": "1.12.0", "profiling.agent.build_timestamp": 1688111067, "profiling.instance.private_ipv4s": ["192.168.1.2"], "ec2.instance_life_cycle": "on-demand", "profiling.agent.config.map_scale_factor": 0, "ec2.instance_type": "i3.2xlarge", "profiling.host.ip": "192.168.1.2", "profiling.agent.config.bpf_log_level": 0, "profiling.host.sysctl.net.core.bpf_jit_enable": 1, "profiling.agent.config.file": "/etc/prodfiler/prodfiler.conf", "ec2.local_ipv4": "192.168.1.2", "profiling.agent.config.no_kernel_version_check": false, "profiling.host.machine": "x86_64", ",profiling.host.tags": ["cloud_provider:aws", "cloud_environment:qa", "cloud_region:eu-west-1"], "profiling.agent.config.probabilistic_threshold": 100, 
"profiling.agent.config.disable_tls": false, "profiling.agent.config.tracers": "all", "profiling.agent.start_time": 1700090045589, "profiling.agent.config.max_elements_per_interval": 800, "ec2.placement.region": "eu-west-1", "profiling.agent.config.present_cpu_cores": 8, "profiling.host.kernel_version": "9.9.9-0-aws", "profiling.agent.config.bpf_log_size": 65536, "profiling.agent.config.known_traces_entries": 65536, "profiling.host.sysctl.kernel.unprivileged_bpf_disabled": 1, "profiling.agent.config.verbose": false, "profiling.agent.config.probabilistic_interval": "1m0s", "ec2.placement.availability_zone_id": "euw1-az1", "ec2.security_groups": "", "ec2.local_hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "ec2.placement.availability_zone": "eu-west-1c", "profiling.agent.config.upload_symbols": false, "profiling.host.sysctl.kernel.bpf_stats_enabled": 0, "profiling.host.name": "ip-192-168-1-2", "ec2.mac": "00:11:22:33:44:55", "profiling.host.kernel_proc_version": "Linux version 9.9.9-0-aws", "profiling.agent.config.cache_directory": "/var/cache/optimyze/", "profiling.agent.version": "v8.12.0", "ec2.hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "profiling.agent.config.elastic_mode": false, "ec2.ami_id": "ami-aaaaaaaaaaa", "ec2.instance_id": "i-0b999999999999999" } +{"profiling.project.id": 100, "host.id": "8457605156473051743", "@timestamp": 1700504426, "ecs.version": "1.12.0", "profiling.agent.build_timestamp": 1688111067, "profiling.instance.private_ipv4s": ["192.168.1.2"], "ec2.instance_life_cycle": "on-demand", "profiling.agent.config.map_scale_factor": 0, "ec2.instance_type": "i3.2xlarge", "profiling.host.ip": "192.168.1.2", "profiling.agent.config.bpf_log_level": 0, "profiling.host.sysctl.net.core.bpf_jit_enable": 1, "profiling.agent.config.file": "/etc/prodfiler/prodfiler.conf", "ec2.local_ipv4": "192.168.1.2", "profiling.agent.config.no_kernel_version_check": false, "profiling.host.machine": "x86_64", "profiling.host.tags": ["cloud_provider:aws", "cloud_environment:qa", "cloud_region:eu-west-1"], "profiling.agent.config.probabilistic_threshold": 100, "profiling.agent.config.disable_tls": false, "profiling.agent.config.tracers": "all", "profiling.agent.start_time": 1700090045589, "profiling.agent.config.max_elements_per_interval": 800, "ec2.placement.region": "eu-west-1", "profiling.agent.config.present_cpu_cores": 8, "profiling.host.kernel_version": "9.9.9-0-aws", "profiling.agent.config.bpf_log_size": 65536, "profiling.agent.config.known_traces_entries": 65536, "profiling.host.sysctl.kernel.unprivileged_bpf_disabled": 1, "profiling.agent.config.verbose": false, "profiling.agent.config.probabilistic_interval": "1m0s", "ec2.placement.availability_zone_id": "euw1-az1", "ec2.security_groups": "", "ec2.local_hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "ec2.placement.availability_zone": "eu-west-1c", "profiling.agent.config.upload_symbols": false, "profiling.host.sysctl.kernel.bpf_stats_enabled": 0, "profiling.host.name": "ip-192-168-1-2", "ec2.mac": "00:11:22:33:44:55", "profiling.host.kernel_proc_version": "Linux version 9.9.9-0-aws", "profiling.agent.config.cache_directory": "/var/cache/optimyze/", "profiling.agent.version": "v8.12.0", "ec2.hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "profiling.agent.config.elastic_mode": false, "ec2.ami_id": "ami-aaaaaaaaaaa", "ec2.instance_id": "i-0b999999999999999" } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CO2Calculator.java 
b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CO2Calculator.java index 0d92bf0a78d09..1e44cba4e62b2 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CO2Calculator.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CO2Calculator.java @@ -18,7 +18,6 @@ final class CO2Calculator { private static final double DEFAULT_KILOWATTS_PER_CORE_ARM64 = 2.8d / 1000.0d; // unit: watt / core private static final double DEFAULT_KILOWATTS_PER_CORE = DEFAULT_KILOWATTS_PER_CORE_X86; // unit: watt / core private static final double DEFAULT_DATACENTER_PUE = 1.7d; - private final InstanceTypeService instanceTypeService; private final Map hostMetadata; private final double samplingDurationInSeconds; private final double customCO2PerKWH; @@ -27,7 +26,6 @@ final class CO2Calculator { private final double customKilowattsPerCoreARM64; CO2Calculator( - InstanceTypeService instanceTypeService, Map hostMetadata, double samplingDurationInSeconds, Double customCO2PerKWH, @@ -35,7 +33,6 @@ final class CO2Calculator { Double customPerCoreWattX86, Double customPerCoreWattARM64 ) { - this.instanceTypeService = instanceTypeService; this.hostMetadata = hostMetadata; this.samplingDurationInSeconds = samplingDurationInSeconds > 0 ? samplingDurationInSeconds : 1.0d; // avoid division by zero this.customCO2PerKWH = customCO2PerKWH == null ? DEFAULT_CO2_TONS_PER_KWH : customCO2PerKWH; @@ -54,7 +51,7 @@ public double getAnnualCO2Tons(String hostID, long samples) { return DEFAULT_KILOWATTS_PER_CORE * customCO2PerKWH * annualCoreHours * customDatacenterPUE; } - CostEntry costs = instanceTypeService.getCosts(host.instanceType); + CostEntry costs = InstanceTypeService.getCosts(host.instanceType); if (costs == null) { return getKiloWattsPerCore(host) * getCO2TonsPerKWH(host) * annualCoreHours * getDatacenterPUE(host); } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CostCalculator.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CostCalculator.java index 05319ba7d1cc4..ecaaee5d3bf4b 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CostCalculator.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CostCalculator.java @@ -15,20 +15,17 @@ final class CostCalculator { private static final double SECONDS_PER_YEAR = SECONDS_PER_HOUR * 24 * 365.0d; // unit: seconds private static final double DEFAULT_COST_USD_PER_CORE_HOUR = 0.0425d; // unit: USD / (core * hour) private static final double DEFAULT_AWS_COST_FACTOR = 1.0d; - private final InstanceTypeService instanceTypeService; private final Map hostMetadata; private final double samplingDurationInSeconds; private final double awsCostFactor; private final double customCostPerCoreHour; CostCalculator( - InstanceTypeService instanceTypeService, Map hostMetadata, double samplingDurationInSeconds, Double awsCostFactor, Double customCostPerCoreHour ) { - this.instanceTypeService = instanceTypeService; this.hostMetadata = hostMetadata; this.samplingDurationInSeconds = samplingDurationInSeconds > 0 ? samplingDurationInSeconds : 1.0d; // avoid division by zero this.awsCostFactor = awsCostFactor == null ? DEFAULT_AWS_COST_FACTOR : awsCostFactor; @@ -45,7 +42,7 @@ public double annualCostsUSD(String hostID, double samples) { double providerCostFactor = host.instanceType.provider.equals("aws") ? 
awsCostFactor : 1.0d; - CostEntry costs = instanceTypeService.getCosts(host.instanceType); + CostEntry costs = InstanceTypeService.getCosts(host.instanceType); if (costs == null) { return annualCoreHours * customCostPerCoreHour * providerCostFactor; } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphAction.java index 79f8632238d4c..fc04f735fdf87 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphAction.java @@ -7,12 +7,13 @@ package org.elasticsearch.xpack.profiling; import org.elasticsearch.action.ActionType; +import org.elasticsearch.common.io.stream.Writeable; public final class GetFlamegraphAction extends ActionType { public static final GetFlamegraphAction INSTANCE = new GetFlamegraphAction(); public static final String NAME = "indices:data/read/profiling/flamegraph"; private GetFlamegraphAction() { - super(NAME, GetFlamegraphResponse::new); + super(NAME, Writeable.Reader.localOnly()); } } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java index 457faecf4ad54..468b74ed16000 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java @@ -8,8 +8,8 @@ package org.elasticsearch.xpack.profiling; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; @@ -43,30 +43,6 @@ public class GetFlamegraphResponse extends ActionResponse implements ChunkedToXC private final List annualCostsUSDInclusive; private final List annualCostsUSDExclusive; - public GetFlamegraphResponse(StreamInput in) throws IOException { - this.size = in.readInt(); - this.samplingRate = in.readDouble(); - this.edges = in.readCollectionAsList(i -> i.readMap(StreamInput::readInt)); - this.fileIds = in.readCollectionAsList(StreamInput::readString); - this.frameTypes = in.readCollectionAsList(StreamInput::readInt); - this.inlineFrames = in.readCollectionAsList(StreamInput::readBoolean); - this.fileNames = in.readCollectionAsList(StreamInput::readString); - this.addressOrLines = in.readCollectionAsList(StreamInput::readInt); - this.functionNames = in.readCollectionAsList(StreamInput::readString); - this.functionOffsets = in.readCollectionAsList(StreamInput::readInt); - this.sourceFileNames = in.readCollectionAsList(StreamInput::readString); - this.sourceLines = in.readCollectionAsList(StreamInput::readInt); - this.countInclusive = in.readCollectionAsList(StreamInput::readLong); - this.countExclusive = in.readCollectionAsList(StreamInput::readLong); - this.annualCO2TonsInclusive = in.readCollectionAsList(StreamInput::readDouble); - this.annualCO2TonsExclusive = in.readCollectionAsList(StreamInput::readDouble); - this.annualCostsUSDInclusive = 
in.readCollectionAsList(StreamInput::readDouble); - this.annualCostsUSDExclusive = in.readCollectionAsList(StreamInput::readDouble); - this.selfCPU = in.readLong(); - this.totalCPU = in.readLong(); - this.totalSamples = in.readLong(); - } - public GetFlamegraphResponse( int size, double samplingRate, @@ -115,27 +91,7 @@ public GetFlamegraphResponse( @Override public void writeTo(StreamOutput out) throws IOException { - out.writeInt(this.size); - out.writeDouble(this.samplingRate); - out.writeCollection(this.edges, (o, v) -> o.writeMap(v, StreamOutput::writeString, StreamOutput::writeInt)); - out.writeCollection(this.fileIds, StreamOutput::writeString); - out.writeCollection(this.frameTypes, StreamOutput::writeInt); - out.writeCollection(this.inlineFrames, StreamOutput::writeBoolean); - out.writeCollection(this.fileNames, StreamOutput::writeString); - out.writeCollection(this.addressOrLines, StreamOutput::writeInt); - out.writeCollection(this.functionNames, StreamOutput::writeString); - out.writeCollection(this.functionOffsets, StreamOutput::writeInt); - out.writeCollection(this.sourceFileNames, StreamOutput::writeString); - out.writeCollection(this.sourceLines, StreamOutput::writeInt); - out.writeCollection(this.countInclusive, StreamOutput::writeLong); - out.writeCollection(this.countExclusive, StreamOutput::writeLong); - out.writeCollection(this.annualCO2TonsInclusive, StreamOutput::writeDouble); - out.writeCollection(this.annualCO2TonsExclusive, StreamOutput::writeDouble); - out.writeCollection(this.annualCostsUSDInclusive, StreamOutput::writeDouble); - out.writeCollection(this.annualCostsUSDExclusive, StreamOutput::writeDouble); - out.writeLong(this.selfCPU); - out.writeLong(this.totalCPU); - out.writeLong(this.totalSamples); + TransportAction.localOnly(); } public int getSize() { diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesAction.java index 8df5b1ec9154e..84ab6643be781 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesAction.java @@ -7,12 +7,13 @@ package org.elasticsearch.xpack.profiling; import org.elasticsearch.action.ActionType; +import org.elasticsearch.common.io.stream.Writeable; public final class GetStackTracesAction extends ActionType { public static final GetStackTracesAction INSTANCE = new GetStackTracesAction(); public static final String NAME = "indices:data/read/profiling/stack_traces"; private GetStackTracesAction() { - super(NAME, GetStackTracesResponse::new); + super(NAME, Writeable.Reader.localOnly()); } } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java index f81b5f01caae3..efa8fc1d64244 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java @@ -10,8 +10,8 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.common.ParsingException; -import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.tasks.CancellableTask; @@ -93,35 +93,9 @@ public GetStackTracesRequest( this.customCostPerCoreHour = customCostPerCoreHour; } - public GetStackTracesRequest(StreamInput in) throws IOException { - this.query = in.readOptionalNamedWriteable(QueryBuilder.class); - this.sampleSize = in.readOptionalInt(); - this.requestedDuration = in.readOptionalDouble(); - this.awsCostFactor = in.readOptionalDouble(); - this.adjustSampleCount = in.readOptionalBoolean(); - this.indices = in.readOptionalString(); - this.stackTraceIds = in.readOptionalString(); - this.customCO2PerKWH = in.readOptionalDouble(); - this.customDatacenterPUE = in.readOptionalDouble(); - this.customPerCoreWattX86 = in.readOptionalDouble(); - this.customPerCoreWattARM64 = in.readOptionalDouble(); - this.customCostPerCoreHour = in.readOptionalDouble(); - } - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalNamedWriteable(query); - out.writeOptionalInt(sampleSize); - out.writeOptionalDouble(requestedDuration); - out.writeOptionalDouble(awsCostFactor); - out.writeOptionalBoolean(adjustSampleCount); - out.writeOptionalString(indices); - out.writeOptionalString(stackTraceIds); - out.writeOptionalDouble(customCO2PerKWH); - out.writeOptionalDouble(customDatacenterPUE); - out.writeOptionalDouble(customPerCoreWattX86); - out.writeOptionalDouble(customPerCoreWattARM64); - out.writeOptionalDouble(customCostPerCoreHour); + public void writeTo(StreamOutput out) { + TransportAction.localOnly(); } public Integer getSampleSize() { diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java index 2f1e15252c277..89c0b4ab6b0fb 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java @@ -7,16 +7,15 @@ package org.elasticsearch.xpack.profiling; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ToXContent; -import java.io.IOException; import java.util.Collections; import java.util.Iterator; import java.util.Map; @@ -36,37 +35,6 @@ public class GetStackTracesResponse extends ActionResponse implements ChunkedToX private final double samplingRate; private final long totalSamples; - public GetStackTracesResponse(StreamInput in) throws IOException { - this.stackTraces = in.readBoolean() - ? in.readMap( - i -> new StackTrace( - i.readCollectionAsList(StreamInput::readInt), - i.readCollectionAsList(StreamInput::readString), - i.readCollectionAsList(StreamInput::readString), - i.readCollectionAsList(StreamInput::readInt), - i.readDouble(), - i.readDouble(), - i.readLong() - ) - ) - : null; - this.stackFrames = in.readBoolean() - ? 
in.readMap( - i -> new StackFrame( - i.readCollectionAsList(StreamInput::readString), - i.readCollectionAsList(StreamInput::readString), - i.readCollectionAsList(StreamInput::readInt), - i.readCollectionAsList(StreamInput::readInt) - ) - ) - : null; - this.executables = in.readBoolean() ? in.readMap(StreamInput::readString) : null; - this.stackTraceEvents = in.readBoolean() ? in.readMap(i -> new TraceEvent(i.readString(), i.readLong())) : null; - this.totalFrames = in.readInt(); - this.samplingRate = in.readDouble(); - this.totalSamples = in.readLong(); - } - public GetStackTracesResponse( Map stackTraces, Map stackFrames, @@ -86,50 +54,8 @@ public GetStackTracesResponse( } @Override - public void writeTo(StreamOutput out) throws IOException { - if (stackTraces != null) { - out.writeBoolean(true); - out.writeMap(stackTraces, (o, v) -> { - o.writeCollection(v.addressOrLines, StreamOutput::writeInt); - o.writeStringCollection(v.fileIds); - o.writeStringCollection(v.frameIds); - o.writeCollection(v.typeIds, StreamOutput::writeInt); - o.writeDouble(v.annualCO2Tons); - o.writeDouble(v.annualCostsUSD); - o.writeLong(v.count); - }); - } else { - out.writeBoolean(false); - } - if (stackFrames != null) { - out.writeBoolean(true); - out.writeMap(stackFrames, (o, v) -> { - o.writeStringCollection(v.fileName); - o.writeStringCollection(v.functionName); - o.writeCollection(v.functionOffset, StreamOutput::writeInt); - o.writeCollection(v.lineNumber, StreamOutput::writeInt); - }); - } else { - out.writeBoolean(false); - } - if (executables != null) { - out.writeBoolean(true); - out.writeMap(executables, StreamOutput::writeString); - } else { - out.writeBoolean(false); - } - if (stackTraceEvents != null) { - out.writeBoolean(true); - out.writeMap(stackTraceEvents, (o, v) -> { - o.writeString(v.stacktraceID); - o.writeLong(v.count); - }); - } else { - out.writeBoolean(false); - } - out.writeInt(totalFrames); - out.writeDouble(samplingRate); - out.writeLong(totalSamples); + public void writeTo(StreamOutput out) { + TransportAction.localOnly(); } public Map getStackTraces() { diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java index 1ddf2d7178584..cb88021eebcf8 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java @@ -31,11 +31,11 @@ protected GetStatusAction() { public static class Response extends ActionResponse implements ToXContentObject { - private boolean profilingEnabled; - private boolean resourceManagementEnabled; - private boolean resourcesCreated; - private boolean pre891Data; - private boolean hasData; + private final boolean profilingEnabled; + private final boolean resourceManagementEnabled; + private final boolean resourcesCreated; + private final boolean pre891Data; + private final boolean hasData; private boolean timedOut; public Response(StreamInput in) throws IOException { @@ -70,6 +70,10 @@ public boolean isResourcesCreated() { return resourcesCreated; } + public boolean hasData() { + return hasData; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceType.java 
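Before the `InstanceType` diff: the serialization removals above all follow the same recipe. These profiling requests and responses are only ever built and consumed on the coordinating node, so their `StreamInput` constructors go away, `writeTo` delegates to `TransportAction.localOnly()` (which throws), and the `ActionType` is declared with `Writeable.Reader.localOnly()`. A condensed sketch of the shape, using a hypothetical response class (the real ones are `GetStackTracesResponse` and `GetFlamegraphResponse`):

```java
// Sketch of the local-only pattern applied to the profiling actions above.
// LocalOnlyDemoResponse is hypothetical; it mirrors the real classes' writeTo.
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.TransportAction;
import org.elasticsearch.common.io.stream.StreamOutput;

class LocalOnlyDemoResponse extends ActionResponse {
    @Override
    public void writeTo(StreamOutput out) {
        // Never sent across the wire; fail loudly if anything tries.
        TransportAction.localOnly();
    }
}
```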
b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceType.java index 98e75ff264375..150b2639e9ac3 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceType.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceType.java @@ -23,19 +23,9 @@ final class InstanceType implements ToXContentObject { final String name; InstanceType(String provider, String region, String name) { - this.provider = provider; - this.region = region; - this.name = name; - } - - /** - * Creates a {@link InstanceType} from a {@link Map} of source data provided from JSON or profiling-costs. - * - * @param source the source data - * @return the {@link InstanceType} - */ - public static InstanceType fromCostSource(Map source) { - return new InstanceType((String) source.get("provider"), (String) source.get("region"), (String) source.get("instance_type")); + this.provider = provider != null ? provider : ""; + this.region = region != null ? region : ""; + this.name = name != null ? name : ""; } /** @@ -45,16 +35,45 @@ public static InstanceType fromCostSource(Map source) { * @return the {@link InstanceType} */ public static InstanceType fromHostSource(Map source) { + // Check and handle AWS. + String region = (String) source.get("ec2.placement.region"); + if (region != null) { + String instanceType = (String) source.get("ec2.instance_type"); + return new InstanceType("aws", region, instanceType); + } + + // Check and handle GCP. + String zone = (String) source.get("gce.instance.zone"); + if (zone != null) { + // example: "gce.instance.zone": "projects/123456789/zones/europe-west1-b" + region = zone.substring(zone.lastIndexOf('/') + 1); + // region consist of the zone's first two tokens + String[] tokens = region.split("-", 3); + if (tokens.length > 2) { + region = tokens[0] + "-" + tokens[1]; + } + + // Support for instance type is planned for 8.13. + return new InstanceType("gcp", region, null); + } + + // Check and handle Azure. + region = (String) source.get("azure.compute.location"); + if (region != null) { + // example: "azure.compute.location": "eastus2" + // Support for instance type is planned for 8.13. + return new InstanceType("azure", region, null); + } + + // Support for configured tags (ECS). // Example of tags: // "profiling.host.tags": [ // "cloud_provider:aws", // "cloud_environment:qa", // "cloud_region:eu-west-1", // ], - String provider = ""; - String region = ""; - String instanceType = ""; - + String provider = null; + region = null; List tags = listOf(source.get("profiling.host.tags")); for (String tag : tags) { String[] kv = tag.toLowerCase(Locale.ROOT).split(":", 2); @@ -69,14 +88,7 @@ public static InstanceType fromHostSource(Map source) { } } - // We only support AWS for 8.12, but plan for GCP and Azure later. 
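The added `fromHostSource` above derives a GCP region from the instance zone. A standalone sketch of that derivation, matching the logic in the hunk (the class name is an assumption made for illustration):

```java
// Mirrors the GCP branch of fromHostSource above: the region is the zone's last
// path segment minus its trailing zone letter, e.g. ".../zones/europe-west1-b".
final class GcpZoneToRegionDemo {
    static String regionOf(String zone) {
        String region = zone.substring(zone.lastIndexOf('/') + 1); // "europe-west1-b"
        String[] tokens = region.split("-", 3);
        if (tokens.length > 2) {
            region = tokens[0] + "-" + tokens[1]; // keep "europe-west1"
        }
        return region;
    }

    public static void main(String[] args) {
        System.out.println(regionOf("projects/123456789/zones/europe-west1-b")); // europe-west1
        System.out.println(regionOf("projects/123456789/zones/us-central1-a"));  // us-central1
    }
}
```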
- // "gcp": check 'gce.instance.name' or 'gce.instance.name' to extract the instanceType - // "azure": extract the instanceType - if ("aws".equals(provider)) { - instanceType = (String) source.get("ec2.instance_type"); - } - - return new InstanceType(provider, region, instanceType); + return new InstanceType(provider, region, null); } @SuppressWarnings("unchecked") @@ -109,7 +121,7 @@ public boolean equals(Object o) { return false; } InstanceType that = (InstanceType) o; - return Objects.equals(provider, that.provider) && Objects.equals(region, that.region) && Objects.equals(name, that.name); + return provider.equals(that.provider) && region.equals(that.region) && name.equals(that.name); } @Override @@ -119,6 +131,6 @@ public int hashCode() { @Override public String toString() { - return name + " in region " + region; + return "provider '" + name + "' in region '" + region + "'"; } } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceTypeService.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceTypeService.java index 570a2c499fe35..58dd19c91f966 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceTypeService.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceTypeService.java @@ -13,36 +13,51 @@ import org.elasticsearch.xcontent.XContentType; import java.io.IOException; -import java.io.UncheckedIOException; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.Function; import java.util.zip.GZIPInputStream; -public class InstanceTypeService { - private final Map costsPerDatacenter = new HashMap<>(); - - public void load() { - try ( - GZIPInputStream in = new GZIPInputStream( - InstanceTypeService.class.getClassLoader().getResourceAsStream("profiling-costs.json.gz") - ) - ) { - XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, in); - if (parser.currentToken() == null) { - parser.nextToken(); - } - List> rawData = XContentParserUtils.parseList(parser, XContentParser::map); - for (Map entry : rawData) { - costsPerDatacenter.put(InstanceType.fromCostSource(entry), CostEntry.fromSource(entry)); - } +public final class InstanceTypeService { + + private InstanceTypeService() {} - } catch (IOException e) { - throw new UncheckedIOException(e); + private static final class Holder { + private static final Map costsPerDatacenter; + + static { + final Map objects = new HashMap<>(); + final Function dedupString = s -> (String) objects.computeIfAbsent(s, Function.identity()); + final Map tmp = new HashMap<>(); + try ( + GZIPInputStream in = new GZIPInputStream( + InstanceTypeService.class.getClassLoader().getResourceAsStream("profiling-costs.json.gz") + ); + XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, in) + ) { + if (parser.currentToken() == null) { + parser.nextToken(); + } + List> rawData = XContentParserUtils.parseList(parser, XContentParser::map); + for (Map entry : rawData) { + tmp.put( + new InstanceType( + dedupString.apply((String) entry.get("provider")), + dedupString.apply((String) entry.get("region")), + dedupString.apply((String) entry.get("instance_type")) + ), + (CostEntry) objects.computeIfAbsent(CostEntry.fromSource(entry), Function.identity()) + ); + } + costsPerDatacenter = Map.copyOf(tmp); + } catch (IOException e) { + throw new ExceptionInInitializerError(e); + } } } - public 
CostEntry getCosts(InstanceType instance) { - return costsPerDatacenter.get(instance); + public static CostEntry getCosts(InstanceType instance) { + return Holder.costsPerDatacenter.get(instance); } } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java index 0068d03767387..ce15982450a66 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java @@ -174,11 +174,8 @@ protected List getLifecyclePolicies() { indexVersion("symbols", PROFILING_SYMBOLS_VERSION) ) )) { - try { - componentTemplates.put( - config.getTemplateName(), - ComponentTemplate.parse(JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, config.loadBytes())) - ); + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, config.loadBytes())) { + componentTemplates.put(config.getTemplateName(), ComponentTemplate.parse(parser)); } catch (IOException e) { throw new AssertionError(e); } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java index a2459f839523b..b105cde3d5c2a 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java @@ -86,24 +86,18 @@ public Collection createComponents(PluginServices services) { // set initial value updateTemplatesEnabled(PROFILING_TEMPLATES_ENABLED.get(settings)); clusterService.getClusterSettings().addSettingsUpdateConsumer(PROFILING_TEMPLATES_ENABLED, this::updateTemplatesEnabled); - InstanceTypeService instanceTypeService = createInstanceTypeService(); if (enabled) { registry.get().initialize(); indexManager.get().initialize(); dataStreamManager.get().initialize(); - instanceTypeService.load(); } - return List.of(createLicenseChecker(), instanceTypeService); + return List.of(createLicenseChecker()); } protected ProfilingLicenseChecker createLicenseChecker() { return new ProfilingLicenseChecker(XPackPlugin::getSharedLicenseState); } - protected InstanceTypeService createInstanceTypeService() { - return new InstanceTypeService(); - } - public void updateCheckOutdatedIndices(boolean newValue) { if (newValue == false) { logger.info("profiling will ignore outdated indices"); diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetFlamegraphAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetFlamegraphAction.java index d3c8fc4fd295b..3b1b2e1789ad1 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetFlamegraphAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetFlamegraphAction.java @@ -11,9 +11,8 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestActionListener; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import 
org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -35,9 +34,12 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli getStackTracesRequest.setAdjustSampleCount(true); return channel -> { - RestActionListener listener = new RestChunkedToXContentListener<>(channel); RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); - cancelClient.execute(GetFlamegraphAction.INSTANCE, getStackTracesRequest, listener); + cancelClient.execute( + GetFlamegraphAction.INSTANCE, + getStackTracesRequest, + new RestRefCountedChunkedToXContentListener<>(channel) + ); }; } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetStackTracesAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetStackTracesAction.java index f8ee53ce0826e..ac7e9943b6566 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetStackTracesAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetStackTracesAction.java @@ -11,9 +11,8 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestActionListener; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -33,9 +32,12 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli request.applyContentParser(getStackTracesRequest::parseXContent); return channel -> { - RestActionListener listener = new RestChunkedToXContentListener<>(channel); RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); - cancelClient.execute(GetStackTracesAction.INSTANCE, getStackTracesRequest, listener); + cancelClient.execute( + GetStackTracesAction.INSTANCE, + getStackTracesRequest, + new RestRefCountedChunkedToXContentListener<>(channel) + ); }; } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java index 3cd9ded3005a2..dd78d6f1815f5 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java @@ -11,12 +11,11 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -27,7 +26,7 @@ import java.util.SortedMap; import java.util.TreeMap; -public class TransportGetFlamegraphAction extends 
HandledTransportAction { +public class TransportGetFlamegraphAction extends TransportAction { private static final Logger log = LogManager.getLogger(TransportGetFlamegraphAction.class); private static final StackFrame EMPTY_STACKFRAME = new StackFrame("", "", 0, 0); @@ -36,7 +35,7 @@ public class TransportGetFlamegraphAction extends HandledTransportAction { +public class TransportGetStackTracesAction extends TransportAction { private static final Logger log = LogManager.getLogger(TransportGetStackTracesAction.class); public static final Setting PROFILING_MAX_STACKTRACE_QUERY_SLICES = Setting.intSetting( @@ -111,7 +110,6 @@ public class TransportGetStackTracesAction extends HandledTransportAction clusterService, threadPool.getThreadContext(), new StatusListener(listener, localNode, clusterService, resolver), - clusterState -> resolver.getResponse(clusterState).isResourcesCreated(), + resolver::isResourcesCreated, timeout, log ); @@ -98,7 +96,6 @@ protected ClusterBlockException checkBlock(GetStatusAction.Request request, Clus private static class StatusListener implements ClusterStateObserver.Listener { private final ActionListener listener; private final DiscoveryNode localNode; - private final ClusterService clusterService; private final StatusResolver resolver; @@ -116,7 +113,7 @@ private StatusListener( @Override public void onNewClusterState(ClusterState state) { - listener.onResponse(resolver.getResponse(state)); + resolver.execute(state, listener); } @Override @@ -126,62 +123,64 @@ public void onClusterServiceClose() { @Override public void onTimeout(TimeValue timeout) { - GetStatusAction.Response response = resolver.getResponse(clusterService.state()); - response.setTimedOut(true); - listener.onResponse(response); + resolver.execute(clusterService.state(), ActionListener.wrap(response -> { + response.setTimedOut(true); + listener.onResponse(response); + }, listener::onFailure)); } } private static class StatusResolver { private final ClusterService clusterService; - private final IndicesService indicesService; + private final NodeClient nodeClient; - private StatusResolver(ClusterService clusterService, IndicesService indicesService) { + private StatusResolver(ClusterService clusterService, NodeClient nodeClient) { this.clusterService = clusterService; - this.indicesService = indicesService; + this.nodeClient = nodeClient; } - private GetStatusAction.Response getResponse(ClusterState state) { - IndexStateResolver indexStateResolver = new IndexStateResolver( - getValue(state, ProfilingPlugin.PROFILING_CHECK_OUTDATED_INDICES) - ); - - boolean pluginEnabled = getValue(state, XPackSettings.PROFILING_ENABLED); - boolean resourceManagementEnabled = getValue(state, ProfilingPlugin.PROFILING_TEMPLATES_ENABLED); - + private boolean isResourcesCreated(ClusterState state) { + IndexStateResolver indexStateResolver = indexStateResolver(state); boolean templatesCreated = ProfilingIndexTemplateRegistry.isAllResourcesCreated(state, clusterService.getSettings()); boolean indicesCreated = ProfilingIndexManager.isAllResourcesCreated(state, indexStateResolver); boolean dataStreamsCreated = ProfilingDataStreamManager.isAllResourcesCreated(state, indexStateResolver); - boolean resourcesCreated = templatesCreated && indicesCreated && dataStreamsCreated; + return templatesCreated && indicesCreated && dataStreamsCreated; + } + private boolean isAnyPre891Data(ClusterState state) { + IndexStateResolver indexStateResolver = indexStateResolver(state); boolean indicesPre891 = 
ProfilingIndexManager.isAnyResourceTooOld(state, indexStateResolver); boolean dataStreamsPre891 = ProfilingDataStreamManager.isAnyResourceTooOld(state, indexStateResolver); - boolean anyPre891Data = indicesPre891 || dataStreamsPre891; + return indicesPre891 || dataStreamsPre891; + } - return new GetStatusAction.Response(pluginEnabled, resourceManagementEnabled, resourcesCreated, anyPre891Data, hasData(state)); + private IndexStateResolver indexStateResolver(ClusterState state) { + return new IndexStateResolver(getValue(state, ProfilingPlugin.PROFILING_CHECK_OUTDATED_INDICES)); } - private boolean hasData(ClusterState state) { - DataStream dataStream = state.metadata().dataStreams().get(EventsIndex.FULL_INDEX.getName()); - if (dataStream == null) { - return false; - } - for (Index index : dataStream.getIndices()) { - IndexMetadata meta = state.metadata().index(index); - if (meta == null) { - continue; - } - // It should not happen that we have index metadata but no corresponding index service. Be extra defensive and skip. - IndexService indexService = indicesService.indexService(meta.getIndex()); - if (indexService != null) { - for (IndexShard indexShard : indexService) { - if (indexShard.isReadAllowed() && indexShard.docStats().getCount() > 0L) { - return true; - } - } - } + private void execute(ClusterState state, ActionListener listener) { + boolean pluginEnabled = getValue(state, XPackSettings.PROFILING_ENABLED); + boolean resourceManagementEnabled = getValue(state, ProfilingPlugin.PROFILING_TEMPLATES_ENABLED); + boolean resourcesCreated = isResourcesCreated(state); + boolean anyPre891Data = isAnyPre891Data(state); + // only issue a search if there is any chance that we have data + if (resourcesCreated) { + SearchRequest countRequest = new SearchRequest(EventsIndex.FULL_INDEX.getName()); + countRequest.indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); + countRequest.allowPartialSearchResults(true); + // we don't need an exact hit count, just whether there are any data at all + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(0).trackTotalHits(true).trackTotalHitsUpTo(1); + countRequest.source(searchSourceBuilder); + + nodeClient.search(countRequest, ActionListener.wrap(searchResponse -> { + boolean hasData = searchResponse.getHits().getTotalHits().value > 0; + listener.onResponse( + new GetStatusAction.Response(pluginEnabled, resourceManagementEnabled, resourcesCreated, anyPre891Data, hasData) + ); + }, listener::onFailure)); + } else { + listener.onResponse(new GetStatusAction.Response(pluginEnabled, resourceManagementEnabled, false, anyPre891Data, false)); } - return false; } private boolean getValue(ClusterState state, Setting setting) { diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CO2CalculatorTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CO2CalculatorTests.java index f2a21bc8b9cf5..dadd541808300 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CO2CalculatorTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CO2CalculatorTests.java @@ -18,9 +18,6 @@ public class CO2CalculatorTests extends ESTestCase { private static final String HOST_ID_D = "4440256254710195394"; public void testCreateFromRegularSource() { - InstanceTypeService instanceTypeService = new InstanceTypeService(); - instanceTypeService.load(); - // tag::noformat Map hostsTable = Map.ofEntries( Map.entry(HOST_ID_A, @@ -40,7 +37,7 @@ public void 
testCreateFromRegularSource() { new InstanceType( "gcp", "europe-west1", - "" // Doesn't matter for unknown datacenters. + null // Doesn't matter for unknown datacenters. ), "x86_64" ) @@ -51,7 +48,7 @@ public void testCreateFromRegularSource() { new InstanceType( "azure", "northcentralus", - "" // Doesn't matter for unknown datacenters. + null // Doesn't matter for unknown datacenters. ), "aarch64" ) @@ -62,7 +59,7 @@ public void testCreateFromRegularSource() { new InstanceType( "on-prem-provider", "on-prem-region", - "" // Doesn't matter for unknown datacenters. + null // Doesn't matter for unknown datacenters. ), "aarch64" ) @@ -73,7 +70,7 @@ public void testCreateFromRegularSource() { double samplingDurationInSeconds = 1_800.0d; // 30 minutes long samples = 100_000L; // 100k samples double annualCoreHours = CostCalculator.annualCoreHours(samplingDurationInSeconds, samples, 20.0d); - CO2Calculator co2Calculator = new CO2Calculator(instanceTypeService, hostsTable, samplingDurationInSeconds, null, null, null, null); + CO2Calculator co2Calculator = new CO2Calculator(hostsTable, samplingDurationInSeconds, null, null, null, null); checkCO2Calculation(co2Calculator.getAnnualCO2Tons(HOST_ID_A, samples), annualCoreHours, 0.000002213477d); checkCO2Calculation(co2Calculator.getAnnualCO2Tons(HOST_ID_B, samples), annualCoreHours, 1.1d, 0.00004452d, 7.0d); diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CarthesianCombinator.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CarthesianCombinator.java new file mode 100644 index 0000000000000..2982df317a38c --- /dev/null +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CarthesianCombinator.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.profiling; + +import java.lang.reflect.Array; +import java.util.function.Consumer; + +public class CarthesianCombinator { + private final T[] elems; + private final int[] index; + private final T[] result; + private final int len; + + @SuppressWarnings("unchecked") + CarthesianCombinator(T[] elems, int len) { + if (elems.length == 0) { + throw new IllegalArgumentException("elems must not be empty"); + } + this.elems = elems; + this.index = new int[len]; + this.result = (T[]) Array.newInstance(elems[0].getClass(), len); + this.len = len; + } + + private void init(int length) { + for (int i = 0; i < length; i++) { + index[i] = 0; + result[i] = elems[0]; + } + } + + public void forEach(Consumer action) { + // Initialize index and result + init(len); + + int pos = 0; + while (pos < len) { + if (index[pos] < elems.length) { + result[pos] = elems[index[pos]]; + action.accept(result); + index[pos]++; + continue; + } + while (pos < len && index[pos] + 1 >= elems.length) { + pos++; + } + if (pos < len) { + index[pos]++; + result[pos] = elems[index[pos]]; + init(pos); + pos = 0; + } + } + } +} diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CostCalculatorTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CostCalculatorTests.java index f42ad1188693b..030616d285416 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CostCalculatorTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CostCalculatorTests.java @@ -16,9 +16,6 @@ public class CostCalculatorTests extends ESTestCase { private static final String HOST_ID_B = "2220256254710195392"; public void testCreateFromRegularSource() { - InstanceTypeService instanceTypeService = new InstanceTypeService(); - instanceTypeService.load(); - // tag::noformat Map hostsTable = Map.ofEntries( Map.entry(HOST_ID_A, @@ -49,7 +46,7 @@ public void testCreateFromRegularSource() { double samplingDurationInSeconds = 1_800.0d; // 30 minutes long samples = 100_000L; // 100k samples double annualCoreHours = CostCalculator.annualCoreHours(samplingDurationInSeconds, samples, 20.0d); - CostCalculator costCalculator = new CostCalculator(instanceTypeService, hostsTable, samplingDurationInSeconds, null, null); + CostCalculator costCalculator = new CostCalculator(hostsTable, samplingDurationInSeconds, null, null); // Checks whether the cost calculation is based on the pre-calculated lookup data. 
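// Editorial sketch, not part of this diff: the arithmetic behind the
// annualCoreHours(1_800.0d, 100_000L, 20.0d) call above, assuming each sample taken
// at the 20 Hz sampling frequency stands for 1/20 of a CPU-second (the actual
// CostCalculator implementation is not shown in this hunk):
//   100_000 samples / 20 Hz                 = 5_000 CPU-seconds in the 30-minute window
//   5_000 s * (31_536_000 s/year / 1_800 s) = 87_600_000 CPU-seconds per year
//   87_600_000 s / 3_600 s/hour             ~= 24_333 annual core-hours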
checkCostCalculation(costCalculator.annualCostsUSD(HOST_ID_A, samples), annualCoreHours, 0.061d); diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesRequestTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesRequestTests.java index 8bf4598cf75f7..f0f328e48d00b 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesRequestTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesRequestTests.java @@ -8,12 +8,8 @@ package org.elasticsearch.xpack.profiling; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; @@ -28,47 +24,6 @@ import static java.util.Collections.emptyList; public class GetStackTracesRequestTests extends ESTestCase { - public void testSerialization() throws IOException { - Integer sampleSize = randomIntBetween(1, Integer.MAX_VALUE); - Double requestedDuration = randomBoolean() ? randomDoubleBetween(0.001d, Double.MAX_VALUE, true) : null; - Double awsCostFactor = randomBoolean() ? randomDoubleBetween(0.1d, 5.0d, true) : null; - Double customCO2PerKWH = randomBoolean() ? randomDoubleBetween(0.000001d, 0.001d, true) : null; - Double datacenterPUE = randomBoolean() ? randomDoubleBetween(1.0d, 3.0d, true) : null; - Double perCoreWattX86 = randomBoolean() ? randomDoubleBetween(0.01d, 20.0d, true) : null; - Double perCoreWattARM64 = randomBoolean() ? randomDoubleBetween(0.01d, 20.0d, true) : null; - Double customCostPerCoreHour = randomBoolean() ? randomDoubleBetween(0.001d, 1000.0d, true) : null; - QueryBuilder query = randomBoolean() ? 
new BoolQueryBuilder() : null; - - GetStackTracesRequest request = new GetStackTracesRequest( - sampleSize, - requestedDuration, - awsCostFactor, - query, - null, - null, - customCO2PerKWH, - datacenterPUE, - perCoreWattX86, - perCoreWattARM64, - customCostPerCoreHour - ); - try (BytesStreamOutput out = new BytesStreamOutput()) { - request.writeTo(out); - try (NamedWriteableAwareStreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), writableRegistry())) { - GetStackTracesRequest deserialized = new GetStackTracesRequest(in); - assertEquals(sampleSize, deserialized.getSampleSize()); - assertEquals(requestedDuration, deserialized.getRequestedDuration()); - assertEquals(awsCostFactor, deserialized.getAwsCostFactor()); - assertEquals(customCO2PerKWH, deserialized.getCustomCO2PerKWH()); - assertEquals(datacenterPUE, deserialized.getCustomDatacenterPUE()); - assertEquals(perCoreWattX86, deserialized.getCustomPerCoreWattX86()); - assertEquals(perCoreWattARM64, deserialized.getCustomPerCoreWattARM64()); - assertEquals(customCostPerCoreHour, deserialized.getCustomCostPerCoreHour()); - assertEquals(query, deserialized.getQuery()); - } - } - } - public void testParseValidXContent() throws IOException { try (XContentParser content = createParser(XContentFactory.jsonBuilder() //tag::noformat @@ -93,6 +48,15 @@ public void testParseValidXContent() throws IOException { assertEquals(Double.valueOf(100.54d), request.getRequestedDuration()); // a basic check suffices here assertEquals("@timestamp", ((RangeQueryBuilder) request.getQuery()).fieldName()); + // Expect the default values + assertNull(request.getIndices()); + assertNull(request.getStackTraceIds()); + assertNull(request.getAwsCostFactor()); + assertNull(request.getCustomCO2PerKWH()); + assertNull(request.getCustomDatacenterPUE()); + assertNull(request.getCustomCostPerCoreHour()); + assertNull(request.getCustomPerCoreWattX86()); + assertNull(request.getCustomPerCoreWattARM64()); } } @@ -124,7 +88,57 @@ public void testParseValidXContentWithCustomIndex() throws IOException { assertEquals("@timestamp", ((RangeQueryBuilder) request.getQuery()).fieldName()); // Expect the default values - assertEquals(null, request.getRequestedDuration()); + assertNull(request.getRequestedDuration()); + assertNull(request.getAwsCostFactor()); + assertNull(request.getCustomCO2PerKWH()); + assertNull(request.getCustomDatacenterPUE()); + assertNull(request.getCustomCostPerCoreHour()); + assertNull(request.getCustomPerCoreWattX86()); + assertNull(request.getCustomPerCoreWattARM64()); + } + } + + public void testParseValidXContentWithCustomCostAndCO2Data() throws IOException { + try (XContentParser content = createParser(XContentFactory.jsonBuilder() + //tag::noformat + .startObject() + .field("sample_size", 2000) + .field("requested_duration", 100.54d) + .field("aws_cost_factor", 7.3d) + .field("co2_per_kwh", 22.4d) + .field("datacenter_pue", 1.05d) + .field("cost_per_core_hour", 3.32d) + .field("per_core_watt_x86", 7.2d) + .field("per_core_watt_arm64", 2.82d) + .startObject("query") + .startObject("range") + .startObject("@timestamp") + .field("gte", "2022-10-05") + .endObject() + .endObject() + .endObject() + .endObject() + //end::noformat + )) { + + GetStackTracesRequest request = new GetStackTracesRequest(); + request.parseXContent(content); + + assertEquals(Integer.valueOf(2000), request.getSampleSize()); + assertEquals(Double.valueOf(100.54d), request.getRequestedDuration()); + assertEquals(Double.valueOf(7.3d), request.getAwsCostFactor()); + 
assertEquals(Double.valueOf(22.4d), request.getCustomCO2PerKWH()); + assertEquals(Double.valueOf(1.05d), request.getCustomDatacenterPUE()); + assertEquals(Double.valueOf(3.32d), request.getCustomCostPerCoreHour()); + assertEquals(Double.valueOf(7.2d), request.getCustomPerCoreWattX86()); + assertEquals(Double.valueOf(2.82d), request.getCustomPerCoreWattARM64()); + + // a basic check suffices here + assertEquals("@timestamp", ((RangeQueryBuilder) request.getQuery()).fieldName()); + + // Expect the default values + assertNull(request.getIndices()); + assertNull(request.getStackTraceIds()); } } @@ -246,7 +260,6 @@ public void testConsidersCustomIndicesInRelatedIndices() { } public void testConsidersDefaultIndicesInRelatedIndices() { - String customIndex = randomAlphaOfLength(5); GetStackTracesRequest request = new GetStackTracesRequest(1, 1.0d, 1.0d, null, null, null, null, null, null, null, null); String[] indices = request.indices(); assertEquals(15, indices.length); diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseTests.java index 7455c2b30e13d..99a34719f96c9 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseTests.java @@ -7,20 +7,18 @@ package org.elasticsearch.xpack.profiling; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractChunkedSerializingTestCase; -import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; import java.util.List; import java.util.Map; -public class GetStackTracesResponseTests extends AbstractWireSerializingTestCase { +public class GetStackTracesResponseTests extends ESTestCase { private T randomNullable(T v) { return randomBoolean() ? 
v : null; } - @Override - protected GetStackTracesResponse createTestInstance() { + private GetStackTracesResponse createTestInstance() { int totalFrames = randomIntBetween(1, 100); Map stackTraces = randomNullable( @@ -57,16 +55,6 @@ protected GetStackTracesResponse createTestInstance() { return new GetStackTracesResponse(stackTraces, stackFrames, executables, stackTraceEvents, totalFrames, 1.0, totalSamples); } - @Override - protected GetStackTracesResponse mutateInstance(GetStackTracesResponse instance) { - return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 - } - - @Override - protected Writeable.Reader instanceReader() { - return GetStackTracesResponse::new; - } - public void testChunking() { AbstractChunkedSerializingTestCase.assertChunkCount(createTestInstance(), instance -> { // start, end, total_frames, samplingrate diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/HostMetadataTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/HostMetadataTests.java index 0359357004687..d8f93cd129916 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/HostMetadataTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/HostMetadataTests.java @@ -13,7 +13,7 @@ import java.util.Map; public class HostMetadataTests extends ESTestCase { - public void testCreateFromRegularSource() { + public void testCreateFromSourceAWS() { final String hostID = "1440256254710195396"; final String machine = "x86_64"; final String provider = "aws"; @@ -25,9 +25,8 @@ public void testCreateFromRegularSource() { Map.of( "host.id", hostID, "profiling.host.machine", machine, - "profiling.host.tags", Arrays.asList( - "cloud_provider:"+provider, "cloud_environment:qa", "cloud_region:"+region), - "ec2.instance_type", instanceType + "ec2.instance_type", instanceType, + "ec2.placement.region", region ) ); // end::noformat @@ -38,4 +37,141 @@ public void testCreateFromRegularSource() { assertEquals(region, host.instanceType.region); assertEquals(instanceType, host.instanceType.name); } + + public void testCreateFromSourceGCP() { + final String hostID = "1440256254710195396"; + final String machine = "x86_64"; + final String provider = "gcp"; + final String[] regions = { "", "", "europe-west1", "europewest", "europe-west1" }; + final String[] zones = { + "", + "/", + "projects/123456789/zones/" + regions[2] + "-b", + "projects/123456789/zones/" + regions[3], + "projects/123456789/zones/" + regions[4] + "-b-c" }; + + for (int i = 0; i < regions.length; i++) { + String region = regions[i]; + String zone = zones[i]; + + // tag::noformat + HostMetadata host = HostMetadata.fromSource( + Map.of( + "host.id", hostID, + "profiling.host.machine", machine, + "gce.instance.zone", zone + ) + ); + // end::noformat + + assertEquals(hostID, host.hostID); + assertEquals(machine, host.profilingHostMachine); + assertEquals(provider, host.instanceType.provider); + assertEquals(region, host.instanceType.region); + assertEquals("", host.instanceType.name); + } + } + + public void testCreateFromSourceGCPZoneFuzzer() { + final String hostID = "1440256254710195396"; + final String machine = "x86_64"; + final String provider = "gcp"; + final Character[] chars = new Character[] { '/', '-', 'a' }; + + for (int zoneLength = 1; zoneLength <= 5; zoneLength++) { + CarthesianCombinator combinator = new CarthesianCombinator<>(chars, zoneLength); + + combinator.forEach((result) -> { + StringBuilder sb = 
new StringBuilder(); + for (Character c : result) { + sb.append(c); + } + String zone = sb.toString(); + + // tag::noformat + HostMetadata host = HostMetadata.fromSource( + Map.of( + "host.id", hostID, + "profiling.host.machine", machine, + "gce.instance.zone", zone + ) + ); + // end::noformat + + assertEquals(hostID, host.hostID); + assertEquals(machine, host.profilingHostMachine); + assertEquals(provider, host.instanceType.provider); + assertNotNull(host.instanceType.region); + assertEquals("", host.instanceType.name); + // region isn't tested because of the combinatorial nature of this test + }); + } + } + + public void testCreateFromSourceAzure() { + final String hostID = "1440256254710195396"; + final String machine = "x86_64"; + final String provider = "azure"; + final String region = "eastus2"; + + // tag::noformat + HostMetadata host = HostMetadata.fromSource( + Map.of( + "host.id", hostID, + "profiling.host.machine", machine, + "azure.compute.location", region + ) + ); + // end::noformat + + assertEquals(hostID, host.hostID); + assertEquals(machine, host.profilingHostMachine); + assertEquals(provider, host.instanceType.provider); + assertEquals(region, host.instanceType.region); + assertEquals("", host.instanceType.name); + } + + public void testCreateFromSourceECS() { + final String hostID = "1440256254710195396"; + final String machine = "x86_64"; + final String provider = "any-provider"; + final String region = "any-region"; + + // tag::noformat + HostMetadata host = HostMetadata.fromSource( + Map.of( + "host.id", hostID, + "profiling.host.machine", machine, + "profiling.host.tags", Arrays.asList( + "cloud_provider:"+provider, "cloud_environment:qa", "cloud_region:"+region) + ) + ); + // end::noformat + + assertEquals(hostID, host.hostID); + assertEquals(machine, host.profilingHostMachine); + assertEquals(provider, host.instanceType.provider); + assertEquals(region, host.instanceType.region); + assertEquals("", host.instanceType.name); + } + + public void testCreateFromSourceNoProvider() { + final String hostID = "1440256254710195396"; + final String machine = "x86_64"; + + // tag::noformat + HostMetadata host = HostMetadata.fromSource( + Map.of( + "host.id", hostID, + "profiling.host.machine", machine + ) + ); + // end::noformat + + assertEquals(hostID, host.hostID); + assertEquals(machine, host.profilingHostMachine); + assertEquals("", host.instanceType.provider); + assertEquals("", host.instanceType.region); + assertEquals("", host.instanceType.name); + } } diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingDataStreamManagerTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingDataStreamManagerTests.java index e23d003e2f209..87b8aed1811e2 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingDataStreamManagerTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingDataStreamManagerTests.java @@ -11,12 +11,12 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.action.admin.indices.rollover.RolloverAction; import 
org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; +import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -404,7 +404,8 @@ private ActionResponse verifyDataStreamRolledOver( false, true, true, - true + true, + false ); } else { fail("client called with unexpected request:" + request.toString()); @@ -420,16 +421,14 @@ private ActionResponse verifyIndexMigrated( ActionRequest request, ActionListener listener ) { - if (action instanceof PutMappingAction) { + if (action == TransportPutMappingAction.TYPE) { mappingUpdates.incrementAndGet(); - assertThat(action, instanceOf(PutMappingAction.class)); assertThat(request, instanceOf(PutMappingRequest.class)); assertThat(((PutMappingRequest) request).indices(), equalTo(new String[] { indexName })); assertNotNull(listener); return AcknowledgedResponse.TRUE; - } else if (action instanceof UpdateSettingsAction) { + } else if (action == TransportUpdateSettingsAction.TYPE) { settingsUpdates.incrementAndGet(); - assertThat(action, instanceOf(UpdateSettingsAction.class)); assertThat(request, instanceOf(UpdateSettingsRequest.class)); assertNotNull(listener); return AcknowledgedResponse.TRUE; diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingIndexManagerTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingIndexManagerTests.java index 6919932a7823c..4b7819693aedb 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingIndexManagerTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingIndexManagerTests.java @@ -16,9 +16,9 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; +import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -441,16 +441,14 @@ private ActionResponse verifyIndexMigrated( ActionRequest request, ActionListener listener ) { - if (action instanceof PutMappingAction) { + if (action == TransportPutMappingAction.TYPE) { mappingUpdates.incrementAndGet(); - assertThat(action, instanceOf(PutMappingAction.class)); assertThat(request, instanceOf(PutMappingRequest.class)); assertThat(((PutMappingRequest) request).indices(), equalTo(new String[] { indexName })); assertNotNull(listener); return AcknowledgedResponse.TRUE; - } else if (action instanceof UpdateSettingsAction) { + } else if (action == TransportUpdateSettingsAction.TYPE) { 
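// Editorial note on the pattern used throughout these verifiers (a sketch, not part
// of the diff): ActionType instances such as TransportPutMappingAction.TYPE and
// TransportUpdateSettingsAction.TYPE are singleton constants, so reference equality
// identifies the action. For example,
//   if (action == TransportPutMappingAction.TYPE) { /* handle put-mapping */ }
// replaces
//   if (action instanceof PutMappingAction) { ... }
// and makes the removed assertThat(action, instanceOf(...)) checks redundant.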
settingsUpdates.incrementAndGet(); - assertThat(action, instanceOf(UpdateSettingsAction.class)); assertThat(request, instanceOf(UpdateSettingsRequest.class)); assertNotNull(listener); return AcknowledgedResponse.TRUE; diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistryTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistryTests.java index ad418e1bbc5e8..fb1051add3f1b 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistryTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistryTests.java @@ -12,8 +12,8 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutIndexTemplateAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; @@ -43,7 +43,8 @@ import org.elasticsearch.xpack.core.ilm.LifecyclePolicyMetadata; import org.elasticsearch.xpack.core.ilm.OperationMode; import org.elasticsearch.xpack.core.ilm.TimeseriesLifecycleType; -import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; +import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; import org.junit.After; import org.junit.Before; @@ -59,6 +60,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; @@ -135,18 +137,17 @@ public void testThatNonExistingPoliciesAreAddedImmediately() throws Exception { AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, request, listener) -> { - if (action instanceof PutLifecycleAction) { + if (action == ILMActions.PUT) { calledTimes.incrementAndGet(); - assertThat(action, instanceOf(PutLifecycleAction.class)); - assertThat(request, instanceOf(PutLifecycleAction.Request.class)); - final PutLifecycleAction.Request putRequest = (PutLifecycleAction.Request) request; + assertThat(request, instanceOf(PutLifecycleRequest.class)); + final PutLifecycleRequest putRequest = (PutLifecycleRequest) request; assertThat(putRequest.getPolicy().getName(), equalTo("profiling-60-days")); assertNotNull(listener); return AcknowledgedResponse.TRUE; } else if (action instanceof PutComponentTemplateAction) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; } else { @@ -173,13 +174,13 @@ public void testPolicyAlreadyExists() { if (action instanceof PutComponentTemplateAction) { // Ignore this, 
it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutIndexTemplateAction) { + } else if (action == TransportPutIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { fail("if the policy already exists it should not be re-put"); } else { fail("client called with unexpected request: " + request.toString()); @@ -209,13 +210,13 @@ public void testPolicyAlreadyExistsButDiffers() throws IOException { if (action instanceof PutComponentTemplateAction) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutIndexTemplateAction) { + } else if (action == TransportPutIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { fail("if the policy already exists it should not be re-put"); } else { fail("client called with unexpected request: " + request.toString()); @@ -263,17 +264,16 @@ public void testPolicyUpgraded() throws Exception { if (action instanceof PutComponentTemplateAction) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutIndexTemplateAction) { + } else if (action == TransportPutIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { calledTimes.incrementAndGet(); - assertThat(action, instanceOf(PutLifecycleAction.class)); - assertThat(request, instanceOf(PutLifecycleAction.Request.class)); - final PutLifecycleAction.Request putRequest = (PutLifecycleAction.Request) request; + assertThat(request, instanceOf(PutLifecycleRequest.class)); + final PutLifecycleRequest putRequest = (PutLifecycleRequest) request; assertThat(putRequest.getPolicy().getName(), equalTo("profiling-60-days")); assertNotNull(listener); return AcknowledgedResponse.TRUE; @@ -400,18 +400,19 @@ private ActionResponse verifyComposableTemplateInstalled( if (action instanceof PutComponentTemplateAction) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { calledTimes.incrementAndGet(); - assertThat(action, instanceOf(PutComposableIndexTemplateAction.class)); - assertThat(request, instanceOf(PutComposableIndexTemplateAction.Request.class)); - final 
PutComposableIndexTemplateAction.Request putRequest = ((PutComposableIndexTemplateAction.Request) request); + assertThat(action, sameInstance(TransportPutComposableIndexTemplateAction.TYPE)); + assertThat(request, instanceOf(TransportPutComposableIndexTemplateAction.Request.class)); + final TransportPutComposableIndexTemplateAction.Request putRequest = + ((TransportPutComposableIndexTemplateAction.Request) request); assertThat(putRequest.indexTemplate().version(), equalTo((long) ProfilingIndexTemplateRegistry.INDEX_TEMPLATE_VERSION)); assertNotNull(listener); return AcknowledgedResponse.TRUE; - } else if (action instanceof PutIndexTemplateAction) { + } else if (action == TransportPutIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; } else { diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementService.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementService.java index 29705d9e4b116..1289833051624 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementService.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementService.java @@ -215,6 +215,7 @@ private ActionListener wrapStoringListener( acquiredListener.onResponse(operation.initialResponse(searchTask)); } }, waitForCompletionTimeout, threadPool.executor(ThreadPool.Names.SEARCH)); + // This will be performed at the end of normal execution return ActionListener.wrap(response -> { ActionListener acquiredListener = exclusiveListener.getAndSet(null); @@ -234,7 +235,11 @@ private ActionListener wrapStoringListener( } } else { // We finished after timeout - saving results - storeResults(searchTask, new StoredAsyncResponse<>(response, threadPool.absoluteTimeInMillis() + keepAlive.getMillis())); + storeResults( + searchTask, + new StoredAsyncResponse<>(response, threadPool.absoluteTimeInMillis() + keepAlive.getMillis()), + ActionListener.running(response::decRef) + ); } }, e -> { ActionListener acquiredListener = exclusiveListener.getAndSet(null); @@ -268,10 +273,12 @@ private void storeResults(T searchTask, StoredAsyncResponse storedResp asyncTaskIndexService.createResponseForEQL( searchTask.getExecutionId().getDocId(), searchTask.getOriginHeaders(), + threadPool.getThreadContext().getResponseHeaders(), // includes ESQL warnings storedResponse, ActionListener.wrap( // We should only unregister after the result is saved resp -> { + // TODO: generalize the logging, not just eql logger.trace(() -> "stored eql search results for [" + searchTask.getExecutionId().getEncoded() + "]"); taskManager.unregister(searchTask); if (storedResponse.getException() != null) { @@ -290,6 +297,7 @@ private void storeResults(T searchTask, StoredAsyncResponse storedResp if (cause instanceof DocumentMissingException == false && cause instanceof VersionConflictEngineException == false) { logger.error( + // TODO: generalize the logging, not just eql () -> format("failed to store eql search results for [%s]", searchTask.getExecutionId().getEncoded()), exc ); @@ -309,9 +317,10 @@ private void storeResults(T searchTask, StoredAsyncResponse storedResp /** * Adds a self-unregistering listener to a task. It works as a normal listener except it retrieves a partial response and unregister - * itself from the task if timeout occurs. + * itself from the task if timeout occurs. Returns false if the listener could not be added, for example if the task has already completed.
+ * Otherwise, returns true. + */ - public static > void addCompletionListener( + public static > boolean addCompletionListener( ThreadPool threadPool, Task task, ActionListener> listener, @@ -319,9 +328,10 @@ public static ListenerTimeouts.wrapWithTimeout( threadPool, timeout, threadPool.executor(ThreadPool.Names.SEARCH), diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Expression.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Expression.java index 9e95dab82df19..0bbe663dab90e 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Expression.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Expression.java @@ -99,6 +99,26 @@ public boolean childrenResolved() { return lazyChildrenResolved; } + /** + * Does the tree rooted at this expression have valid types at all nodes? + * <p> + * For example, {@code SIN(1.2)} has a valid type and should return + * {@link TypeResolution#TYPE_RESOLVED} to signal "this type is fine". + * Another example, {@code SIN("cat")} has an invalid type in the + * tree. The value passed to the {@code SIN} function is a string which + * doesn't make any sense. So this method should return a "failure" + * resolution which it can build by calling {@link TypeResolution#TypeResolution(String)}. + * </p> + * <p> + * Take {@code SIN(1.2) + COS(ATAN("cat"))}, this tree should also + * fail, specifically because {@code ATAN("cat")} is invalid. This should + * fail even though {@code +} is perfectly valid when run on the results + * of {@code SIN} and {@code COS}. And {@code COS} can operate on the results + * of any valid call to {@code ATAN}. For this method to return a "valid" + * result the whole tree rooted at this expression must + * be valid. + * </p> + */ public final TypeResolution typeResolved() { if (lazyTypeResolution == null) { lazyTypeResolution = resolveType(); @@ -106,6 +126,17 @@ public final TypeResolution typeResolved() { return lazyTypeResolution; } + /** + * The implementation of {@link #typeResolved}, which is just a caching wrapper + * around this method. See its javadoc for what this method should return. + * <p> + * Implementations will rarely interact with the {@link TypeResolution} + * class directly, instead usually calling the utility methods on {@link TypeResolutions}. + * </p> + * <p> + * Implementations should fail if {@link #childrenResolved()} returns {@code false}. + * </p>
    + */ protected TypeResolution resolveType() { return TypeResolution.TYPE_RESOLVED; } @@ -142,6 +173,13 @@ public boolean resolved() { return childrenResolved() && typeResolved().resolved(); } + /** + * The {@link DataType} returned by executing the tree rooted at this + * expression. If {@link #typeResolved()} returns an error then the behavior + * of this method is undefined. It may return a valid + * type. Or it may throw an exception. Or it may return a totally nonsensical + * type. + */ public abstract DataType dataType(); @Override diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/planner/ExpressionTranslators.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/planner/ExpressionTranslators.java index c2c51863dbb77..0d659c5dbfb2d 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/planner/ExpressionTranslators.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/planner/ExpressionTranslators.java @@ -133,7 +133,7 @@ public static Query doTranslate(RegexMatch e, TranslatorHandler handler) { Expression field = e.field(); if (field instanceof FieldAttribute fa) { - q = translateField(e, handler.nameOf(fa.exactAttribute())); + return handler.wrapFunctionQuery(e, fa, () -> translateField(e, handler.nameOf(fa.exactAttribute()))); } else if (field instanceof MetadataAttribute ma) { q = translateField(e, handler.nameOf(ma)); } else { diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/rule/RuleExecutor.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/rule/RuleExecutor.java index cb13cfd651ed3..2ccdd66089c79 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/rule/RuleExecutor.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/rule/RuleExecutor.java @@ -67,6 +67,10 @@ public Batch(String name, Rule... 
rules) { public String name() { return name; } + + public Rule[] rules() { + return rules; + } } private Iterable> batches = null; diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypes.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypes.java index 57e472cd5bb17..48fb3a34469fb 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypes.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypes.java @@ -9,22 +9,22 @@ import org.apache.lucene.geo.GeoEncodingUtils; import org.apache.lucene.geo.XYEncodingUtils; -import org.elasticsearch.common.geo.GeoPoint; -import org.elasticsearch.common.geo.SpatialPoint; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.Point; import org.elasticsearch.geometry.utils.GeometryValidator; +import org.elasticsearch.geometry.utils.WellKnownBinary; import org.elasticsearch.geometry.utils.WellKnownText; -import java.util.Locale; +import java.nio.ByteOrder; import static org.apache.lucene.geo.GeoEncodingUtils.encodeLatitude; import static org.apache.lucene.geo.GeoEncodingUtils.encodeLongitude; public enum SpatialCoordinateTypes { GEO { - public SpatialPoint longAsPoint(long encoded) { - return new GeoPoint(GeoEncodingUtils.decodeLatitude((int) (encoded >>> 32)), GeoEncodingUtils.decodeLongitude((int) encoded)); + public Point longAsPoint(long encoded) { + return new Point(GeoEncodingUtils.decodeLongitude((int) encoded), GeoEncodingUtils.decodeLatitude((int) (encoded >>> 32))); } public long pointAsLong(double x, double y) { @@ -32,16 +32,23 @@ public long pointAsLong(double x, double y) { int longitudeEncoded = encodeLongitude(x); return (((long) latitudeEncoded) << 32) | (longitudeEncoded & 0xFFFFFFFFL); } - - public SpatialPoint pointAsPoint(Point point) { - return new GeoPoint(point.getY(), point.getX()); - } }, CARTESIAN { - public SpatialPoint longAsPoint(long encoded) { - final double x = XYEncodingUtils.decode((int) (encoded >>> 32)); - final double y = XYEncodingUtils.decode((int) (encoded & 0xFFFFFFFF)); - return makePoint(x, y); + + private static final int MAX_VAL_ENCODED = XYEncodingUtils.encode((float) XYEncodingUtils.MAX_VAL_INCL); + private static final int MIN_VAL_ENCODED = XYEncodingUtils.encode((float) XYEncodingUtils.MIN_VAL_INCL); + + public Point longAsPoint(long encoded) { + final int x = checkCoordinate((int) (encoded >>> 32)); + final int y = checkCoordinate((int) (encoded & 0xFFFFFFFF)); + return new Point(XYEncodingUtils.decode(x), XYEncodingUtils.decode(y)); + } + + private int checkCoordinate(int i) { + if (i > MAX_VAL_ENCODED || i < MIN_VAL_ENCODED) { + throw new IllegalArgumentException("Failed to convert invalid encoded value to cartesian point"); + } + return i; } public long pointAsLong(double x, double y) { @@ -49,71 +56,45 @@ public long pointAsLong(double x, double y) { final long yi = XYEncodingUtils.encode((float) y); return (yi & 0xFFFFFFFFL) | xi << 32; } + }; - public SpatialPoint pointAsPoint(Point point) { - return makePoint(point.getX(), point.getY()); - } + public abstract Point longAsPoint(long encoded); - private SpatialPoint makePoint(double x, double y) { - return new SpatialPoint() { - @Override - public double getX() { - return x; - } - - @Override - public double getY() { - return y; - } - - @Override - public int hashCode() { - return 31 * Double.hashCode(x) + Double.hashCode(y); - } - - 
@Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (obj instanceof SpatialPoint other) { - return x == other.getX() && y == other.getY(); - } - return false; - } - - @Override - public String toString() { - return String.format(Locale.ROOT, "POINT (%f %f)", x, y); - } - }; - } - }; + public abstract long pointAsLong(double x, double y); - public abstract SpatialPoint longAsPoint(long encoded); + public String pointAsString(Point point) { + return WellKnownText.toWKT(point); + } - public long pointAsLong(SpatialPoint point) { - return pointAsLong(point.getX(), point.getY()); + public BytesRef pointAsWKB(Point point) { + return new BytesRef(WellKnownBinary.toWKB(point, ByteOrder.LITTLE_ENDIAN)); } - public abstract long pointAsLong(double x, double y); + public BytesRef longAsWKB(long encoded) { + return pointAsWKB(longAsPoint(encoded)); + } - public String pointAsString(SpatialPoint point) { - return WellKnownText.toWKT(new Point(point.getX(), point.getY())); + public long wkbAsLong(BytesRef wkb) { + Geometry geometry = WellKnownBinary.fromWKB(GeometryValidator.NOOP, false, wkb.bytes, wkb.offset, wkb.length); + if (geometry instanceof Point point) { + return pointAsLong(point.getX(), point.getY()); + } else { + throw new IllegalArgumentException("Unsupported geometry: " + geometry.type()); + } } - public SpatialPoint stringAsPoint(String string) { + public BytesRef stringAsWKB(String string) { + // TODO: we should be able to transform WKT to WKB without building the geometry + // we should as well use different validator for cartesian and geo? try { Geometry geometry = WellKnownText.fromWKT(GeometryValidator.NOOP, false, string); - if (geometry instanceof Point point) { - return pointAsPoint(point); - } else { - throw new IllegalArgumentException("Unsupported geometry type " + geometry.type()); - } + return new BytesRef(WellKnownBinary.toWKB(geometry, ByteOrder.LITTLE_ENDIAN)); } catch (Exception e) { - throw new RuntimeException("Failed to parse WKT: " + e.getMessage(), e); + throw new IllegalArgumentException("Failed to parse WKT: " + e.getMessage(), e); } } - public abstract SpatialPoint pointAsPoint(Point point); + public String wkbAsString(BytesRef wkb) { + return WellKnownText.fromWKB(wkb.bytes, wkb.offset, wkb.length); + } } diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/StringUtils.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/StringUtils.java index 83c731ce4e7a9..dad3c8574dc4a 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/StringUtils.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/StringUtils.java @@ -44,7 +44,7 @@ private StringUtils() {} private static final String INVALID_REGEX_SEQUENCE = "Invalid sequence - escape character is not followed by special wildcard char"; - // CamelCase to camel_case + // CamelCase to camel_case (and isNaN to is_nan) public static String camelCaseToUnderscore(String string) { if (Strings.hasText(string) == false) { return EMPTY; @@ -57,7 +57,8 @@ public static String camelCaseToUnderscore(String string) { char ch = s.charAt(i); if (Character.isAlphabetic(ch)) { if (Character.isUpperCase(ch)) { - if (i > 0 && previousCharWasUp == false) { + // append `_` when encountering a capital after a small letter, but only if not the last letter. 
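// Editorial trace of the added guard (a sketch, not part of the diff): for "isNaN",
// the 'N' at index 2 follows the lowercase 's', so an underscore is appended and the
// output becomes "is_nan". The new `i < s.length() - 1` condition keeps the trailing
// 'N' (index 4, preceded by lowercase 'a') from also qualifying, which would
// otherwise produce "is_na_n".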
+ if (i > 0 && i < s.length() - 1 && previousCharWasUp == false) { sb.append("_"); } previousCharWasUp = true; diff --git a/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypesTests.java b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypesTests.java index 6909475c04521..ca650bf29662f 100644 --- a/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypesTests.java +++ b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypesTests.java @@ -7,7 +7,9 @@ package org.elasticsearch.xpack.ql.util; -import org.elasticsearch.common.geo.SpatialPoint; +import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.geo.ShapeTestUtils; +import org.elasticsearch.geometry.Point; import org.elasticsearch.test.ESTestCase; import java.util.LinkedHashMap; @@ -21,10 +23,10 @@ public class SpatialCoordinateTypesTests extends ESTestCase { private static final Map types = new LinkedHashMap<>(); static { - types.put(SpatialCoordinateTypes.GEO, new TestTypeFunctions(ESTestCase::randomGeoPoint, v -> 1e-5)); + types.put(SpatialCoordinateTypes.GEO, new TestTypeFunctions(GeometryTestUtils::randomPoint, v -> 1e-5)); types.put( SpatialCoordinateTypes.CARTESIAN, - new TestTypeFunctions(ESTestCase::randomCartesianPoint, SpatialCoordinateTypesTests::cartesianError) + new TestTypeFunctions(ShapeTestUtils::randomPoint, SpatialCoordinateTypesTests::cartesianError) ); } @@ -33,15 +35,15 @@ private static double cartesianError(double v) { return (abs < 1) ? 1e-5 : abs / 1e7; } - record TestTypeFunctions(Supplier randomPoint, Function error) {} + record TestTypeFunctions(Supplier randomPoint, Function error) {} public void testEncoding() { for (var type : types.entrySet()) { for (int i = 0; i < 10; i++) { SpatialCoordinateTypes coordType = type.getKey(); - SpatialPoint original = type.getValue().randomPoint().get(); + Point original = type.getValue().randomPoint().get(); var error = type.getValue().error; - SpatialPoint point = coordType.longAsPoint(coordType.pointAsLong(original)); + Point point = coordType.longAsPoint(coordType.pointAsLong(original.getX(), original.getY())); assertThat(coordType + ": Y[" + i + "]", point.getY(), closeTo(original.getY(), error.apply(original.getY()))); assertThat(coordType + ": X[" + i + "]", point.getX(), closeTo(original.getX(), error.apply(original.getX()))); } @@ -52,10 +54,8 @@ public void testParsing() { for (var type : types.entrySet()) { for (int i = 0; i < 10; i++) { SpatialCoordinateTypes coordType = type.getKey(); - SpatialPoint geoPoint = type.getValue().randomPoint.get(); - SpatialPoint point = coordType.stringAsPoint(coordType.pointAsString(geoPoint)); - assertThat(coordType + ": Y[" + i + "]", point.getY(), closeTo(geoPoint.getY(), 1e-5)); - assertThat(coordType + ": X[" + i + "]", point.getX(), closeTo(geoPoint.getX(), 1e-5)); + Point point = type.getValue().randomPoint.get(); + assertEquals(coordType.wkbAsString(coordType.pointAsWKB(point)), coordType.pointAsString(point)); } } } diff --git a/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java b/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java index 7ddd660645a7c..a3b5147988b13 100644 --- a/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java +++ b/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java @@ -10,6 +10,7 @@ import 
java.util.ArrayList; import java.util.List; import java.util.Locale; +import java.util.function.Function; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.assertThat; @@ -119,6 +120,16 @@ public List expectedWarnings(boolean forEmulated) { } return warnings; } + + /** + * Modifies the expected warnings. + * In some cases, we modify the query to run against multiple clusters. As a result, the line/column positions + * of the expected warnings no longer match the actual warnings. To enable reusing of spec tests, this method + * allows adjusting the expected warnings. + */ + public void adjustExpectedWarnings(Function updater) { + expectedWarnings.replaceAll(updater::apply); + } } } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java index e90ad56e3395a..ed3a3f294c65c 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java @@ -34,7 +34,6 @@ import org.elasticsearch.search.aggregations.metrics.Min; import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.xpack.core.rollup.RollupField; import java.nio.charset.StandardCharsets; @@ -340,20 +339,15 @@ private static SearchResponse mergeFinalResponse( isTerminatedEarly = isTerminatedEarly && liveResponse.isTerminatedEarly(); numReducePhases += liveResponse.getNumReducePhases(); } - - InternalSearchResponse combinedInternal = new InternalSearchResponse( + // Shard failures are ignored atm, so returning an empty array is fine + return new SearchResponse( SearchHits.EMPTY_WITH_TOTAL_HITS, aggs, null, - null, isTimedOut, isTerminatedEarly, - numReducePhases - ); - - // Shard failures are ignored atm, so returning an empty array is fine - return new SearchResponse( - combinedInternal, + null, + numReducePhases, null, totalShards, sucessfulShards, diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java index 9364d5fcc3f6d..41c2f855ff8c9 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java @@ -19,8 +19,8 @@ import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -279,7 +279,7 @@ static void updateMapping( PutMappingRequest request = new PutMappingRequest(indexName); request.source(newMapping); client.execute( - 
PutMappingAction.INSTANCE, + TransportPutMappingAction.TYPE, request, ActionListener.wrap(putMappingResponse -> startPersistentTask(job, listener, persistentTasksService), listener::onFailure) ); diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestPutRollupJobAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestPutRollupJobAction.java index d6c00e3e89682..e434da37b7585 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestPutRollupJobAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestPutRollupJobAction.java @@ -29,7 +29,10 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final String id = request.param("id"); - final PutRollupJobAction.Request putRollupJobRequest = PutRollupJobAction.Request.fromXContent(request.contentParser(), id); + final PutRollupJobAction.Request putRollupJobRequest; + try (var parser = request.contentParser()) { + putRollupJobRequest = PutRollupJobAction.Request.fromXContent(parser, id); + } return channel -> client.execute(PutRollupJobAction.INSTANCE, putRollupJobRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestRollupSearchAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestRollupSearchAction.java index 693daeaee030a..266f515d1dbb6 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestRollupSearchAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestRollupSearchAction.java @@ -10,7 +10,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.xpack.core.rollup.action.RollupSearchAction; @@ -48,7 +48,11 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient ) ); RestSearchAction.validateSearchRequest(restRequest, searchRequest); - return channel -> client.execute(RollupSearchAction.INSTANCE, searchRequest, new RestChunkedToXContentListener<>(channel)); + return channel -> client.execute( + RollupSearchAction.INSTANCE, + searchRequest, + new RestRefCountedChunkedToXContentListener<>(channel) + ); } @Override diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java index 44f5f51668ea3..7e814230a2223 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java @@ -72,7 +72,6 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValueType; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.InternalAggregationTestCase; import 
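
Review note: the `RestPutRollupJobAction` change above scopes the `XContentParser` from `request.contentParser()` to a try-with-resources block, while the parsed request is declared outside so it outlives the parser. The same shape in plain-JDK terms, with a `Reader` standing in for the parser and an invented file path:

```java
import java.io.BufferedReader;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

// The parsed value outlives the parser; the parser itself is closed on
// every exit path, including when parsing throws.
static String parseBody(Path body) throws IOException {
    final String parsed;
    try (BufferedReader reader = Files.newBufferedReader(body)) {
        parsed = reader.readLine(); // stand-in for Request.fromXContent(parser, id)
    }
    return parsed;
}
```
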
org.elasticsearch.xpack.core.rollup.RollupField; @@ -516,15 +515,13 @@ public void testMismatch() throws IOException { // TODO SearchResponse.Clusters is not public, using null for now. Should fix upstream. MultiSearchResponse.Item unrolledItem = new MultiSearchResponse.Item( new SearchResponse( - new InternalSearchResponse( - null, - InternalAggregations.from(Collections.singletonList(responses.get(0))), - null, - null, - false, - false, - 1 - ), + null, + InternalAggregations.from(Collections.singletonList(responses.get(0))), + null, + false, + false, + null, + 1, null, 1, 1, @@ -537,15 +534,13 @@ public void testMismatch() throws IOException { ); MultiSearchResponse.Item rolledItem = new MultiSearchResponse.Item( new SearchResponse( - new InternalSearchResponse( - null, - InternalAggregations.from(Collections.singletonList(responses.get(1))), - null, - null, - false, - false, - 1 - ), + null, + InternalAggregations.from(Collections.singletonList(responses.get(1))), + null, + false, + false, + null, + 1, null, 1, 1, diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java index fef48f32d1fea..dd6f5173cb6ba 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java @@ -15,8 +15,8 @@ import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.MappingMetadata; @@ -353,11 +353,11 @@ public void testAddJobToMapping() { // Bail here with an error, further testing will happen through tests of #startPersistentTask requestCaptor2.getValue().onFailure(new RuntimeException("Ending")); return null; - }).when(client).execute(eq(PutMappingAction.INSTANCE), any(PutMappingRequest.class), requestCaptor2.capture()); + }).when(client).execute(eq(TransportPutMappingAction.TYPE), any(PutMappingRequest.class), requestCaptor2.capture()); TransportPutRollupJobAction.updateMapping(job, testListener, mock(PersistentTasksService.class), client, logger); verify(client).execute(eq(GetMappingsAction.INSTANCE), any(GetMappingsRequest.class), any()); - verify(client).execute(eq(PutMappingAction.INSTANCE), any(PutMappingRequest.class), any()); + verify(client).execute(eq(TransportPutMappingAction.TYPE), any(PutMappingRequest.class), any()); } @SuppressWarnings({ "unchecked", "rawtypes" }) diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java index 16034354d0ff2..1e6a4794b14ae 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java +++ 
b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.time.DateFormatter; @@ -866,17 +865,25 @@ protected void doNextSearch(long waitTimeInNanos, ActionListener } catch (IOException e) { listener.onFailure(e); } - SearchResponseSections sections = new SearchResponseSections( - null, - new Aggregations(Collections.singletonList(result)), - null, - false, - null, - null, - 1 + ActionListener.respondAndRelease( + listener, + new SearchResponse( + null, + new Aggregations(Collections.singletonList(result)), + null, + false, + null, + null, + 1, + null, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + null + ) ); - SearchResponse response = new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null); - listener.onResponse(response); } @Override diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java index 6fb40541330b2..bb910da326e0a 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.rollup.job; -import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; @@ -14,9 +13,7 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.InternalAggregations; @@ -106,17 +103,25 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return null; } })); - final SearchResponseSections sections = new SearchResponseSections( - new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0), - aggs, - null, - false, - null, - null, - 1 + ActionListener.respondAndRelease( + nextPhase, + new SearchResponse( + SearchHits.EMPTY_WITH_TOTAL_HITS, + aggs, + null, + false, + null, + null, + 1, + null, + 1, + 1, + 0, + 0, + new ShardSearchFailure[0], + null + ) ); - final SearchResponse response = new SearchResponse(sections, null, 1, 1, 0, 0, new ShardSearchFailure[0], null); - nextPhase.onResponse(response); } @Override @@ -222,8 +227,7 @@ protected void doNextSearch(long waitTimeInNanos, ActionListener } try { - SearchResponse response = searchFunction.apply(buildSearchRequest()); - nextPhase.onResponse(response); + ActionListener.respondAndRelease(nextPhase, searchFunction.apply(buildSearchRequest())); } catch (Exception e) { nextPhase.onFailure(e); } @@ -473,17 +477,25 @@ public 
XContentBuilder toXContent(XContentBuilder builder, Params params) throws return null; } })); - final SearchResponseSections sections = new SearchResponseSections( - new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0), - aggs, - null, - false, - null, - null, - 1 + ActionListener.respondAndRelease( + nextPhase, + new SearchResponse( + SearchHits.EMPTY_WITH_TOTAL_HITS, + aggs, + null, + false, + null, + null, + 1, + null, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + null + ) ); - final SearchResponse response = new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null); - nextPhase.onResponse(response); } @Override @@ -684,16 +696,22 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return null; } })); - final SearchResponseSections sections = new SearchResponseSections( - new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0), + return new SearchResponse( + SearchHits.EMPTY_WITH_TOTAL_HITS, aggs, null, false, null, null, - 1 + 1, + null, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + null ); - return new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null); }; Function bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); @@ -808,16 +826,22 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return null; } })); - final SearchResponseSections sections = new SearchResponseSections( - new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0), + return new SearchResponse( + SearchHits.EMPTY_WITH_TOTAL_HITS, aggs, null, false, null, null, - 1 + 1, + null, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + null ); - return new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null); }; Function bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); @@ -981,16 +1005,22 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return null; } })); - final SearchResponseSections sections = new SearchResponseSections( - new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0), + return new SearchResponse( + SearchHits.EMPTY_WITH_TOTAL_HITS, aggs, null, false, null, null, - 1 + 1, + null, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + null ); - return new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null); }; Function bulkFunction = bulkRequest -> { diff --git a/x-pack/plugin/searchable-snapshots/qa/url/build.gradle b/x-pack/plugin/searchable-snapshots/qa/url/build.gradle index 12fc0873958e1..850fe85ece3cd 100644 --- a/x-pack/plugin/searchable-snapshots/qa/url/build.gradle +++ b/x-pack/plugin/searchable-snapshots/qa/url/build.gradle @@ -1,12 +1,11 @@ import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE -apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.rest-resources' -final Project fixture = project(':test:fixtures:nginx-fixture') - dependencies { javaRestTestImplementation(testArtifact(project(xpackModule('searchable-snapshots')))) + javaRestTestImplementation project(':test:fixtures:url-fixture') } restResources { @@ -15,34 +14,6 @@ restResources { } } -apply plugin: 'elasticsearch.test.fixtures' -testFixtures.useFixture(fixture.path, 'nginx-fixture') - -def fixtureAddress = { fixtureName -> - int 
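
Review note: the recurring edit in the rollup indexer tests above swaps `nextPhase.onResponse(response)` for `ActionListener.respondAndRelease(nextPhase, ...)`, consistent with `SearchResponse` being ref-counted. A stripped-down sketch of that ownership hand-off under that assumption; the `RefCounted` and listener types here are simplified stand-ins, not the real Elasticsearch interfaces:

```java
import java.util.function.Consumer;

interface RefCounted {
    void incRef();
    void decRef();
}

// Respond first, then drop the reference we own; a listener that needs the
// response beyond this call is expected to take its own reference.
static <T extends RefCounted> void respondAndRelease(Consumer<T> listener, T response) {
    try {
        listener.accept(response);
    } finally {
        response.decRef();
    }
}
```
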
ephemeralPort = fixture.postProcessFixture.ext."test.fixtures.${fixtureName}.tcp.80" - assert ephemeralPort > 0 - 'http://127.0.0.1:' + ephemeralPort -} - -File repositoryDir = fixture.fsRepositoryDir as File - tasks.named("javaRestTest").configure { - dependsOn fixture.getTasks().named("postProcessFixture") - - nonInputProperties.systemProperty 'test.url.fs.repo.dir', repositoryDir.absolutePath - nonInputProperties.systemProperty 'test.url.http', "${-> fixtureAddress('nginx-fixture')}" -} - -testClusters.matching { it.name == "javaRestTest" }.configureEach { - testDistribution = 'DEFAULT' - setting 'path.repo', repositoryDir.absolutePath, IGNORE_VALUE - setting 'repositories.url.allowed_urls', { "${-> fixtureAddress('nginx-fixture')}" }, IGNORE_VALUE - - setting 'xpack.license.self_generated.type', 'trial' - - setting 'xpack.searchable.snapshot.shared_cache.size', '16MB' - setting 'xpack.searchable.snapshot.shared_cache.region_size', '256KB' - setting 'xpack.searchable_snapshots.cache_fetch_async_thread_pool.keep_alive', '0ms' - - setting 'xpack.security.enabled', 'false' + usesDefaultDistribution() } diff --git a/x-pack/plugin/searchable-snapshots/qa/url/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/URLSearchableSnapshotsIT.java b/x-pack/plugin/searchable-snapshots/qa/url/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/URLSearchableSnapshotsIT.java index b37b71cf95a31..b59dcb3a9d210 100644 --- a/x-pack/plugin/searchable-snapshots/qa/url/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/URLSearchableSnapshotsIT.java +++ b/x-pack/plugin/searchable-snapshots/qa/url/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/URLSearchableSnapshotsIT.java @@ -7,14 +7,37 @@ package org.elasticsearch.xpack.searchablesnapshots; +import fixture.url.URLFixture; + import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.not; public class URLSearchableSnapshotsIT extends AbstractSearchableSnapshotsRestTestCase { + public static URLFixture urlFixture = new URLFixture(); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .setting("xpack.license.self_generated.type", "trial") + .setting("repositories.url.allowed_urls", () -> urlFixture.getAddress()) + .setting("path.repo", () -> urlFixture.getRepositoryDir()) + .setting("xpack.searchable.snapshot.shared_cache.size", "16MB") + .setting("xpack.searchable.snapshot.shared_cache.region_size", "256KB") + .setting("xpack.searchable_snapshots.cache_fetch_async_thread_pool.keep_alive", "0ms") + .setting("xpack.security.enabled", "false") + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(urlFixture).around(cluster); + @Override protected String writeRepositoryType() { return FsRepository.TYPE; @@ -22,7 +45,7 @@ protected String writeRepositoryType() { @Override protected Settings writeRepositorySettings() { - final String repoDirectory = System.getProperty("test.url.fs.repo.dir"); + final String repoDirectory = urlFixture.getRepositoryDir(); assertThat(repoDirectory, not(blankOrNullString())); return 
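
Review note: in the rewritten `URLSearchableSnapshotsIT` above, startup order is encoded by the `RuleChain`: `outerRule(urlFixture).around(cluster)` brings the URL fixture up before the cluster (whose `repositories.url.allowed_urls` and `path.repo` settings are suppliers reading from the fixture) and tears it down only after the cluster stops. A small self-contained JUnit 4 sketch of that ordering guarantee:

```java
import org.junit.rules.ExternalResource;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;

public class RuleOrderingExample {
    static ExternalResource named(String name) {
        return new ExternalResource() {
            @Override protected void before() { System.out.println(name + " up"); }
            @Override protected void after() { System.out.println(name + " down"); }
        };
    }

    // Runs as: fixture up, cluster up, <tests>, cluster down, fixture down.
    public static final TestRule CHAIN = RuleChain.outerRule(named("fixture")).around(named("cluster"));
}
```
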
Settings.builder().put("location", repoDirectory).build(); @@ -40,9 +63,14 @@ protected String readRepositoryType() { @Override protected Settings readRepositorySettings() { - final String url = System.getProperty("test.url.http"); + final String url = urlFixture.getAddress(); assertThat(url, not(blankOrNullString())); return Settings.builder().put("url", url).build(); } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java index b7dc212fe12ad..ad5f57645aa84 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java @@ -8,9 +8,12 @@ import org.apache.lucene.search.TotalHits; import org.apache.lucene.store.AlreadyClosedException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.blobcache.BlobCachePlugin; import org.elasticsearch.blobcache.shared.SharedBlobCacheService; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -33,6 +36,8 @@ import org.elasticsearch.index.shard.ShardPath; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState; +import org.elasticsearch.indices.store.TransportNodesListShardStoreMetadata; +import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; @@ -85,6 +90,7 @@ protected Collection> nodePlugins() { plugins.add(BlobCachePlugin.class); plugins.add(LocalStateSearchableSnapshots.class); plugins.add(LicensedSnapshotBasedRecoveriesPlugin.class); + plugins.add(ForbiddenActionsPlugin.class); return Collections.unmodifiableList(plugins); } @@ -359,4 +365,38 @@ public boolean isLicenseEnabled() { return true; } } + + public static class ForbiddenActionsPlugin extends Plugin implements ActionPlugin { + + private ActionFilter actionFilter; + + @Override + public Collection createComponents(PluginServices services) { + final var clusterService = services.clusterService(); + actionFilter = new ActionFilter.Simple() { + @Override + protected boolean apply(String action, ActionRequest request, ActionListener listener) { + if (action.equals(TransportNodesListShardStoreMetadata.ACTION_NAME)) { + final var shardId = asInstanceOf(TransportNodesListShardStoreMetadata.Request.class, request).shardId(); + final var indexMetadata = clusterService.state().metadata().index(shardId.getIndex()); + if (indexMetadata != null) { + assertFalse(shardId.toString(), indexMetadata.isSearchableSnapshot()); + } + } + return true; + } + + @Override + public int order() { + return 0; + } + }; + return List.of(); + } + + @Override + 
public List getActionFilters() { + return List.of(actionFilter); + } + } } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java index 5ef524f8211c1..18b4e6ed7cb31 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java @@ -208,12 +208,12 @@ public void testCreateAndRestorePartialSearchableSnapshot() throws Exception { for (ShardStats shardStats : indicesStatsResponse.getShards()) { StoreStats store = shardStats.getStats().getStore(); - assertThat(shardStats.getShardRouting().toString(), store.getReservedSize().getBytes(), equalTo(0L)); - assertThat(shardStats.getShardRouting().toString(), store.getSize().getBytes(), equalTo(0L)); + assertThat(shardStats.getShardRouting().toString(), store.reservedSizeInBytes(), equalTo(0L)); + assertThat(shardStats.getShardRouting().toString(), store.sizeInBytes(), equalTo(0L)); } if (indicesStatsResponse.getShards().length > 0) { - assertThat(indicesStatsResponse.getTotal().getStore().getReservedSize().getBytes(), equalTo(0L)); - assertThat(indicesStatsResponse.getTotal().getStore().getSize().getBytes(), equalTo(0L)); + assertThat(indicesStatsResponse.getTotal().getStore().reservedSizeInBytes(), equalTo(0L)); + assertThat(indicesStatsResponse.getTotal().getStore().sizeInBytes(), equalTo(0L)); } } }, "test-stats-watcher"); @@ -251,8 +251,8 @@ public void testCreateAndRestorePartialSearchableSnapshot() throws Exception { StoreStats store = shardStats.getStats().getStore(); final ShardRouting shardRouting = shardStats.getShardRouting(); - assertThat(shardRouting.toString(), store.getReservedSize().getBytes(), equalTo(0L)); - assertThat(shardRouting.toString(), store.getSize().getBytes(), equalTo(0L)); + assertThat(shardRouting.toString(), store.reservedSizeInBytes(), equalTo(0L)); + assertThat(shardRouting.toString(), store.sizeInBytes(), equalTo(0L)); // the original shard size from the snapshot final long originalSize = snapshotShards.get(shardRouting.getId()).getStats().getTotalSize(); @@ -273,11 +273,11 @@ public void testCreateAndRestorePartialSearchableSnapshot() throws Exception { final ByteBuffersDirectory inMemoryDir = (ByteBuffersDirectory) unwrappedDir; assertThat(inMemoryDir.listAll(), arrayWithSize(1)); - assertThat(shardRouting.toString(), store.getTotalDataSetSize().getBytes(), equalTo(originalSize)); + assertThat(shardRouting.toString(), store.totalDataSetSizeInBytes(), equalTo(originalSize)); } final StoreStats store = indicesStatsResponse.getTotal().getStore(); - assertThat(store.getTotalDataSetSize().getBytes(), equalTo(totalExpectedSize)); + assertThat(store.totalDataSetSizeInBytes(), equalTo(totalExpectedSize)); statsWatcherRunning.set(false); statsWatcher.join(); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java index c80cf3c3d62e3..e3b631ba69c8a 100644 --- 
a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java @@ -143,23 +143,18 @@ public void testRetryPointInTime() throws Exception { ).keepAlive(TimeValue.timeValueMinutes(2)); final String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openRequest).actionGet().getPointInTimeId(); try { - assertNoFailuresAndResponse( - prepareSearch().setIndices(indexName).setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), - resp -> { - assertThat(resp.pointInTimeId(), equalTo(pitId)); - assertHitCount(resp, docCount); - } - ); + assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { + assertThat(resp.pointInTimeId(), equalTo(pitId)); + assertHitCount(resp, docCount); + }); final Set allocatedNodes = internalCluster().nodesInclude(indexName); for (String allocatedNode : allocatedNodes) { internalCluster().restartNode(allocatedNode); } ensureGreen(indexName); assertNoFailuresAndResponse( - prepareSearch().setIndices(indexName) - .setQuery(new RangeQueryBuilder("created_date").gte("2011-01-01").lte("2011-12-12")) + prepareSearch().setQuery(new RangeQueryBuilder("created_date").gte("2011-01-01").lte("2011-12-12")) .setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreference(null) .setPreFilterShardSize(between(1, 10)) .setAllowPartialSearchResults(true) .setPointInTime(new PointInTimeBuilder(pitId)), diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java index 876ff9ebdb86f..38222f64b282b 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java @@ -498,7 +498,7 @@ private Map getMaxShardSizeByNodeInBytes(String indexName) { IndexStats indexStats = indicesStats.getIndex(indexName); Map maxShardSizeByNode = new HashMap<>(); for (ShardStats shard : indexStats.getShards()) { - long sizeInBytes = shard.getStats().getStore().getSizeInBytes(); + long sizeInBytes = shard.getStats().getStore().sizeInBytes(); if (sizeInBytes > 0) { maxShardSizeByNode.compute( shard.getShardRouting().currentNodeId(), diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java index 824114139a7e2..7ee81b444af46 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import 
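
Review note: in the `RetrySearchIntegTests` change above, dropping `setIndices`/`setPreference` leans on the point-in-time to carry the search context. A hypothetical outline of the flow the test exercises; every helper below is a stand-in for the client calls made in the test, not a real API:

```java
// Stand-ins for the test's client interactions.
interface PitClient {
    String openPointInTime(String index, String keepAlive);
    long countHitsWithPit(String pitId);
    void restartNodesHolding(String index);
    void closePointInTime(String pitId);
}

static void retryAcrossRestarts(PitClient client, String index, long expectedDocs) {
    // The keep-alive must outlive the node restarts in the middle of the test.
    String pitId = client.openPointInTime(index, "2m");
    try {
        // A PIT search carries its own index context: no indices, no preference.
        assert client.countHitsWithPit(pitId) == expectedDocs;
        client.restartNodesHolding(index);
        // After recovery, searching the same PIT id still succeeds.
        assert client.countHitsWithPit(pitId) == expectedDocs;
    } finally {
        client.closePointInTime(pitId);
    }
}
```
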
org.elasticsearch.license.DeleteLicenseAction; import org.elasticsearch.license.License; import org.elasticsearch.license.LicensesMetadata; import org.elasticsearch.license.PostStartBasicAction; @@ -25,6 +24,7 @@ import org.elasticsearch.license.PostStartTrialAction; import org.elasticsearch.license.PostStartTrialRequest; import org.elasticsearch.license.PostStartTrialResponse; +import org.elasticsearch.license.TransportDeleteLicenseAction; import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotAction; @@ -78,7 +78,7 @@ public void createAndMountSearchableSnapshot() throws Exception { assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); ensureGreen(indexName); - assertAcked(client().execute(DeleteLicenseAction.INSTANCE, new DeleteLicenseRequest()).get()); + assertAcked(client().execute(TransportDeleteLicenseAction.TYPE, new DeleteLicenseRequest()).get()); assertAcked(client().execute(PostStartBasicAction.INSTANCE, new PostStartBasicRequest()).get()); ensureClusterSizeConsistency(); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/PartiallyCachedShardAllocationIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/PartiallyCachedShardAllocationIntegTests.java index 2824aa22496a1..a55521394f548 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/PartiallyCachedShardAllocationIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/PartiallyCachedShardAllocationIntegTests.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplanation; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.support.ActionTestUtils; -import org.elasticsearch.action.support.ListenableActionFuture; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.routing.RoutingNode; @@ -198,10 +198,10 @@ public void testPartialSearchableSnapshotDelaysAllocationUntilNodeCacheStatesKno final MountSearchableSnapshotRequest req = prepareMountRequest(); - final Map> cacheInfoBlocks = ConcurrentCollections.newConcurrentMap(); - final Function> cacheInfoBlockGetter = nodeName -> cacheInfoBlocks.computeIfAbsent( + final Map> cacheInfoBlocks = ConcurrentCollections.newConcurrentMap(); + final Function> cacheInfoBlockGetter = nodeName -> cacheInfoBlocks.computeIfAbsent( nodeName, - ignored -> new ListenableActionFuture<>() + ignored -> new SubscribableListener<>() ); // Unblock all the existing nodes for (final String nodeName : internalCluster().getNodeNames()) { diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java index fbac3d339e902..ee018578ce143 100644 --- 
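
Review note: `DeleteLicenseAction.INSTANCE` → `TransportDeleteLicenseAction.TYPE` here is the same mechanical rename as `PutMappingAction.INSTANCE` → `TransportPutMappingAction.TYPE` earlier in this diff: callers keep the same `client.execute(...)` shape but identify the action through a constant hosted on the transport class. A simplified stand-in sketch of that calling convention (these types are illustrations, not the real Elasticsearch classes):

```java
import java.util.function.Consumer;

// A typed action identifier plus a generic execute(...) is all callers need;
// the dedicated *Action singleton classes become redundant.
final class ActionType<Response> {
    final String name;
    ActionType(String name) { this.name = name; }
}

interface Client {
    <R> void execute(ActionType<R> type, Object request, Consumer<R> listener);
}
```
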
a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java @@ -58,6 +58,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentMap; +import java.util.function.Predicate; import static java.util.stream.Collectors.toSet; import static org.elasticsearch.cluster.routing.ExpectedShardSizeEstimator.getExpectedShardSize; @@ -125,7 +126,7 @@ public void beforeAllocation(RoutingAllocation allocation) { } @Override - public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) {} + public void afterPrimariesBeforeReplicas(RoutingAllocation allocation, Predicate isRelevantShardPredicate) {} @Override public void allocateUnassigned( diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java index 82a6a5fc55c13..24ece3ff99bc4 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java @@ -57,6 +57,7 @@ import java.nio.file.StandardOpenOption; import java.nio.file.attribute.PosixFileAttributeView; import java.nio.file.attribute.PosixFilePermission; +import java.security.GeneralSecurityException; import java.security.Key; import java.security.KeyPair; import java.security.KeyStore; @@ -570,6 +571,25 @@ static void writePkcs12( } }); } + + /** + * Verify that the provided certificate is validly signed by the provided CA + */ + static void verifyIssuer(Certificate certificate, CAInfo caInfo, Terminal terminal) throws UserException { + try { + certificate.verify(caInfo.certAndKey.cert.getPublicKey()); + } catch (GeneralSecurityException e) { + terminal.errorPrintln(""); + terminal.errorPrintln("* ERROR *"); + terminal.errorPrintln("Verification of generated certificate failed."); + terminal.errorPrintln("This usually occurs if the provided CA certificate does not match with the CA key."); + terminal.errorPrintln("Cause: " + e); + for (var c = e.getCause(); c != null; c = c.getCause()) { + terminal.errorPrintln(" - " + c); + } + throw new UserException(ExitCodes.CONFIG, "Certificate verification failed"); + } + } } static class SigningRequestCommand extends CertificateCommand { @@ -788,7 +808,7 @@ void generateAndWriteSignedCertificates( final boolean usePassword = super.useOutputPassword(options); fullyWriteZipFile(output, (outputStream, pemWriter) -> { for (CertificateInformation certificateInformation : certs) { - CertificateAndKey pair = generateCertificateAndKey(certificateInformation, caInfo, keySize, days); + CertificateAndKey pair = generateCertificateAndKey(certificateInformation, caInfo, keySize, days, terminal); final String dirName = certificateInformation.name.filename + "/"; ZipEntry zipEntry = new ZipEntry(dirName); @@ -836,7 +856,7 @@ void generateAndWriteSignedCertificates( } else { assert certs.size() == 1; CertificateInformation certificateInformation = certs.iterator().next(); - CertificateAndKey pair = generateCertificateAndKey(certificateInformation, caInfo, keySize, days); + CertificateAndKey pair = generateCertificateAndKey(certificateInformation, caInfo, keySize, days, terminal); fullyWriteFile( output, stream -> 
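
Review note: the new `verifyIssuer` check above reduces to one standard-library call: `Certificate.verify(PublicKey)` throws a `GeneralSecurityException` when the certificate was not signed by the private key matching that public key. A minimal sketch of the same check outside the CLI plumbing:

```java
import java.security.GeneralSecurityException;
import java.security.cert.Certificate;

// True if 'certificate' verifies against the CA certificate's public key,
// i.e. it was signed by the CA's private key; the CertificateTool turns a
// failure here into an ExitCodes.CONFIG UserException.
static boolean isSignedBy(Certificate certificate, Certificate caCert) {
    try {
        certificate.verify(caCert.getPublicKey());
        return true;
    } catch (GeneralSecurityException e) {
        return false; // e.g. the provided CA certificate does not match the CA key
    }
}
```
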
writePkcs12( @@ -856,7 +876,8 @@ private static CertificateAndKey generateCertificateAndKey( CertificateInformation certificateInformation, CAInfo caInfo, int keySize, - int days + int days, + Terminal terminal ) throws Exception { KeyPair keyPair = CertGenUtils.generateKeyPair(keySize); Certificate certificate; @@ -873,6 +894,7 @@ private static CertificateAndKey generateCertificateAndKey( caInfo.certAndKey.key, days ); + verifyIssuer(certificate, caInfo, terminal); } else { certificate = CertGenUtils.generateSignedCertificate( certificateInformation.name.x500Principal, @@ -940,6 +962,7 @@ private void writeCertificateAuthority(Path output, CAInfo caInfo, boolean write ); } } + } @SuppressForbidden(reason = "resolve paths against CWD for a CLI tool") diff --git a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java index a0484de419fe7..702bfac2a3ea5 100644 --- a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java +++ b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java @@ -27,6 +27,7 @@ import org.bouncycastle.cert.X509CertificateHolder; import org.bouncycastle.openssl.PEMParser; import org.bouncycastle.pkcs.PKCS10CertificationRequest; +import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.cli.ProcessInfo; import org.elasticsearch.cli.Terminal; @@ -36,6 +37,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.ssl.KeyStoreUtil; import org.elasticsearch.common.ssl.PemUtils; +import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.IOUtils; @@ -60,6 +62,7 @@ import java.io.Reader; import java.net.InetAddress; import java.net.URI; +import java.net.URISyntaxException; import java.nio.file.FileSystem; import java.nio.file.FileSystems; import java.nio.file.Files; @@ -281,8 +284,7 @@ public void testGeneratingCsr() throws Exception { assertTrue(perms.toString(), perms.contains(PosixFilePermission.OWNER_WRITE)); assertEquals(perms.toString(), 2, perms.size()); - FileSystem fileSystem = FileSystems.newFileSystem(new URI("jar:" + outputFile.toUri()), Collections.emptyMap()); - Path zipRoot = fileSystem.getPath("/"); + final Path zipRoot = getRootPathOfZip(outputFile); assertFalse(Files.exists(zipRoot.resolve("ca"))); for (CertificateInformation certInfo : certInfos) { @@ -341,8 +343,7 @@ public void testGeneratingSignedPemCertificates() throws Exception { assertTrue(perms.toString(), perms.contains(PosixFilePermission.OWNER_WRITE)); assertEquals(perms.toString(), 2, perms.size()); - FileSystem fileSystem = FileSystems.newFileSystem(new URI("jar:" + outputFile.toUri()), Collections.emptyMap()); - Path zipRoot = fileSystem.getPath("/"); + final Path zipRoot = getRootPathOfZip(outputFile); assertFalse(Files.exists(zipRoot.resolve("ca"))); @@ -460,8 +461,7 @@ public void testHandleLongPasswords() throws Exception { Certificate caCert = caKeyStore.getCertificate("ca"); assertThat(caCert, notNullValue()); - FileSystem zip = FileSystems.newFileSystem(new URI("jar:" + pemZipFile.toUri()), Collections.emptyMap()); - Path zipRoot = zip.getPath("/"); + final Path zipRoot = getRootPathOfZip(pemZipFile); final Path keyPath = 
zipRoot.resolve("cert/cert.key"); final PrivateKey key = PemUtils.readPrivateKey(keyPath, () -> longPassword.toCharArray()); @@ -645,7 +645,7 @@ public void testCreateCaAndMultipleInstances() throws Exception { final String node2Ip = "200.182." + randomIntBetween(1, 250) + "." + randomIntBetween(1, 250); final String node3Ip = "200.183." + randomIntBetween(1, 250) + "." + randomIntBetween(1, 250); - final String caPassword = generateCA(caFile, terminal, env); + final String caPassword = generateCA(caFile, terminal, env, false); final GenerateCertificateCommand gen1Command = new PathAwareGenerateCertificateCommand(caFile, node1File); final OptionSet gen1Options = gen1Command.getParser() @@ -716,7 +716,7 @@ public void testCreateCaAndMultipleInstances() throws Exception { node3Ip ); gen3Args.add("-self-signed"); - final GenerateCertificateCommand gen3Command = new PathAwareGenerateCertificateCommand(null, node3File); + final GenerateCertificateCommand gen3Command = new PathAwareGenerateCertificateCommand(Map.of(), node3File); final OptionSet gen3Options = gen3Command.getParser().parse(Strings.toStringArray(gen3Args)); gen3Command.execute(terminal, gen3Options, env, processInfo); @@ -773,7 +773,7 @@ public void testTrustBetweenPEMandPKCS12() throws Exception { Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", tempDir).build()); final Path caFile = tempDir.resolve("ca.p12"); - final String caPassword = generateCA(caFile, terminal, env); + final String caPassword = generateCA(caFile, terminal, env, false); final Path node1Pkcs12 = tempDir.resolve("node1.p12"); final Path pemZip = tempDir.resolve("pem.zip"); @@ -831,8 +831,7 @@ public void testTrustBetweenPEMandPKCS12() throws Exception { assertThat(pemZip, pathExists()); - FileSystem zip2FS = FileSystems.newFileSystem(new URI("jar:" + pemZip.toUri()), Collections.emptyMap()); - Path zip2Root = zip2FS.getPath("/"); + final Path zip2Root = getRootPathOfZip(pemZip); final Path ca2 = zip2Root.resolve("ca/ca.p12"); assertThat(ca2, not(pathExists())); @@ -861,7 +860,7 @@ public void testZipOutputFromCommandLineOptions() throws Exception { final Path zip = tempDir.resolve("pem.zip"); final AtomicBoolean isZip = new AtomicBoolean(false); - final GenerateCertificateCommand genCommand = new PathAwareGenerateCertificateCommand(null, zip) { + final GenerateCertificateCommand genCommand = new PathAwareGenerateCertificateCommand(Map.of(), zip) { @Override void generateAndWriteSignedCertificates( Path output, @@ -892,6 +891,45 @@ Collection getCertificateInformationList(Terminal termin assertThat("For command line option " + optionThatTriggersZip, isZip.get(), equalTo(true)); } + public void testErrorIfSigningCertificateAndKeyDontMatch() throws Exception { + final Path tempDir = initTempDir(); + + final var terminal = MockTerminal.create(); + final var env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", tempDir).build()); + final var processInfo = new ProcessInfo(Map.of(), Map.of(), createTempDir()); + + final Path ca1zip = tempDir.resolve("ca1.zip"); + final String ca1Password = generateCA(ca1zip, terminal, env, true); + terminal.reset(); + final Path ca2zip = tempDir.resolve("ca2.zip"); + final String ca2Password = generateCA(ca2zip, terminal, env, true); + + var ca1Root = getRootPathOfZip(ca1zip); + var ca1Cert = ca1Root.resolve("ca/ca.crt"); + var ca1Key = ca1Root.resolve("ca/ca.key"); + + var ca2Root = getRootPathOfZip(ca2zip); + var ca2Key = ca2Root.resolve("ca/ca.key"); + + var p12Out = 
tempDir.resolve("certs.p12"); + var p12Password = randomAlphaOfLength(8); + + final var gen1Command = new PathAwareGenerateCertificateCommand(Map.of("ca-cert", ca1Cert, "ca-key", ca2Key), p12Out); + final var gen1Options = gen1Command.getParser() + .parse("--ca-cert", "", "--ca-key", "", "--ca-pass", ca2Password, "--out", "", "--pass", p12Password); + + final UserException e = expectThrows(UserException.class, () -> gen1Command.execute(terminal, gen1Options, env, processInfo)); + assertThat(e.exitCode, is(ExitCodes.CONFIG)); + assertThat(e.getMessage(), containsString("Certificate verification failed")); + assertThat(p12Out, not(pathExists())); + + final var gen2Command = new PathAwareGenerateCertificateCommand(Map.of("ca-cert", ca1Cert, "ca-key", ca1Key), p12Out); + final var gen2Options = gen2Command.getParser() + .parse("--ca-cert", "", "--ca-key", "", "--ca-pass", ca1Password, "--out", "", "--pass", p12Password); + gen2Command.execute(terminal, gen2Options, env, processInfo); + assertThat(p12Out, pathExists()); + } + private int getKeySize(Key node1Key) { assertThat(node1Key, instanceOf(RSAKey.class)); return ((RSAKey) node1Key).getModulus().bitLength(); @@ -1034,25 +1072,32 @@ private static Path resolvePath(String path) { return PathUtils.get(path).toAbsolutePath(); } - private String generateCA(Path caFile, MockTerminal terminal, Environment env) throws Exception { + private static Path getRootPathOfZip(Path pemZip) throws IOException, URISyntaxException { + FileSystem zipFS = FileSystems.newFileSystem(new URI("jar:" + pemZip.toUri()), Collections.emptyMap()); + return zipFS.getPath("/"); + } + + private String generateCA(Path caFile, MockTerminal terminal, Environment env, boolean pem) throws Exception { final int caKeySize = randomIntBetween(4, 8) * 512; final int days = randomIntBetween(7, 1500); final String caPassword = randomFrom("", randomAlphaOfLengthBetween(4, 80)); final CertificateAuthorityCommand caCommand = new PathAwareCertificateAuthorityCommand(caFile); - final OptionSet caOptions = caCommand.getParser() - .parse( - "-ca-dn", - "CN=My ElasticSearch Cluster", - "-pass", - caPassword, - "-out", - caFile.toString(), - "-keysize", - String.valueOf(caKeySize), - "-days", - String.valueOf(days) - ); + String[] args = { + "-ca-dn", + "CN=My ElasticSearch Cluster", + "-pass", + caPassword, + "-out", + caFile.toString(), + "-keysize", + String.valueOf(caKeySize), + "-days", + String.valueOf(days) }; + if (pem) { + args = ArrayUtils.append(args, "--pem"); + } + final OptionSet caOptions = caCommand.getParser().parse(args); final ProcessInfo processInfo = new ProcessInfo(Map.of(), Map.of(), createTempDir()); caCommand.execute(terminal, caOptions, env, processInfo); @@ -1091,20 +1136,26 @@ Path resolveOutputPath(Terminal terminal, OptionSet options, String defaultFilen * This class works around that by sticking with the original path objects */ private static class PathAwareGenerateCertificateCommand extends GenerateCertificateCommand { - private final Path caFile; + private final Map inputPaths; private final Path outFile; PathAwareGenerateCertificateCommand(Path caFile, Path outFile) { - this.caFile = caFile; + this(Map.of("ca", caFile), outFile); + } + + PathAwareGenerateCertificateCommand(Map inputPaths, Path outFile) { + this.inputPaths = Map.copyOf(inputPaths); this.outFile = outFile; } @Override protected Path resolvePath(OptionSet options, OptionSpec spec) { - if (spec.options().contains("ca")) { - return caFile; - } - return super.resolvePath(options, spec); + return 
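
Review note: the extracted `getRootPathOfZip` helper uses the JDK's `jar:` NIO filesystem provider to browse a zip without unpacking it. The same trick in isolation; note the returned root is only usable while the underlying `FileSystem` stays open, which these tests never close explicitly:

```java
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.file.FileSystem;
import java.nio.file.FileSystems;
import java.nio.file.Path;
import java.util.Collections;

// Mount a zip through the "jar:" filesystem provider and hand back its
// root; entries can then be read with ordinary java.nio.file calls, e.g.
// Files.exists(root.resolve("ca/ca.crt")).
static Path rootOfZip(Path zip) throws IOException, URISyntaxException {
    FileSystem zipFs = FileSystems.newFileSystem(new URI("jar:" + zip.toUri()), Collections.emptyMap());
    return zipFs.getPath("/");
}
```
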
this.inputPaths.entrySet() + .stream() + .filter(entry -> spec.options().contains(entry.getKey())) + .findFirst() + .map(Entry::getValue) + .orElseGet(() -> super.resolvePath(options, spec)); } @Override diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/AbstractRemoteClusterSecurityDlsAndFlsRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/AbstractRemoteClusterSecurityDlsAndFlsRestIT.java index 9f93392ad13d7..6ffa09dc1f265 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/AbstractRemoteClusterSecurityDlsAndFlsRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/AbstractRemoteClusterSecurityDlsAndFlsRestIT.java @@ -14,6 +14,7 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.rest.ObjectPath; import java.io.IOException; @@ -172,12 +173,17 @@ protected void assertSearchResponseContainsExpectedIndicesAndFields( ) { try { assertOK(searchResponse); - final var searchResult = Arrays.stream(SearchResponse.fromXContent(responseAsParser(searchResponse)).getHits().getHits()) - .collect(Collectors.toMap(SearchHit::getIndex, SearchHit::getSourceAsMap)); + var response = SearchResponseUtils.responseAsSearchResponse(searchResponse); + try { + final var searchResult = Arrays.stream(response.getHits().getHits()) + .collect(Collectors.toMap(SearchHit::getIndex, SearchHit::getSourceAsMap)); - assertThat(searchResult.keySet(), containsInAnyOrder(expectedRemoteIndices)); - for (String remoteIndex : expectedRemoteIndices) { - assertThat(searchResult.get(remoteIndex).keySet(), containsInAnyOrder(expectedFields)); + assertThat(searchResult.keySet(), containsInAnyOrder(expectedRemoteIndices)); + for (String remoteIndex : expectedRemoteIndices) { + assertThat(searchResult.get(remoteIndex).keySet(), containsInAnyOrder(expectedFields)); + } + } finally { + response.decRef(); } } catch (IOException e) { throw new UncheckedIOException(e); @@ -195,13 +201,18 @@ protected void assertSearchResponseContainsExpectedIndicesAndFields( ) { try { assertOK(searchResponse); - final var searchResult = Arrays.stream(SearchResponse.fromXContent(responseAsParser(searchResponse)).getHits().getHits()) - .collect(Collectors.toMap(SearchHit::getIndex, SearchHit::getSourceAsMap)); + var response = SearchResponseUtils.responseAsSearchResponse(searchResponse); + try { + final var searchResult = Arrays.stream(response.getHits().getHits()) + .collect(Collectors.toMap(SearchHit::getIndex, SearchHit::getSourceAsMap)); - assertThat(searchResult.keySet(), equalTo(expectedRemoteIndicesAndFields.keySet())); - for (String remoteIndex : expectedRemoteIndicesAndFields.keySet()) { - Set expectedFields = expectedRemoteIndicesAndFields.get(remoteIndex); - assertThat(searchResult.get(remoteIndex).keySet(), equalTo(expectedFields)); + assertThat(searchResult.keySet(), equalTo(expectedRemoteIndicesAndFields.keySet())); + for (String remoteIndex : expectedRemoteIndicesAndFields.keySet()) { + Set expectedFields = expectedRemoteIndicesAndFields.get(remoteIndex); + assertThat(searchResult.get(remoteIndex).keySet(), equalTo(expectedFields)); + } + } finally { + response.decRef(); } } catch (IOException e) { throw new UncheckedIOException(e); @@ -211,7 +222,7 
@@ protected void assertSearchResponseContainsExpectedIndicesAndFields( protected void assertSearchResponseContainsEmptyResult(Response response) { try { assertOK(response); - SearchResponse searchResponse = SearchResponse.fromXContent(responseAsParser(response)); + SearchResponse searchResponse = SearchResponseUtils.responseAsSearchResponse(response); try { assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); } finally { diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/AbstractRemoteClusterSecurityWithMultipleRemotesRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/AbstractRemoteClusterSecurityWithMultipleRemotesRestIT.java index 536176ed4c833..aa65edae88506 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/AbstractRemoteClusterSecurityWithMultipleRemotesRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/AbstractRemoteClusterSecurityWithMultipleRemotesRestIT.java @@ -214,7 +214,10 @@ private static void searchAndExpect403(String searchPath) { static void searchAndAssertIndicesFound(String searchPath, String... expectedIndices) throws IOException { final Response response = performRequestWithRemoteSearchUser(new Request("GET", searchPath)); assertOK(response); - final SearchResponse searchResponse = SearchResponse.fromXContent(responseAsParser(response)); + final SearchResponse searchResponse; + try (var parser = responseAsParser(response)) { + searchResponse = SearchResponse.fromXContent(parser); + } try { final List actualIndices = Arrays.stream(searchResponse.getHits().getHits()) .map(SearchHit::getIndex) diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java index 4227354561178..d103e3c50ef7e 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java @@ -183,7 +183,10 @@ public void testCrossClusterSearchWithApiKey() throws Exception { ); final Response response = performRequestWithApiKey(searchRequest, apiKeyEncoded); assertOK(response); - final SearchResponse searchResponse = SearchResponse.fromXContent(responseAsParser(response)); + final SearchResponse searchResponse; + try (var parser = responseAsParser(response)) { + searchResponse = SearchResponse.fromXContent(parser); + } try { final List actualIndices = Arrays.stream(searchResponse.getHits().getHits()) .map(SearchHit::getIndex) diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java index 8c01398dd2969..5c4b61537e9a5 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java +++ 
b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java @@ -189,7 +189,10 @@ public void testBwcWithLegacyCrossClusterSearch() throws Exception { ? performRequestWithRemoteAccessUser(searchRequest) : performRequestWithApiKey(searchRequest, apiKeyEncoded); assertOK(response); - final SearchResponse searchResponse = SearchResponse.fromXContent(responseAsParser(response)); + final SearchResponse searchResponse; + try (var parser = responseAsParser(response)) { + searchResponse = SearchResponse.fromXContent(parser); + } try { final List actualIndices = Arrays.stream(searchResponse.getHits().getHits()) .map(SearchHit::getIndex) diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityCcrIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityCcrIT.java index 03489f6365dd1..d4321f63017ad 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityCcrIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityCcrIT.java @@ -276,7 +276,10 @@ private void verifyReplicatedDocuments(long numberOfDocs, String... indices) thr throw new AssertionError(e); } assertOK(response); - final SearchResponse searchResponse = SearchResponse.fromXContent(responseAsParser(response)); + final SearchResponse searchResponse; + try (var parser = responseAsParser(response)) { + searchResponse = SearchResponse.fromXContent(parser); + } try { assertThat(searchResponse.getHits().getTotalHits().value, equalTo(numberOfDocs)); assertThat( diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index 6e78eb2fb5b83..cab0c2bff28f0 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -255,6 +255,7 @@ public class Constants { "cluster:admin/xpack/security/profile/suggest", "cluster:admin/xpack/security/profile/set_enabled", "cluster:admin/xpack/security/realm/cache/clear", + "cluster:admin/xpack/security/remote_cluster_credentials/reload", "cluster:admin/xpack/security/role/delete", "cluster:admin/xpack/security/role/get", "cluster:admin/xpack/security/role/put", @@ -516,6 +517,7 @@ public class Constants { "indices:data/read/eql", "indices:data/read/eql/async/get", "indices:data/read/esql", + "indices:data/read/esql/async/get", "indices:data/read/explain", "indices:data/read/field_caps", "indices:data/read/get", diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java index 3c3b06c84da2a..6c4aaeada74c7 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java +++ 
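
Review note: the security REST test edits above all converge on one discipline for ref-counted search responses: parse inside a closed scope, assert inside `try`, release inside `finally`. The shape, extracted from the hunks themselves (types and calls as they appear in this diff):

```java
// Parser closed by try-with-resources; response released even when an
// assertion throws.
final SearchResponse searchResponse;
try (var parser = responseAsParser(response)) {
    searchResponse = SearchResponse.fromXContent(parser);
}
try {
    // ... assertions on searchResponse.getHits() ...
} finally {
    searchResponse.decRef();
}
```
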
b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Strings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.test.rest.ObjectPath; @@ -22,6 +23,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.security.action.apikey.ApiKey; +import org.elasticsearch.xpack.core.security.action.apikey.ApiKeyTests; import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; @@ -34,6 +36,7 @@ import java.time.Instant; import java.time.temporal.ChronoUnit; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -436,6 +439,23 @@ public void testBulkUpdateApiKey() throws IOException { doTestAuthenticationWithApiKey(apiKeyExpectingNoop.name, apiKeyExpectingNoop.id, apiKeyExpectingNoop.encoded); } + public void testBulkUpdateExpirationTimeApiKey() throws IOException { + final EncodedApiKey apiKey1 = createApiKey("my-api-key-name", Map.of()); + final EncodedApiKey apiKey2 = createApiKey("my-other-api-key-name", Map.of()); + final var bulkUpdateApiKeyRequest = new Request("POST", "_security/api_key/_bulk_update"); + final TimeValue expiration = ApiKeyTests.randomFutureExpirationTime(); + bulkUpdateApiKeyRequest.setJsonEntity( + XContentTestUtils.convertToXContent(Map.of("ids", List.of(apiKey1.id, apiKey2.id), "expiration", expiration), XContentType.JSON) + .utf8ToString() + ); + final Response bulkUpdateApiKeyResponse = performRequestUsingRandomAuthMethod(bulkUpdateApiKeyRequest); + assertOK(bulkUpdateApiKeyResponse); + final Map response = responseAsMap(bulkUpdateApiKeyResponse); + assertEquals(List.of(apiKey1.id(), apiKey2.id()), response.get("updated")); + assertNull(response.get("errors")); + assertEquals(List.of(), response.get("noops")); + } + public void testGrantTargetCanUpdateApiKey() throws IOException { final var request = new Request("POST", "_security/api_key/grant"); request.setOptions( @@ -923,7 +943,7 @@ public void testUpdateCrossClusterApiKey() throws IOException { final ObjectPath createResponse = assertOKAndCreateObjectPath(client().performRequest(createRequest)); final String apiKeyId = createResponse.evaluate("id"); - // Update both access and metadata + // Update access, metadata and expiration final Request updateRequest1 = new Request("PUT", "/_security/cross_cluster/api_key/" + apiKeyId); updateRequest1.setJsonEntity(""" { @@ -940,7 +960,8 @@ public void testUpdateCrossClusterApiKey() throws IOException { } ] }, - "metadata": { "tag": "shared", "points": 0 } + "metadata": { "tag": "shared", "points": 0 }, + "expiration": "30d" }"""); setUserForRequest(updateRequest1, MANAGE_SECURITY_USER, END_USER_PASSWORD); final ObjectPath updateResponse1 = assertOKAndCreateObjectPath(client().performRequest(updateRequest1)); @@ -966,6 +987,7 @@ public void testUpdateCrossClusterApiKey() throws IOException { fetchResponse1.evaluate("api_keys.0.role_descriptors"), equalTo(Map.of("cross_cluster", 
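
Review note: `testBulkUpdateExpirationTimeApiKey` below drives the bulk-update endpoint with the new `expiration` field. The wire shape it sends, written out as a literal body (the ids and the `30d` value are illustrative; the test generates random ones):

```java
// Bulk-update two API keys with a new expiration; a successful response
// lists both ids under "updated", with no "errors" and an empty "noops".
Request request = new Request("POST", "_security/api_key/_bulk_update");
request.setJsonEntity("""
    {
      "ids": [ "api-key-id-1", "api-key-id-2" ],
      "expiration": "30d"
    }""");
```
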
XContentTestUtils.convertToMap(updatedRoleDescriptor1))) ); + assertThat(fetchResponse1.evaluate("api_keys.0.expiration"), notNullValue()); assertThat(fetchResponse1.evaluate("api_keys.0.access"), equalTo(XContentHelper.convertToMap(JsonXContent.jsonXContent, """ { "search": [ @@ -1465,6 +1487,40 @@ private void doTestAuthenticationWithApiKey(final String apiKeyName, final Strin assertThat(authenticate, hasEntry("api_key", Map.of("id", apiKeyId, "name", apiKeyName))); } + private static Map getRandomUpdateApiKeyRequestBody( + final Map oldMetadata, + boolean updateExpiration, + boolean updateMetadata + ) { + return getRandomUpdateApiKeyRequestBody(oldMetadata, updateExpiration, updateMetadata, List.of()); + } + + private static Map getRandomUpdateApiKeyRequestBody( + final Map oldMetadata, + boolean updateExpiration, + boolean updateMetadata, + List ids + ) { + Map updateRequestBody = new HashMap<>(); + + if (updateMetadata) { + updateRequestBody.put("metadata", Map.of("not", "returned (changed)", "foo", "bar")); + } else if (oldMetadata != null) { + updateRequestBody.put("metadata", oldMetadata); + } + + if (updateExpiration) { + updateRequestBody.put("expiration", ApiKeyTests.randomFutureExpirationTime()); + } + + if (ids.isEmpty() == false) { + updateRequestBody.put("ids", ids); + } + + return updateRequestBody; + } + + @SuppressWarnings({ "unchecked" }) private void doTestUpdateApiKey( final String apiKeyName, final String apiKeyId, @@ -1472,19 +1528,17 @@ private void doTestUpdateApiKey( final Map oldMetadata ) throws IOException { final var updateApiKeyRequest = new Request("PUT", "_security/api_key/" + apiKeyId); - final boolean updated = randomBoolean(); - final Map expectedApiKeyMetadata = updated ? Map.of("not", "returned (changed)", "foo", "bar") : oldMetadata; - final Map updateApiKeyRequestBody = expectedApiKeyMetadata == null - ? Map.of() - : Map.of("metadata", expectedApiKeyMetadata); - updateApiKeyRequest.setJsonEntity(XContentTestUtils.convertToXContent(updateApiKeyRequestBody, XContentType.JSON).utf8ToString()); + final boolean updateExpiration = randomBoolean(); + final boolean updateMetadata = randomBoolean(); + final Map updateRequestBody = getRandomUpdateApiKeyRequestBody(oldMetadata, updateExpiration, updateMetadata); + updateApiKeyRequest.setJsonEntity(XContentTestUtils.convertToXContent(updateRequestBody, XContentType.JSON).utf8ToString()); final Response updateApiKeyResponse = performRequestUsingRandomAuthMethod(updateApiKeyRequest); assertOK(updateApiKeyResponse); final Map updateApiKeyResponseMap = responseAsMap(updateApiKeyResponse); - assertEquals(updated, updateApiKeyResponseMap.get("updated")); - expectMetadata(apiKeyId, expectedApiKeyMetadata == null ? Map.of() : expectedApiKeyMetadata); + assertEquals(updateMetadata || updateExpiration, updateApiKeyResponseMap.get("updated")); + expectMetadata(apiKeyId, (Map) updateRequestBody.get("metadata")); // validate authentication still works after update doTestAuthenticationWithApiKey(apiKeyName, apiKeyId, apiKeyEncoded); } @@ -1497,28 +1551,29 @@ private void doTestUpdateApiKeyUsingBulkAction( final Map oldMetadata ) throws IOException { final var bulkUpdateApiKeyRequest = new Request("POST", "_security/api_key/_bulk_update"); - final boolean updated = randomBoolean(); - final Map expectedApiKeyMetadata = updated ? Map.of("not", "returned (changed)", "foo", "bar") : oldMetadata; - final Map bulkUpdateApiKeyRequestBody = expectedApiKeyMetadata == null - ? 
Map.of("ids", List.of(apiKeyId)) - : Map.of("ids", List.of(apiKeyId), "metadata", expectedApiKeyMetadata); - bulkUpdateApiKeyRequest.setJsonEntity( - XContentTestUtils.convertToXContent(bulkUpdateApiKeyRequestBody, XContentType.JSON).utf8ToString() + boolean updateMetadata = randomBoolean(); + boolean updateExpiration = randomBoolean(); + Map updateRequestBody = getRandomUpdateApiKeyRequestBody( + oldMetadata, + updateExpiration, + updateMetadata, + List.of(apiKeyId) ); + bulkUpdateApiKeyRequest.setJsonEntity(XContentTestUtils.convertToXContent(updateRequestBody, XContentType.JSON).utf8ToString()); final Response bulkUpdateApiKeyResponse = performRequestUsingRandomAuthMethod(bulkUpdateApiKeyRequest); assertOK(bulkUpdateApiKeyResponse); final Map bulkUpdateApiKeyResponseMap = responseAsMap(bulkUpdateApiKeyResponse); assertThat(bulkUpdateApiKeyResponseMap, not(hasKey("errors"))); - if (updated) { + if (updateMetadata || updateExpiration) { assertThat((List) bulkUpdateApiKeyResponseMap.get("noops"), empty()); assertThat((List) bulkUpdateApiKeyResponseMap.get("updated"), contains(apiKeyId)); } else { assertThat((List) bulkUpdateApiKeyResponseMap.get("updated"), empty()); assertThat((List) bulkUpdateApiKeyResponseMap.get("noops"), contains(apiKeyId)); } - expectMetadata(apiKeyId, expectedApiKeyMetadata == null ? Map.of() : expectedApiKeyMetadata); + expectMetadata(apiKeyId, (Map) updateRequestBody.get("metadata")); // validate authentication still works after update doTestAuthenticationWithApiKey(apiKeyName, apiKeyId, apiKeyEncoded); } @@ -1604,7 +1659,6 @@ private String headerFromRandomAuthMethod(final String username, final SecureStr } } - @SuppressWarnings({ "unchecked" }) private void expectMetadata(final String apiKeyId, final Map expectedMetadata) throws IOException { final var request = new Request("GET", "_security/api_key/"); request.addParameter("id", apiKeyId); @@ -1613,7 +1667,8 @@ private void expectMetadata(final String apiKeyId, final Map exp try (XContentParser parser = responseAsParser(response)) { final var apiKeyResponse = GetApiKeyResponse.fromXContent(parser); assertThat(apiKeyResponse.getApiKeyInfos().length, equalTo(1)); - assertThat(apiKeyResponse.getApiKeyInfos()[0].getMetadata(), equalTo(expectedMetadata)); + // ApiKey metadata is set to empty Map if null + assertThat(apiKeyResponse.getApiKeyInfos()[0].getMetadata(), equalTo(expectedMetadata == null ? 
Map.of() : expectedMetadata)); } } diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java index 97b52a699749e..875026c02754f 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java @@ -35,10 +35,8 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Tuple; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.test.transport.MockTransportService; @@ -1152,15 +1150,13 @@ private static MockTransportService startTransport( ); channel.sendResponse( new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), - InternalAggregations.EMPTY, - null, - null, - false, - null, - 1 - ), + SearchHits.empty(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), + InternalAggregations.EMPTY, + null, + false, + null, + null, + 1, null, 1, 1, diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java index 9a68ac06d4d19..57a2272e38ce9 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java @@ -13,7 +13,7 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverAction; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; @@ -335,7 +335,7 @@ private void putComposableIndexTemplate( @Nullable Map metadata, @Nullable DataStreamLifecycle lifecycle ) { - PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(id); request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(patterns) @@ -344,7 +344,7 @@ private void putComposableIndexTemplate( .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build() ); - 
client.execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); + client.execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); } private void indexDocuments(Client client, String dataStreamName, int docCount, long startTime) { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleServiceRuntimeSecurityIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleServiceRuntimeSecurityIT.java index f5349cac99ed7..96b5a4445ed7e 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleServiceRuntimeSecurityIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleServiceRuntimeSecurityIT.java @@ -9,7 +9,7 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; @@ -216,7 +216,7 @@ private static void putComposableIndexTemplate( @Nullable Map metadata, @Nullable DataStreamLifecycle lifecycle ) throws IOException { - PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(id); request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(patterns) @@ -225,7 +225,7 @@ private static void putComposableIndexTemplate( .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build() ); - client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); } private static void indexDoc(String dataStream) { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamSecurityIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamSecurityIT.java index 1e67ae572e4ff..5efa2aa46c7bc 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamSecurityIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamSecurityIT.java @@ -10,7 +10,7 @@ import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.datastreams.ModifyDataStreamsAction; import org.elasticsearch.action.search.SearchRequest; @@ -58,14 +58,14 @@ public void testRemoveGhostReference() throws Exception { ); final var client = client().filterWithHeader(headers); - var putTemplateRequest = new PutComposableIndexTemplateAction.Request("id"); + var putTemplateRequest = new 
TransportPutComposableIndexTemplateAction.Request("id"); putTemplateRequest.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(List.of("logs-*")) .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build() ); - assertAcked(client.execute(PutComposableIndexTemplateAction.INSTANCE, putTemplateRequest).actionGet()); + assertAcked(client.execute(TransportPutComposableIndexTemplateAction.TYPE, putTemplateRequest).actionGet()); String dataStreamName = "logs-es"; var request = new CreateDataStreamAction.Request(dataStreamName); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/IndicesPermissionsWithAliasesWildcardsAndRegexsTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/IndicesPermissionsWithAliasesWildcardsAndRegexsTests.java index 0566784e28153..0fdb494ce500a 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/IndicesPermissionsWithAliasesWildcardsAndRegexsTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/IndicesPermissionsWithAliasesWildcardsAndRegexsTests.java @@ -8,7 +8,7 @@ import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; @@ -281,7 +281,7 @@ public void testSearchResolveDataStreams() throws Exception { } private void putComposableIndexTemplate(String id, List patterns) throws IOException { - PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(id); request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(patterns) @@ -289,6 +289,6 @@ private void putComposableIndexTemplate(String id, List patterns) throws .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build() ); - client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); } } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java index 08fb0c79a076c..af54f71779f08 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java @@ -12,7 +12,9 @@ import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; +import 
org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.client.Request; @@ -33,6 +35,7 @@ import org.junit.Before; import java.util.Collections; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; @@ -204,11 +207,10 @@ public void testMonitorRestrictedWildcards() throws Exception { assertThat(getSettingsResponse.getIndexToSettings().containsKey("foobar"), is(true)); assertThat(getSettingsResponse.getIndexToSettings().containsKey("foobarfoo"), is(true)); - final IndicesShardStoresResponse indicesShardsStoresResponse = client.admin() - .indices() - .prepareShardStores(randomFrom("*", "_all", "foo*")) - .setShardStatuses("all") - .get(); + final IndicesShardStoresResponse indicesShardsStoresResponse = client.execute( + TransportIndicesShardStoresAction.TYPE, + new IndicesShardStoresRequest(randomFrom("*", "_all", "foo*")).shardStatuses("all") + ).actionGet(10, TimeUnit.SECONDS); assertThat(indicesShardsStoresResponse.getStoreStatuses().size(), is(3)); assertThat(indicesShardsStoresResponse.getStoreStatuses().containsKey("foo"), is(true)); assertThat(indicesShardsStoresResponse.getStoreStatuses().containsKey("foobar"), is(true)); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/PermissionPrecedenceTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/PermissionPrecedenceTests.java index 789d324d2fa68..f80c232e40f73 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/PermissionPrecedenceTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/PermissionPrecedenceTests.java @@ -8,7 +8,7 @@ import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutIndexTemplateAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; @@ -105,7 +105,7 @@ public void testDifferentCombinationsOfIndices() throws Exception { .indices() .preparePutTemplate("template1") .setPatterns(Collections.singletonList("test_*"))::get, - PutIndexTemplateAction.NAME, + TransportPutIndexTemplateAction.TYPE.name(), "user" ); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ReloadRemoteClusterCredentialsIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ReloadRemoteClusterCredentialsIT.java new file mode 100644 index 0000000000000..7d91f8994c20a --- /dev/null +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ReloadRemoteClusterCredentialsIT.java @@ -0,0 +1,317 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security; + +import org.apache.lucene.search.TotalHits; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequest; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; +import org.elasticsearch.action.admin.cluster.node.reload.TransportNodesReloadSecureSettingsAction; +import org.elasticsearch.action.admin.cluster.remote.RemoteClusterNodesAction; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchShardsRequest; +import org.elasticsearch.action.search.SearchShardsResponse; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchShardsAction; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.VersionInformation; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.env.Environment; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.test.SecuritySingleNodeTestCase; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterCredentialsManager; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.security.authc.ApiKeyService; +import org.elasticsearch.xpack.security.authc.CrossClusterAccessHeaders; +import org.junit.BeforeClass; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class ReloadRemoteClusterCredentialsIT extends SecuritySingleNodeTestCase { + private static final String CLUSTER_ALIAS = "my_remote_cluster"; + + @BeforeClass + public static void disableInFips() { + assumeFalse( + "Cannot run in FIPS mode since the keystore will be password protected and sending a password in the reload " + + "settings api call requires TLS to be configured for the transport
layer", + inFipsJvm() + ); + } + + @Override + public String configRoles() { + return org.elasticsearch.core.Strings.format(""" + user: + cluster: [ "ALL" ] + indices: + - names: '*' + privileges: [ "ALL" ] + remote_indices: + - names: '*' + privileges: [ "ALL" ] + clusters: ["*"] + """); + } + + @Override + public void tearDown() throws Exception { + try { + clearRemoteCluster(); + super.tearDown(); + } finally { + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + } + + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + public void testReloadRemoteClusterCredentials() throws Exception { + final String credentials = randomAlphaOfLength(42); + writeCredentialsToKeyStore(credentials); + final RemoteClusterCredentialsManager clusterCredentialsManager = getInstanceFromNode(TransportService.class) + .getRemoteClusterService() + .getRemoteClusterCredentialsManager(); + // Until we reload, credentials written to keystore are not loaded into the credentials manager + assertThat(clusterCredentialsManager.hasCredentials(CLUSTER_ALIAS), is(false)); + reloadSecureSettings(); + assertThat(clusterCredentialsManager.resolveCredentials(CLUSTER_ALIAS), equalTo(credentials)); + + // Check that credentials get used for a remote connection, once we configure it + final BlockingQueue> capturedHeaders = ConcurrentCollections.newBlockingQueue(); + try (MockTransportService remoteTransport = startTransport("remoteNodeA", threadPool, capturedHeaders)) { + final TransportAddress remoteAddress = remoteTransport.getOriginalTransport() + .profileBoundAddresses() + .get("_remote_cluster") + .publishAddress(); + + configureRemoteCluster(remoteAddress); + + // Run search to trigger header capturing on the receiving side + client().search(new SearchRequest(CLUSTER_ALIAS + ":index-a")).get().decRef(); + + assertHeadersContainCredentialsThenClear(credentials, capturedHeaders); + + // Update credentials and ensure they are used + final String updatedCredentials = randomAlphaOfLength(41); + writeCredentialsToKeyStore(updatedCredentials); + reloadSecureSettings(); + + client().search(new SearchRequest(CLUSTER_ALIAS + ":index-a")).get().decRef(); + + assertHeadersContainCredentialsThenClear(updatedCredentials, capturedHeaders); + } + } + + private void assertHeadersContainCredentialsThenClear(String credentials, BlockingQueue> capturedHeaders) { + assertThat(capturedHeaders, is(not(empty()))); + for (Map actualHeaders : capturedHeaders) { + assertThat(actualHeaders, hasKey(CrossClusterAccessHeaders.CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY)); + assertThat( + actualHeaders.get(CrossClusterAccessHeaders.CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY), + equalTo(ApiKeyService.withApiKeyPrefix(credentials)) + ); + } + capturedHeaders.clear(); + assertThat(capturedHeaders, is(empty())); + } + + private void clearRemoteCluster() throws InterruptedException, ExecutionException { + final var builder = Settings.builder() + .putNull("cluster.remote." + CLUSTER_ALIAS + ".mode") + .putNull("cluster.remote." + CLUSTER_ALIAS + ".seeds") + .putNull("cluster.remote." 
+ CLUSTER_ALIAS + ".proxy_address"); + clusterAdmin().updateSettings(new ClusterUpdateSettingsRequest().persistentSettings(builder)).get(); + } + + @Override + protected Settings nodeSettings() { + return Settings.builder().put(super.nodeSettings()).put("xpack.security.remote_cluster_client.ssl.enabled", false).build(); + } + + private void configureRemoteCluster(TransportAddress remoteAddress) throws InterruptedException, ExecutionException { + final Settings.Builder builder = Settings.builder(); + if (randomBoolean()) { + builder.put("cluster.remote." + CLUSTER_ALIAS + ".mode", "sniff") + .put("cluster.remote." + CLUSTER_ALIAS + ".seeds", remoteAddress.toString()) + .putNull("cluster.remote." + CLUSTER_ALIAS + ".proxy_address"); + } else { + builder.put("cluster.remote." + CLUSTER_ALIAS + ".mode", "proxy") + .put("cluster.remote." + CLUSTER_ALIAS + ".proxy_address", remoteAddress.toString()) + .putNull("cluster.remote." + CLUSTER_ALIAS + ".seeds"); + } + clusterAdmin().updateSettings(new ClusterUpdateSettingsRequest().persistentSettings(builder)).get(); + } + + private void writeCredentialsToKeyStore(String credentials) throws Exception { + final Environment environment = getInstanceFromNode(Environment.class); + final KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create(); + keyStoreWrapper.setString("cluster.remote." + CLUSTER_ALIAS + ".credentials", credentials.toCharArray()); + keyStoreWrapper.save(environment.configFile(), new char[0], false); + } + + public static MockTransportService startTransport( + final String nodeName, + final ThreadPool threadPool, + final BlockingQueue> capturedHeaders + ) { + boolean success = false; + final Settings settings = Settings.builder() + .put("node.name", nodeName) + .put("remote_cluster_server.enabled", "true") + .put("remote_cluster.port", "0") + .put("xpack.security.remote_cluster_server.ssl.enabled", "false") + .build(); + final MockTransportService service = MockTransportService.createNewService( + settings, + VersionInformation.CURRENT, + TransportVersion.current(), + threadPool, + null + ); + try { + service.registerRequestHandler( + ClusterStateAction.NAME, + EsExecutors.DIRECT_EXECUTOR_SERVICE, + ClusterStateRequest::new, + (request, channel, task) -> { + capturedHeaders.add(Map.copyOf(threadPool.getThreadContext().getHeaders())); + channel.sendResponse( + new ClusterStateResponse(ClusterName.DEFAULT, ClusterState.builder(ClusterName.DEFAULT).build(), false) + ); + } + ); + service.registerRequestHandler( + RemoteClusterNodesAction.TYPE.name(), + EsExecutors.DIRECT_EXECUTOR_SERVICE, + RemoteClusterNodesAction.Request::new, + (request, channel, task) -> { + capturedHeaders.add(Map.copyOf(threadPool.getThreadContext().getHeaders())); + channel.sendResponse(new RemoteClusterNodesAction.Response(List.of())); + } + ); + service.registerRequestHandler( + TransportSearchShardsAction.TYPE.name(), + EsExecutors.DIRECT_EXECUTOR_SERVICE, + SearchShardsRequest::new, + (request, channel, task) -> { + capturedHeaders.add(Map.copyOf(threadPool.getThreadContext().getHeaders())); + channel.sendResponse(new SearchShardsResponse(List.of(), List.of(), Collections.emptyMap())); + } + ); + service.registerRequestHandler( + TransportSearchAction.TYPE.name(), + EsExecutors.DIRECT_EXECUTOR_SERVICE, + SearchRequest::new, + (request, channel, task) -> { + capturedHeaders.add(Map.copyOf(threadPool.getThreadContext().getHeaders())); + channel.sendResponse( + new SearchResponse( + SearchHits.empty(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), + 
InternalAggregations.EMPTY, + null, + false, + null, + null, + 1, + null, + 1, + 1, + 0, + 100, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ) + ); + } + ); + service.start(); + service.acceptIncomingRequests(); + success = true; + return service; + } finally { + if (success == false) { + service.close(); + } + } + } + + private void reloadSecureSettings() { + final AtomicReference reloadSettingsError = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); + final SecureString emptyPassword = randomBoolean() ? new SecureString(new char[0]) : null; + + final var request = new NodesReloadSecureSettingsRequest(); + try { + request.nodesIds(Strings.EMPTY_ARRAY); + request.setSecureStorePassword(emptyPassword); + client().execute(TransportNodesReloadSecureSettingsAction.TYPE, request, new ActionListener<>() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(1)); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), nullValue()); + } + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + } finally { + request.decRef(); + } + safeAwait(latch); + if (reloadSettingsError.get() != null) { + throw reloadSettingsError.get(); + } + } +} diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java index 7fc4c1520f9c6..e481cf70b9afe 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java @@ -20,7 +20,6 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.ScrollHelper; @@ -83,22 +82,28 @@ public void testFetchAllByEntityWithBrokenScroll() { String scrollId = randomAlphaOfLength(5); SearchHit[] hits = new SearchHit[] { new SearchHit(1), new SearchHit(2) }; - InternalSearchResponse internalResponse = new InternalSearchResponse( - new SearchHits(hits, new TotalHits(3, TotalHits.Relation.EQUAL_TO), 1), - null, - null, - null, - false, - false, - 1 - ); Answer returnResponse = invocation -> { @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[1]; ActionListener.respondAndRelease( listener, - new SearchResponse(internalResponse, scrollId, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY) + new SearchResponse( + new SearchHits(hits, new TotalHits(3, TotalHits.Relation.EQUAL_TO), 1), + null, + null, + false, + false, + null, + 1, + scrollId, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ) ); 
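Both construction sites above switch from wrapping an `InternalSearchResponse` to the flattened `SearchResponse` constructor. For review purposes, here is the transport stub's call again with the argument positions annotated; the parameter names are my reading of the new signature, not something stated in this diff:

```java
// Annotated copy of the call above; double-check the names against SearchResponse.
new SearchResponse(
    SearchHits.empty(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), // hits
    InternalAggregations.EMPTY, // aggregations
    null,   // suggest
    false,  // timedOut
    null,   // terminatedEarly
    null,   // profile results
    1,      // numReducePhases
    null,   // scrollId
    1,      // totalShards
    1,      // successfulShards
    0,      // skippedShards
    100,    // took, in milliseconds
    ShardSearchFailure.EMPTY_ARRAY,
    SearchResponse.Clusters.EMPTY
);
```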
return null; }; diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index 72a6b6049932c..1329158f57d4d 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -1959,7 +1959,7 @@ public void testUpdateApiKeysForSingleKey() throws Exception { null ) ); - final var request = new UpdateApiKeyRequest(apiKeyId, newRoleDescriptors, ApiKeyTests.randomMetadata()); + final var request = new UpdateApiKeyRequest(apiKeyId, newRoleDescriptors, ApiKeyTests.randomMetadata(), null); final UpdateApiKeyResponse response = updateSingleApiKeyMaybeUsingBulkAction(TEST_USER_NAME, request); @@ -2030,7 +2030,7 @@ public void testBulkUpdateApiKeysForMultipleKeys() throws ExecutionException, In BulkUpdateApiKeyResponse response = executeBulkUpdateApiKey( TEST_USER_NAME, - new BulkUpdateApiKeyRequest(apiKeyIds, newRoleDescriptors, newMetadata) + new BulkUpdateApiKeyRequest(apiKeyIds, newRoleDescriptors, newMetadata, ApiKeyTests.randomFutureExpirationTime()) ); assertNotNull(response); @@ -2070,7 +2070,7 @@ public void testBulkUpdateApiKeysForMultipleKeys() throws ExecutionException, In () -> randomValueOtherThanMany(apiKeyIds::contains, () -> randomAlphaOfLength(10)) ); newIds.addAll(notFoundIds); - final BulkUpdateApiKeyRequest request = new BulkUpdateApiKeyRequest(shuffledList(newIds), newRoleDescriptors, newMetadata); + final BulkUpdateApiKeyRequest request = new BulkUpdateApiKeyRequest(shuffledList(newIds), newRoleDescriptors, newMetadata, null); response = executeBulkUpdateApiKey(TEST_USER_NAME, request); @@ -2100,7 +2100,8 @@ public void testBulkUpdateApiKeysForMultipleKeys() throws ExecutionException, In final BulkUpdateApiKeyRequest requestWithSomeErrors = new BulkUpdateApiKeyRequest( shuffledList(apiKeyIds), randomValueOtherThan(null, this::randomRoleDescriptors), - randomValueOtherThan(null, ApiKeyTests::randomMetadata) + randomValueOtherThan(null, ApiKeyTests::randomMetadata), + ApiKeyTests.randomFutureExpirationTime() ); response = executeBulkUpdateApiKey(TEST_USER_NAME, requestWithSomeErrors); @@ -2124,7 +2125,7 @@ public void testBulkUpdateApiKeysWithDuplicates() throws ExecutionException, Int BulkUpdateApiKeyResponse response = executeBulkUpdateApiKey( TEST_USER_NAME, - new BulkUpdateApiKeyRequest(idsWithDuplicates, newRoleDescriptors, newMetadata) + new BulkUpdateApiKeyRequest(idsWithDuplicates, newRoleDescriptors, newMetadata, ApiKeyTests.randomFutureExpirationTime()) ); assertNotNull(response); @@ -2142,7 +2143,12 @@ public void testBulkUpdateApiKeysWithDuplicates() throws ExecutionException, Int response = executeBulkUpdateApiKey( TEST_USER_NAME, - new BulkUpdateApiKeyRequest(notFoundIdsWithDuplicates, newRoleDescriptors, newMetadata) + new BulkUpdateApiKeyRequest( + notFoundIdsWithDuplicates, + newRoleDescriptors, + newMetadata, + ApiKeyTests.randomFutureExpirationTime() + ) ); assertNotNull(response); @@ -2317,7 +2323,12 @@ public void testUpdateApiKeysNotFoundScenarios() throws Exception { final Tuple> createdApiKey = createApiKey(TEST_USER_NAME, null); final var apiKeyId = createdApiKey.v1().getId(); final var expectedRoleDescriptor = new RoleDescriptor(randomAlphaOfLength(10), new String[] { "all" }, null, null); - 
final var request = new UpdateApiKeyRequest(apiKeyId, List.of(expectedRoleDescriptor), ApiKeyTests.randomMetadata()); + final var request = new UpdateApiKeyRequest( + apiKeyId, + List.of(expectedRoleDescriptor), + ApiKeyTests.randomMetadata(), + ApiKeyTests.randomFutureExpirationTime() + ); // Validate can update own API key final UpdateApiKeyResponse response = updateSingleApiKeyMaybeUsingBulkAction(TEST_USER_NAME, request); @@ -2326,12 +2337,24 @@ // Test not found exception on non-existent API key final var otherApiKeyId = randomValueOtherThan(apiKeyId, () -> randomAlphaOfLength(20)); - doTestUpdateApiKeysNotFound(new UpdateApiKeyRequest(otherApiKeyId, request.getRoleDescriptors(), request.getMetadata())); + doTestUpdateApiKeysNotFound( + new UpdateApiKeyRequest( + otherApiKeyId, + request.getRoleDescriptors(), + request.getMetadata(), + ApiKeyTests.randomFutureExpirationTime() + ) + ); // Test not found exception on other user's API key final Tuple> otherUsersApiKey = createApiKey("user_with_manage_api_key_role", null); doTestUpdateApiKeysNotFound( - new UpdateApiKeyRequest(otherUsersApiKey.v1().getId(), request.getRoleDescriptors(), request.getMetadata()) + new UpdateApiKeyRequest( + otherUsersApiKey.v1().getId(), + request.getRoleDescriptors(), + request.getMetadata(), + ApiKeyTests.randomFutureExpirationTime() + ) ); // Test not found exception on API key of user with the same username but from a different realm @@ -2351,7 +2374,12 @@ "all" ).v1().get(0); doTestUpdateApiKeysNotFound( - new UpdateApiKeyRequest(apiKeyForNativeRealmUser.getId(), request.getRoleDescriptors(), request.getMetadata()) + new UpdateApiKeyRequest( + apiKeyForNativeRealmUser.getId(), + request.getRoleDescriptors(), + request.getMetadata(), + ApiKeyTests.randomFutureExpirationTime() + ) ); } @@ -2364,7 +2392,12 @@ final var apiKeyId = createdApiKey.getId(); final var roleDescriptor = new RoleDescriptor(randomAlphaOfLength(10), new String[] { "manage_own_api_key" }, null, null); - final var request = new UpdateApiKeyRequest(apiKeyId, List.of(roleDescriptor), ApiKeyTests.randomMetadata()); + final var request = new UpdateApiKeyRequest( + apiKeyId, + List.of(roleDescriptor), + ApiKeyTests.randomMetadata(), + ApiKeyTests.randomFutureExpirationTime() + ); final PlainActionFuture updateListener = new PlainActionFuture<>(); client().filterWithHeader( Collections.singletonMap( @@ -2465,7 +2498,8 @@ public void testUpdateApiKeysNoopScenarios() throws Exception { List.of(new RoleDescriptor(randomAlphaOfLength(10), new String[] { "all" }, null, null)), // Ensure not `null` to set metadata since we use the initialRequest further down in the test to ensure that // metadata updates are non-noops - randomValueOtherThanMany(Objects::isNull, ApiKeyTests::randomMetadata) + randomValueOtherThanMany(Objects::isNull, ApiKeyTests::randomMetadata), + null // Expiration is relative to the current time, so it must be null to cause a noop ); UpdateApiKeyResponse response = updateSingleApiKeyMaybeUsingBulkAction(TEST_USER_NAME, initialRequest); assertNotNull(response); @@ -2501,14 +2535,17 @@ () -> RoleDescriptorTests.randomRoleDescriptor(false) ) ); - response = updateSingleApiKeyMaybeUsingBulkAction(TEST_USER_NAME, new UpdateApiKeyRequest(apiKeyId, newRoleDescriptors,
null)); + response = updateSingleApiKeyMaybeUsingBulkAction( + TEST_USER_NAME, + new UpdateApiKeyRequest(apiKeyId, newRoleDescriptors, null, null) + ); assertNotNull(response); assertTrue(response.isUpdated()); // Update with re-ordered role descriptors is a noop response = updateSingleApiKeyMaybeUsingBulkAction( TEST_USER_NAME, - new UpdateApiKeyRequest(apiKeyId, List.of(newRoleDescriptors.get(1), newRoleDescriptors.get(0)), null) + new UpdateApiKeyRequest(apiKeyId, List.of(newRoleDescriptors.get(1), newRoleDescriptors.get(0)), null, null) ); assertNotNull(response); assertFalse(response.isUpdated()); @@ -2519,7 +2556,8 @@ public void testUpdateApiKeysNoopScenarios() throws Exception { new UpdateApiKeyRequest( apiKeyId, null, - randomValueOtherThanMany(md -> md == null || md.equals(initialRequest.getMetadata()), ApiKeyTests::randomMetadata) + randomValueOtherThanMany(md -> md == null || md.equals(initialRequest.getMetadata()), ApiKeyTests::randomMetadata), + null ) ); assertNotNull(response); @@ -2677,7 +2715,8 @@ public void testUpdateApiKeysClearsApiKeyDocCache() throws Exception { apiKey1.v1(), List.of(), // Set metadata to ensure update - Map.of(randomAlphaOfLength(5), randomAlphaOfLength(10)) + Map.of(randomAlphaOfLength(5), randomAlphaOfLength(10)), + ApiKeyTests.randomFutureExpirationTime() ) ); @@ -3251,7 +3290,12 @@ private UpdateApiKeyResponse updateSingleApiKeyMaybeUsingBulkAction(final String if (useBulkAction) { final BulkUpdateApiKeyResponse response = executeBulkUpdateApiKey( username, - new BulkUpdateApiKeyRequest(List.of(request.getId()), request.getRoleDescriptors(), request.getMetadata()) + new BulkUpdateApiKeyRequest( + List.of(request.getId()), + request.getRoleDescriptors(), + request.getMetadata(), + request.getExpiration() + ) ); return toUpdateApiKeyResponse(request.getId(), response); } else { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java index 19d29ef251dd1..faa85150dca31 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java @@ -44,8 +44,10 @@ import java.time.temporal.ChronoUnit; import java.util.ArrayList; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -635,11 +637,11 @@ public void testRefreshingMultipleTimesWithinWindowSucceeds() throws Exception { } } logger.info("received access token [{}] and refresh token [{}]", result.accessToken(), result.getRefreshToken()); - completedLatch.countDown(); } catch (IOException e) { failed.set(true); - completedLatch.countDown(); logger.error("caught exception", e); + } finally { + completedLatch.countDown(); } })); } @@ -655,7 +657,9 @@ public void testRefreshingMultipleTimesWithinWindowSucceeds() throws Exception { assertThat(failed.get(), equalTo(false)); // Assert that we only ever got one token/refresh_token pair synchronized (tokens) { - assertThat((int) tokens.stream().distinct().count(), equalTo(1)); + Set uniqueTokens = new HashSet<>(tokens); + logger.info("Unique tokens received from refreshToken call [{}]", 
uniqueTokens); + assertThat(uniqueTokens.size(), equalTo(1)); } } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java index 91884086af959..33cd3de9e0685 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.action.Grant; import org.elasticsearch.xpack.core.security.action.apikey.ApiKey; +import org.elasticsearch.xpack.core.security.action.apikey.ApiKeyTests; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequestBuilder; @@ -706,7 +707,13 @@ public void testUpdateCrossClusterApiKey() throws IOException { updateMetadata = null; } - final var updateApiKeyRequest = new UpdateCrossClusterApiKeyRequest(apiKeyId, roleDescriptorBuilder, updateMetadata); + final boolean shouldUpdateExpiration = randomBoolean(); + TimeValue expiration = null; + if (shouldUpdateExpiration) { + expiration = ApiKeyTests.randomFutureExpirationTime(); + } + + final var updateApiKeyRequest = new UpdateCrossClusterApiKeyRequest(apiKeyId, roleDescriptorBuilder, updateMetadata, expiration); final UpdateApiKeyResponse updateApiKeyResponse = client().execute(UpdateCrossClusterApiKeyAction.INSTANCE, updateApiKeyRequest) .actionGet(); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java index 07f35e499d43a..fa38ce7314494 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.security.authc.pki; import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; +import org.apache.lucene.util.Constants; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -34,11 +35,13 @@ public class PkiOptionalClientAuthTests extends SecuritySingleNodeTestCase { + private static final int NUMBER_OF_CLIENT_PORTS = Constants.WINDOWS ?
300 : 100; + private static int randomClientPort; @BeforeClass public static void initPort() { - randomClientPort = randomIntBetween(49000, 65500); + randomClientPort = randomIntBetween(49152, 65535 - NUMBER_OF_CLIENT_PORTS); } @Override @@ -47,7 +50,7 @@ protected boolean addMockHttpTransport() { } protected Settings nodeSettings() { - String randomClientPortRange = randomClientPort + "-" + (randomClientPort + 100); + String randomClientPortRange = randomClientPort + "-" + (randomClientPort + NUMBER_OF_CLIENT_PORTS); Settings.Builder builder = Settings.builder() .put(super.nodeSettings()) diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java index d3e1f736c1267..e6bf367845990 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerIntegTests.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.Template; @@ -143,8 +143,10 @@ public void testSecurityIndexSettingsCannotBeChanged() throws Exception { .build(); assertAcked( client().execute( - PutComposableIndexTemplateAction.INSTANCE, - new PutComposableIndexTemplateAction.Request("composable-template-covering-the-main-security-index").indexTemplate(cit) + TransportPutComposableIndexTemplateAction.TYPE, + new TransportPutComposableIndexTemplateAction.Request("composable-template-covering-the-main-security-index").indexTemplate( + cit + ) ) ); // trigger index auto-creation diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java index 16e0b322efcac..f479c4703194b 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.security.transport.filter; +import org.apache.lucene.util.Constants; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.core.SuppressForbidden; @@ -28,11 +29,14 @@ // no client nodes as they all get rejected on network connections @ClusterScope(scope = Scope.SUITE, numDataNodes = 0, numClientNodes = 0) public class IpFilteringIntegrationTests extends SecurityIntegTestCase { + + private static final int NUMBER_OF_CLIENT_PORTS = Constants.WINDOWS ? 
300 : 100; + private static int randomClientPort; @BeforeClass public static void getRandomPort() { - randomClientPort = randomIntBetween(49000, 65500); // ephemeral port + randomClientPort = randomIntBetween(49152, 65535 - NUMBER_OF_CLIENT_PORTS); // ephemeral port } @Override @@ -42,7 +46,7 @@ protected boolean addMockHttpTransport() { @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { - String randomClientPortRange = randomClientPort + "-" + (randomClientPort + 100); + String randomClientPortRange = randomClientPort + "-" + (randomClientPort + NUMBER_OF_CLIENT_PORTS); return Settings.builder() .put(super.nodeSettings(nodeOrdinal, otherSettings)) .put("transport.profiles.client.port", randomClientPortRange) diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java index d5cb0f165b89d..0b1d33cb35c97 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.security.transport.filter; +import org.apache.lucene.util.Constants; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; @@ -26,13 +27,15 @@ @ClusterScope(scope = TEST, supportsDedicatedMasters = false, numDataNodes = 1) public class IpFilteringUpdateTests extends SecurityIntegTestCase { + private static final int NUMBER_OF_CLIENT_PORTS = Constants.WINDOWS ? 
300 : 100; + private static int randomClientPort; private final boolean httpEnabled = randomBoolean(); @BeforeClass public static void getRandomPort() { - randomClientPort = randomIntBetween(49000, 65500); + randomClientPort = randomIntBetween(49152, 65535 - NUMBER_OF_CLIENT_PORTS); } @Override @@ -42,7 +45,7 @@ protected boolean addMockHttpTransport() { @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { - String randomClientPortRange = randomClientPort + "-" + (randomClientPort + 100); + String randomClientPortRange = randomClientPort + "-" + (randomClientPort + NUMBER_OF_CLIENT_PORTS); return Settings.builder() .put(super.nodeSettings(nodeOrdinal, otherSettings)) .put("xpack.security.transport.filter.deny", "127.0.0.200") diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 51a902d7e12c0..a9af4b4ba104a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -13,6 +13,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.TransportVersion; @@ -21,6 +22,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.DestructiveOperations; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; @@ -87,6 +89,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.FixedExecutorBuilder; @@ -110,6 +113,7 @@ import org.elasticsearch.xpack.core.security.SecurityExtension; import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.security.SecuritySettings; +import org.elasticsearch.xpack.core.security.action.ActionTypes; import org.elasticsearch.xpack.core.security.action.ClearSecurityCacheAction; import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationAction; import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyAction; @@ -244,6 +248,7 @@ import org.elasticsearch.xpack.security.action.service.TransportGetServiceAccountCredentialsAction; import org.elasticsearch.xpack.security.action.service.TransportGetServiceAccountNodesCredentialsAction; import org.elasticsearch.xpack.security.action.settings.TransportGetSecuritySettingsAction; +import org.elasticsearch.xpack.security.action.settings.TransportReloadRemoteClusterCredentialsAction; import org.elasticsearch.xpack.security.action.settings.TransportUpdateSecuritySettingsAction; import org.elasticsearch.xpack.security.action.token.TransportCreateTokenAction; import org.elasticsearch.xpack.security.action.token.TransportInvalidateTokenAction; @@ -364,7 +369,6 @@ import 
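The same port-range fix lands in `PkiOptionalClientAuthTests`, `IpFilteringIntegrationTests`, and `IpFilteringUpdateTests` above: the base port now starts at 49152, the beginning of the IANA dynamic/ephemeral range, the window is wider on Windows (presumably to make finding a free port more reliable there), and the upper bound of `randomIntBetween` shrinks by the window size so the whole range stays at or below 65535. A standalone sketch of that invariant (hypothetical helper, not from this PR):

```java
import java.util.concurrent.ThreadLocalRandom;

// Hypothetical mirror of the test setup: pick a base so that the whole
// window [base, base + width] stays inside the dynamic range 49152-65535.
static String randomClientPortRange(boolean windows) {
    int width = windows ? 300 : 100;
    int base = ThreadLocalRandom.current().nextInt(49152, 65535 - width + 1); // bound is exclusive
    return base + "-" + (base + width);
}
```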
org.elasticsearch.xpack.security.support.CacheInvalidatorRegistry; import org.elasticsearch.xpack.security.support.ExtensionComponents; import org.elasticsearch.xpack.security.support.SecuritySystemIndices; -import org.elasticsearch.xpack.security.transport.RemoteClusterCredentialsResolver; import org.elasticsearch.xpack.security.transport.SecurityHttpSettings; import org.elasticsearch.xpack.security.transport.SecurityServerTransportInterceptor; import org.elasticsearch.xpack.security.transport.filter.IPFilter; @@ -372,6 +376,7 @@ import java.io.IOException; import java.net.InetSocketAddress; +import java.security.Provider; import java.time.Clock; import java.util.ArrayList; import java.util.Arrays; @@ -386,6 +391,7 @@ import java.util.Objects; import java.util.Set; import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.Predicate; @@ -554,6 +560,7 @@ public class Security extends Plugin private final SetOnce reservedRoleMappingAction = new SetOnce<>(); private final SetOnce workflowService = new SetOnce<>(); private final SetOnce realms = new SetOnce<>(); + private final SetOnce client = new SetOnce<>(); public Security(Settings settings) { this(settings, Collections.emptyList()); @@ -573,25 +580,30 @@ public Security(Settings settings) { runStartupChecks(settings); Automatons.updateConfiguration(settings); } else { - final List remoteClusterCredentialsSettingKeys = RemoteClusterService.REMOTE_CLUSTER_CREDENTIALS.getAllConcreteSettings( - settings - ).map(Setting::getKey).sorted().toList(); - if (false == remoteClusterCredentialsSettingKeys.isEmpty()) { - throw new IllegalArgumentException( - format( - "Found [%s] remote clusters with credentials [%s]. Security [%s] must be enabled to connect to them. " - + "Please either enable security or remove these settings from the keystore.", - remoteClusterCredentialsSettingKeys.size(), - Strings.collectionToCommaDelimitedString(remoteClusterCredentialsSettingKeys), - XPackSettings.SECURITY_ENABLED.getKey() - ) - ); - } + ensureNoRemoteClusterCredentialsOnDisabledSecurity(settings); this.bootstrapChecks.set(Collections.emptyList()); } this.securityExtensions.addAll(extensions); } + private void ensureNoRemoteClusterCredentialsOnDisabledSecurity(Settings settings) { + assert false == enabled; + final List remoteClusterCredentialsSettingKeys = RemoteClusterService.REMOTE_CLUSTER_CREDENTIALS.getAllConcreteSettings( + settings + ).map(Setting::getKey).sorted().toList(); + if (false == remoteClusterCredentialsSettingKeys.isEmpty()) { + throw new IllegalArgumentException( + format( + "Found [%s] remote clusters with credentials [%s]. Security [%s] must be enabled to connect to them. 
" + + "Please either enable security or remove these settings from the keystore.", + remoteClusterCredentialsSettingKeys.size(), + Strings.collectionToCommaDelimitedString(remoteClusterCredentialsSettingKeys), + XPackSettings.SECURITY_ENABLED.getKey() + ) + ); + } + } + private static void runStartupChecks(Settings settings) { validateRealmSettings(settings); if (XPackSettings.FIPS_MODE_ENABLED.get(settings)) { @@ -616,6 +628,14 @@ protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } + protected Client getClient() { + return client.get(); + } + + protected Realms getRealms() { + return realms.get(); + } + @Override public Collection createComponents(PluginServices services) { try { @@ -629,7 +649,8 @@ public Collection createComponents(PluginServices services) { services.xContentRegistry(), services.environment(), services.nodeEnvironment().nodeMetadata(), - services.indexNameExpressionResolver() + services.indexNameExpressionResolver(), + services.telemetryProvider() ); } catch (final Exception e) { throw new IllegalStateException("security initialization failed", e); @@ -647,13 +668,16 @@ Collection createComponents( NamedXContentRegistry xContentRegistry, Environment environment, NodeMetadata nodeMetadata, - IndexNameExpressionResolver expressionResolver + IndexNameExpressionResolver expressionResolver, + TelemetryProvider telemetryProvider ) throws Exception { logger.info("Security is {}", enabled ? "enabled" : "disabled"); if (enabled == false) { return Collections.singletonList(new SecurityUsageServices(null, null, null, null, null, null)); } + this.client.set(client); + // The settings in `environment` may have additional values over what was provided during construction // See Plugin#additionalSettings() this.settings = environment.settings(); @@ -923,7 +947,8 @@ Collection createComponents( tokenService, apiKeyService, serviceAccountService, - operatorPrivilegesService.get() + operatorPrivilegesService.get(), + telemetryProvider.getMeterRegistry() ) ); components.add(authcService.get()); @@ -980,8 +1005,6 @@ Collection createComponents( ipFilter.set(new IPFilter(settings, auditTrailService, clusterService.getClusterSettings(), getLicenseState())); components.add(ipFilter.get()); - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = new RemoteClusterCredentialsResolver(settings); - DestructiveOperations destructiveOperations = new DestructiveOperations(settings, clusterService.getClusterSettings()); crossClusterAccessAuthcService.set(new CrossClusterAccessAuthenticationService(clusterService, apiKeyService, authcService.get())); components.add(crossClusterAccessAuthcService.get()); @@ -995,7 +1018,6 @@ Collection createComponents( securityContext.get(), destructiveOperations, crossClusterAccessAuthcService.get(), - remoteClusterCredentialsResolver, getLicenseState() ) ); @@ -1161,6 +1183,7 @@ public static List> getSettings(List securityExten // The following just apply in node mode settingsList.add(XPackSettings.FIPS_MODE_ENABLED); + settingsList.add(XPackSettings.FIPS_REQUIRED_PROVIDERS); SSLService.registerSettings(settingsList); // IP Filter settings @@ -1348,6 +1371,7 @@ public void onIndexModule(IndexModule module) { new ActionHandler<>(SetProfileEnabledAction.INSTANCE, TransportSetProfileEnabledAction.class), new ActionHandler<>(GetSecuritySettingsAction.INSTANCE, TransportGetSecuritySettingsAction.class), new ActionHandler<>(UpdateSecuritySettingsAction.INSTANCE, TransportUpdateSecuritySettingsAction.class), + 
new ActionHandler<>(ActionTypes.RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION, TransportReloadRemoteClusterCredentialsAction.class), usageAction, infoAction ).filter(Objects::nonNull).toList(); @@ -1543,6 +1567,30 @@ static void validateForFips(Settings settings) { } }); + Set foundProviders = new HashSet<>(); + for (Provider provider : java.security.Security.getProviders()) { + foundProviders.add(provider.getName().toLowerCase(Locale.ROOT)); + if (logger.isTraceEnabled()) { + logger.trace("Security Provider: " + provider.getName() + ", Version: " + provider.getVersionStr()); + provider.entrySet().forEach(entry -> { logger.trace("\t" + entry.getKey()); }); + } + } + + final List requiredProviders = XPackSettings.FIPS_REQUIRED_PROVIDERS.get(settings); + logger.info("JVM Security Providers: " + foundProviders); + if (requiredProviders != null && requiredProviders.isEmpty() == false) { + List unsatisfiedProviders = requiredProviders.stream() + .map(s -> s.toLowerCase(Locale.ROOT)) + .filter(element -> foundProviders.contains(element) == false) + .toList(); + + if (unsatisfiedProviders.isEmpty() == false) { + String errorMessage = "Could not find required FIPS security provider: " + unsatisfiedProviders; + logger.error(errorMessage); + validationErrors.add(errorMessage); + } + } + if (validationErrors.isEmpty() == false) { final StringBuilder sb = new StringBuilder(); sb.append("Validation for FIPS 140 mode failed: \n"); @@ -1887,16 +1935,56 @@ public BiConsumer getJoinValidator() { @Override public void reload(Settings settings) throws Exception { if (enabled) { - realms.get().stream().filter(r -> JwtRealmSettings.TYPE.equals(r.realmRef().getType())).forEach(realm -> { - if (realm instanceof JwtRealm jwtRealm) { - jwtRealm.rotateClientSecret( - CLIENT_AUTHENTICATION_SHARED_SECRET.getConcreteSettingForNamespace(realm.realmRef().getName()).get(settings) - ); - } - }); + final List reloadExceptions = new ArrayList<>(); + try { + reloadRemoteClusterCredentials(settings); + } catch (Exception ex) { + reloadExceptions.add(ex); + } + + try { + reloadSharedSecretsForJwtRealms(settings); + } catch (Exception ex) { + reloadExceptions.add(ex); + } + + if (false == reloadExceptions.isEmpty()) { + final var combinedException = new ElasticsearchException( + "secure settings reload failed for one or more security components" + ); + reloadExceptions.forEach(combinedException::addSuppressed); + throw combinedException; + } + } else { + ensureNoRemoteClusterCredentialsOnDisabledSecurity(settings); } } + private void reloadSharedSecretsForJwtRealms(Settings settingsWithKeystore) { + getRealms().stream().filter(r -> JwtRealmSettings.TYPE.equals(r.realmRef().getType())).forEach(realm -> { + if (realm instanceof JwtRealm jwtRealm) { + jwtRealm.rotateClientSecret( + CLIENT_AUTHENTICATION_SHARED_SECRET.getConcreteSettingForNamespace(realm.realmRef().getName()).get(settingsWithKeystore) + ); + } + }); + } + + /** + * This method uses a transport action internally to access classes that are injectable but not part of the plugin contract. + * See {@link TransportReloadRemoteClusterCredentialsAction} for more context. 
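+ *
+ * <p>For illustration only: the keystore entries this method consumes are typically rotated and then applied without a
+ * node restart (sketch; the {@code my_cluster} alias is a hypothetical example):
+ * <pre>
+ * bin/elasticsearch-keystore add cluster.remote.my_cluster.credentials
+ * POST _nodes/reload_secure_settings
+ * </pre>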
+ */ + private void reloadRemoteClusterCredentials(Settings settingsWithKeystore) { + final PlainActionFuture future = new PlainActionFuture<>(); + getClient().execute( + ActionTypes.RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION, + new TransportReloadRemoteClusterCredentialsAction.Request(settingsWithKeystore), + future + ); + assert future.isDone() : "expecting local-only action call to return immediately on invocation"; + future.actionGet(0, TimeUnit.NANOSECONDS); + } + static final class ValidateLicenseForFIPS implements BiConsumer { private final boolean inFipsMode; private final LicenseService licenseService; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportUpdateCrossClusterApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportUpdateCrossClusterApiKeyAction.java index 011b95565e030..a47bbb0301ebc 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportUpdateCrossClusterApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportUpdateCrossClusterApiKeyAction.java @@ -50,7 +50,12 @@ void doExecuteUpdate( ) { apiKeyService.updateApiKeys( authentication, - new BaseBulkUpdateApiKeyRequest(List.of(request.getId()), request.getRoleDescriptors(), request.getMetadata()) { + new BaseBulkUpdateApiKeyRequest( + List.of(request.getId()), + request.getRoleDescriptors(), + request.getMetadata(), + request.getExpiration() + ) { @Override public ApiKey.Type getType() { return ApiKey.Type.CROSS_CLUSTER; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/settings/TransportReloadRemoteClusterCredentialsAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/settings/TransportReloadRemoteClusterCredentialsAction.java new file mode 100644 index 0000000000000..d6f54e9d3e9e1 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/settings/TransportReloadRemoteClusterCredentialsAction.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.action.settings; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.ActionTypes; +import org.elasticsearch.xpack.security.Security; + +import java.io.IOException; + +/** + * This is a local-only action which updates remote cluster credentials for remote cluster connections, from keystore settings reloaded via + * a call to {@link org.elasticsearch.rest.action.admin.cluster.RestReloadSecureSettingsAction}. 
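+ *
+ * <p>A caller-side sketch of the local-only invocation (for illustration; {@code client} and {@code settings} are
+ * assumed to be in scope, mirroring how the reload in {@code Security} drives this action):
+ * <pre>{@code
+ * PlainActionFuture<ActionResponse.Empty> future = new PlainActionFuture<>();
+ * client.execute(ActionTypes.RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION, new Request(settings), future);
+ * future.actionGet(0, TimeUnit.NANOSECONDS); // local-only execution completes inline
+ * }</pre>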
+ * + * It's invoked as part of the {@link Security#reload(Settings)} call. + * + * This action is largely an implementation detail to work around the fact that Security is a plugin without direct access to many core + * classes, including the {@link RemoteClusterService} which is required for a credentials reload. A transport action gives us access to + * the {@link RemoteClusterService} which is injectable but not part of the plugin contract. + */ +public class TransportReloadRemoteClusterCredentialsAction extends TransportAction< + TransportReloadRemoteClusterCredentialsAction.Request, + ActionResponse.Empty> { + + private final RemoteClusterService remoteClusterService; + + @Inject + public TransportReloadRemoteClusterCredentialsAction(TransportService transportService, ActionFilters actionFilters) { + super(ActionTypes.RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION.name(), actionFilters, transportService.getTaskManager()); + this.remoteClusterService = transportService.getRemoteClusterService(); + } + + @Override + protected void doExecute(Task task, Request request, ActionListener listener) { + // We avoid stashing and marking context as system to keep the action as minimal as possible (i.e., avoid copying context) + remoteClusterService.updateRemoteClusterCredentials(request.getSettings()); + listener.onResponse(ActionResponse.Empty.INSTANCE); + } + + public static class Request extends ActionRequest { + private final Settings settings; + + public Request(Settings settings) { + this.settings = settings; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public Settings getSettings() { + return settings; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + localOnly(); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/ChangePasswordRequestBuilder.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/ChangePasswordRequestBuilder.java index c5fbd7ca3c397..c792fa364a74a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/ChangePasswordRequestBuilder.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/ChangePasswordRequestBuilder.java @@ -15,7 +15,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.action.user.ChangePasswordRequest; @@ -25,7 +25,6 @@ import org.elasticsearch.xpack.core.security.xcontent.XContentUtils; import java.io.IOException; -import java.io.InputStream; import java.nio.CharBuffer; import java.util.Locale; @@ -93,9 +92,11 @@ public ChangePasswordRequestBuilder passwordHash(char[] passwordHashChars, Hashe public ChangePasswordRequestBuilder source(BytesReference source, XContentType xContentType, Hasher hasher) throws IOException { // EMPTY is ok here because we never call namedObject try ( - InputStream stream = source.streamInput(); - XContentParser parser = xContentType.xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + 
LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + source, + xContentType + ) ) { XContentUtils.verifyObject(parser); XContentParser.Token token; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyAuthenticator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyAuthenticator.java index fadf03a19904c..10bd68e05007e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyAuthenticator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyAuthenticator.java @@ -10,23 +10,46 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.xpack.core.security.action.apikey.ApiKey; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.support.Exceptions; import org.elasticsearch.xpack.security.authc.ApiKeyService.ApiKeyCredentials; +import org.elasticsearch.xpack.security.metric.InstrumentedSecurityActionListener; +import org.elasticsearch.xpack.security.metric.SecurityMetricType; +import org.elasticsearch.xpack.security.metric.SecurityMetrics; + +import java.util.HashMap; +import java.util.Map; +import java.util.function.LongSupplier; import static org.elasticsearch.core.Strings.format; class ApiKeyAuthenticator implements Authenticator { + public static final String ATTRIBUTE_API_KEY_ID = "es.security.api_key_id"; + public static final String ATTRIBUTE_API_KEY_TYPE = "es.security.api_key_type"; + public static final String ATTRIBUTE_API_KEY_AUTHC_FAILURE_REASON = "es.security.api_key_authc_failure_reason"; + private static final Logger logger = LogManager.getLogger(ApiKeyAuthenticator.class); + private final SecurityMetrics authenticationMetrics; private final ApiKeyService apiKeyService; private final String nodeName; - ApiKeyAuthenticator(ApiKeyService apiKeyService, String nodeName) { + ApiKeyAuthenticator(ApiKeyService apiKeyService, String nodeName, MeterRegistry meterRegistry) { + this(apiKeyService, nodeName, meterRegistry, System::nanoTime); + } + + ApiKeyAuthenticator(ApiKeyService apiKeyService, String nodeName, MeterRegistry meterRegistry, LongSupplier nanoTimeSupplier) { + this.authenticationMetrics = new SecurityMetrics<>( + SecurityMetricType.AUTHC_API_KEY, + meterRegistry, + this::buildMetricAttributes, + nanoTimeSupplier + ); this.apiKeyService = apiKeyService; this.nodeName = nodeName; } @@ -51,30 +74,44 @@ public void authenticate(Context context, ActionListener { - if (authResult.isAuthenticated()) { - final Authentication authentication = Authentication.newApiKeyAuthentication(authResult, nodeName); - listener.onResponse(AuthenticationResult.success(authentication)); - } else if (authResult.getStatus() == AuthenticationResult.Status.TERMINATE) { - Exception e = (authResult.getException() != null) - ? 
authResult.getException() - : Exceptions.authenticationError(authResult.getMessage()); - logger.debug(() -> "API key service terminated authentication for request [" + context.getRequest() + "]", e); - context.getRequest().exceptionProcessingRequest(e, authenticationToken); - listener.onFailure(e); - } else { - if (authResult.getMessage() != null) { - if (authResult.getException() != null) { - logger.warn( - () -> format("Authentication using apikey failed - %s", authResult.getMessage()), - authResult.getException() - ); - } else { - logger.warn("Authentication using apikey failed - {}", authResult.getMessage()); + apiKeyService.tryAuthenticate( + context.getThreadContext(), + apiKeyCredentials, + InstrumentedSecurityActionListener.wrapForAuthc(authenticationMetrics, apiKeyCredentials, ActionListener.wrap(authResult -> { + if (authResult.isAuthenticated()) { + final Authentication authentication = Authentication.newApiKeyAuthentication(authResult, nodeName); + listener.onResponse(AuthenticationResult.success(authentication)); + } else if (authResult.getStatus() == AuthenticationResult.Status.TERMINATE) { + Exception e = (authResult.getException() != null) + ? authResult.getException() + : Exceptions.authenticationError(authResult.getMessage()); + logger.debug(() -> "API key service terminated authentication for request [" + context.getRequest() + "]", e); + context.getRequest().exceptionProcessingRequest(e, authenticationToken); + listener.onFailure(e); + } else { + if (authResult.getMessage() != null) { + if (authResult.getException() != null) { + logger.warn( + () -> format("Authentication using apikey failed - %s", authResult.getMessage()), + authResult.getException() + ); + } else { + logger.warn("Authentication using apikey failed - {}", authResult.getMessage()); + } } + listener.onResponse(AuthenticationResult.unsuccessful(authResult.getMessage(), authResult.getException())); } - listener.onResponse(AuthenticationResult.unsuccessful(authResult.getMessage(), authResult.getException())); - } - }, e -> listener.onFailure(context.getRequest().exceptionProcessingRequest(e, null)))); + }, e -> listener.onFailure(context.getRequest().exceptionProcessingRequest(e, null)))) + ); + } + + private Map buildMetricAttributes(ApiKeyCredentials credentials, String failureReason) { + final Map attributes = new HashMap<>(failureReason != null ? 
3 : 2); + attributes.put(ATTRIBUTE_API_KEY_ID, credentials.getId()); + attributes.put(ATTRIBUTE_API_KEY_TYPE, credentials.getExpectedType().value()); + if (failureReason != null) { + attributes.put(ATTRIBUTE_API_KEY_AUTHC_FAILURE_REASON, failureReason); + } + return attributes; } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index 2d700e23f127c..dd2dc453152cd 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -410,7 +410,7 @@ private void createApiKeyAndIndexIt( ActionListener listener ) { final Instant created = clock.instant(); - final Instant expiration = getApiKeyExpiration(created, request); + final Instant expiration = getApiKeyExpiration(created, request.getExpiration()); final SecureString apiKey = UUIDs.randomBase64UUIDSecureString(); assert ApiKey.Type.CROSS_CLUSTER != request.getType() || API_KEY_SECRET_LENGTH == apiKey.length(); final Version version = clusterService.state().nodes().getMinNodeVersion(); @@ -743,7 +743,8 @@ static XContentBuilder maybeBuildUpdatedDocument( final Version targetDocVersion, final Authentication authentication, final BaseUpdateApiKeyRequest request, - final Set userRoleDescriptors + final Set userRoleDescriptors, + final Clock clock ) throws IOException { assert currentApiKeyDoc.type == request.getType(); if (isNoop(apiKeyId, currentApiKeyDoc, targetDocVersion, authentication, request, userRoleDescriptors)) { @@ -755,9 +756,14 @@ static XContentBuilder maybeBuildUpdatedDocument( .field("doc_type", "api_key") .field("type", currentApiKeyDoc.type.value()) .field("creation_time", currentApiKeyDoc.creationTime) - .field("expiration_time", currentApiKeyDoc.expirationTime == -1 ? null : currentApiKeyDoc.expirationTime) .field("api_key_invalidated", false); + if (request.getExpiration() != null) { + builder.field("expiration_time", getApiKeyExpiration(clock.instant(), request.getExpiration()).toEpochMilli()); + } else { + builder.field("expiration_time", currentApiKeyDoc.expirationTime == -1 ? 
null : currentApiKeyDoc.expirationTime); + } + + addApiKeyHash(builder, currentApiKeyDoc.hash.toCharArray()); final List keyRoles = request.getRoleDescriptors(); @@ -803,11 +809,16 @@ private static boolean isNoop( final Authentication authentication, final BaseUpdateApiKeyRequest request, final Set userRoleDescriptors - ) { + ) throws IOException { if (apiKeyDoc.version != targetDocVersion.id) { return false; } + if (request.getExpiration() != null) { + // Since expiration is relative to the current time, it's not likely that it matches the stored value to the ms, so assume update + return false; + } + final Map currentCreator = apiKeyDoc.creator; final var user = authentication.getEffectiveSubject().getUser(); final var sourceRealm = authentication.getEffectiveSubject().getRealm(); @@ -824,12 +835,11 @@ private static boolean isNoop( return false; } @SuppressWarnings("unchecked") - final var currentRealmDomain = RealmDomain.fromXContent( - XContentHelper.mapToXContentParser( - XContentParserConfiguration.EMPTY, - (Map) currentCreator.get("realm_domain") - ) - ); + var m = (Map) currentCreator.get("realm_domain"); + final RealmDomain currentRealmDomain; + try (var parser = XContentHelper.mapToXContentParser(XContentParserConfiguration.EMPTY, m)) { + currentRealmDomain = RealmDomain.fromXContent(parser); + } if (sourceRealm.getDomain().equals(currentRealmDomain) == false) { return false; } @@ -969,13 +979,13 @@ public List parseRoleDescriptors( try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { builder.map(rdMap); try ( - XContentParser parser = XContentType.JSON.xContent() - .createParser( - XContentParserConfiguration.EMPTY.withDeprecationHandler( - new ApiKeyLoggingDeprecationHandler(deprecationLogger, apiKeyId) - ), - BytesReference.bytes(builder).streamInput() - ) + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY.withDeprecationHandler( + new ApiKeyLoggingDeprecationHandler(deprecationLogger, apiKeyId) + ), + BytesReference.bytes(builder), + XContentType.JSON + ) ) { return RoleDescriptor.parse(name, parser, false); } @@ -1281,9 +1291,9 @@ protected void verifyKeyAgainstHash(String apiKeyHash, ApiKeyCredentials credent })); } - private static Instant getApiKeyExpiration(Instant now, AbstractCreateApiKeyRequest request) { - if (request.getExpiration() != null) { - return now.plusSeconds(request.getExpiration().getSeconds()); + private static Instant getApiKeyExpiration(Instant now, @Nullable TimeValue expiration) { + if (expiration != null) { + return now.plusSeconds(expiration.getSeconds()); } else { return null; } @@ -1473,7 +1483,8 @@ private IndexRequest maybeBuildIndexRequest( targetDocVersion, authentication, request, - userRoleDescriptors + userRoleDescriptors, + clock ); final boolean isNoop = builder == null; return isNoop diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java index 7d06798e7401b..e522f02891148 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java @@ -20,6 +20,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.http.HttpPreRequest; import org.elasticsearch.node.Node; +import org.elasticsearch.telemetry.metric.MeterRegistry; import
org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xpack.core.security.authc.Authentication; @@ -88,7 +89,8 @@ public AuthenticationService( TokenService tokenService, ApiKeyService apiKeyService, ServiceAccountService serviceAccountService, - OperatorPrivilegesService operatorPrivilegesService + OperatorPrivilegesService operatorPrivilegesService, + MeterRegistry meterRegistry ) { this.realms = realms; this.auditTrailService = auditTrailService; @@ -111,7 +113,7 @@ public AuthenticationService( new AuthenticationContextSerializer(), new ServiceAccountAuthenticator(serviceAccountService, nodeName), new OAuth2TokenAuthenticator(tokenService), - new ApiKeyAuthenticator(apiKeyService, nodeName), + new ApiKeyAuthenticator(apiKeyService, nodeName, meterRegistry), new RealmsAuthenticator(numInvalidation, lastSuccessfulAuthCache) ); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index aeb101ac0caa4..9c378e0e1156e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -210,7 +210,7 @@ public final class TokenService { static final TransportVersion VERSION_ACCESS_TOKENS_AS_UUIDS = TransportVersions.V_7_2_0; static final TransportVersion VERSION_MULTIPLE_CONCURRENT_REFRESHES = TransportVersions.V_7_2_0; static final TransportVersion VERSION_CLIENT_AUTH_FOR_REFRESH = TransportVersions.V_8_2_0; - static final TransportVersion VERSION_GET_TOKEN_DOC_FOR_REFRESH = TransportVersions.V_8_500_040; + static final TransportVersion VERSION_GET_TOKEN_DOC_FOR_REFRESH = TransportVersions.V_8_500_061; private static final Logger logger = LogManager.getLogger(TokenService.class); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/UserToken.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/UserToken.java index 324150c2a35b1..4806fe348282e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/UserToken.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/UserToken.java @@ -74,7 +74,7 @@ private UserToken(TransportVersion version, Authentication authentication, Insta this.id = input.readString(); this.authentication = new Authentication(input); this.expirationTime = Instant.ofEpochSecond(input.readLong(), input.readInt()); - this.metadata = input.readMap(); + this.metadata = input.readGenericMap(); } @Override diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccounts.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccounts.java index 8942be0bee29c..777fe5f71b0a0 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccounts.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccounts.java @@ -157,7 +157,7 @@ final class ElasticServiceAccounts { new String[] { "monitor", "manage_own_api_key" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() - .indices("logs-*", "metrics-*") + .indices("logs-*", "metrics-*", "traces-*") .privileges("write", 
"create_index", "auto_configure") .build(), }, null, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java index 6e359aa7297ab..4abf2e53d0264 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java @@ -23,11 +23,11 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -46,7 +46,6 @@ import org.elasticsearch.xpack.security.support.SecuritySystemIndices; import java.io.IOException; -import java.io.InputStream; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -174,9 +173,11 @@ protected void loadMappings(ActionListener> listener protected static ExpressionRoleMapping buildMapping(String id, BytesReference source) { try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentType.JSON.xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + source, + XContentType.JSON + ) ) { return ExpressionRoleMapping.parse(id, parser); } catch (Exception e) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java index 8a0a9c09b7d1a..e4e9bc453ee83 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.datastreams.MigrateToDataStreamAction; import org.elasticsearch.action.delete.TransportDeleteAction; import org.elasticsearch.action.index.TransportIndexAction; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.replication.TransportReplicationAction.ConcreteShardRequest; import org.elasticsearch.action.update.TransportUpdateAction; @@ -471,6 +472,11 @@ private void authorizeAction( } else if (isIndexAction(action)) { final Metadata metadata = clusterService.state().metadata(); final AsyncSupplier resolvedIndicesAsyncSupplier = new CachingAsyncSupplier<>(resolvedIndicesListener -> { + if (request instanceof SearchRequest searchRequest && searchRequest.pointInTimeBuilder() != null) { + var resolvedIndices = indicesAndAliasesResolver.resolvePITIndices(searchRequest); + resolvedIndicesListener.onResponse(resolvedIndices); + return; + } final 
ResolvedIndices resolvedIndices = IndicesAndAliasesResolver.tryResolveWithoutWildcards(action, request); if (resolvedIndices != null) { resolvedIndicesListener.onResponse(resolvedIndices); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java index 16258e71e85b8..a4163b6f10fc0 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java @@ -11,6 +11,8 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.search.SearchContextId; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.IndexAbstraction; @@ -176,6 +178,24 @@ static ResolvedIndices resolveIndicesAndAliasesWithoutWildcards(String action, I return new ResolvedIndices(localIndices, List.of()); } + /** + * Returns the resolved indices from the {@link SearchContextId} within the provided {@link SearchRequest}. + */ + ResolvedIndices resolvePITIndices(SearchRequest request) { + assert request.pointInTimeBuilder() != null; + var indices = SearchContextId.decodeIndices(request.pointInTimeBuilder().getEncodedId()); + final ResolvedIndices split; + if (request.allowsRemoteIndices()) { + split = remoteClusterResolver.splitLocalAndRemoteIndexNames(indices); + } else { + split = new ResolvedIndices(Arrays.asList(indices), Collections.emptyList()); + } + if (split.isEmpty()) { + return new ResolvedIndices(List.of(NO_INDEX_PLACEHOLDER), Collections.emptyList()); + } + return split; + } + private static void throwOnUnexpectedWildcards(String action, String[] indices) { final List wildcards = Stream.of(indices).filter(Regex::isSimpleMatchPattern).toList(); assert wildcards.isEmpty() == false : "we already know that there's at least one wildcard in the indices"; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java index b4c154e99b466..f92252ebe851c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java @@ -41,7 +41,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.transport.TransportActionProxy; import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.xpack.core.async.DeleteAsyncResultAction; +import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction; import org.elasticsearch.xpack.core.eql.EqlAsyncActionNames; import org.elasticsearch.xpack.core.search.action.GetAsyncSearchAction; import org.elasticsearch.xpack.core.search.action.SubmitAsyncSearchAction; @@ -961,7 +961,7 @@ private static boolean isScrollRelatedAction(String action) { private static boolean isAsyncRelatedAction(String action) { return action.equals(SubmitAsyncSearchAction.NAME) || action.equals(GetAsyncSearchAction.NAME) - || 
action.equals(DeleteAsyncResultAction.NAME) + || action.equals(TransportDeleteAsyncResultAction.TYPE.name()) || action.equals(EqlAsyncActionNames.EQL_ASYNC_GET_RESULT_ACTION_NAME) || action.equals(SqlAsyncActionNames.SQL_ASYNC_GET_RESULT_ACTION_NAME); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java index 03ac7d5e0fa36..70d086cc5a831 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java @@ -280,8 +280,7 @@ static RoleDescriptor parseRoleDescriptor( String roleName = null; XContentParserConfiguration parserConfig = XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry) .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE); - try { - XContentParser parser = YamlXContent.yamlXContent.createParser(parserConfig, segment); + try (XContentParser parser = YamlXContent.yamlXContent.createParser(parserConfig, segment)) { XContentParser.Token token = parser.nextToken(); if (token == XContentParser.Token.START_OBJECT) { token = parser.nextToken(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java index 004874f5b63b9..be936ae7d4a06 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java @@ -25,12 +25,12 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.cache.Cache; import org.elasticsearch.common.cache.CacheBuilder; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; @@ -39,7 +39,6 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.query.TermsQueryBuilder; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParser; @@ -298,9 +297,11 @@ private static ApplicationPrivilegeDescriptor buildPrivilege( // EMPTY is safe here because we never use namedObject try ( - StreamInput input = source.streamInput(); - XContentParser parser = XContentType.JSON.xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, input) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + source, + XContentType.JSON + ) ) { final ApplicationPrivilegeDescriptor privilege = ApplicationPrivilegeDescriptor.parse(parser, null, null, true); assert privilege.getApplication().equals(name.v1()) diff 
--git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/InstrumentedSecurityActionListener.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/InstrumentedSecurityActionListener.java
new file mode 100644
index 0000000000000..101f49258dd59
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/InstrumentedSecurityActionListener.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.security.metric;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
+
+public class InstrumentedSecurityActionListener {
+
+    /**
+     * Wraps the provided {@code listener} and returns a new wrapped listener which handles authentication metrics collection.
+     *
+     * @param metrics The metrics to collect.
+     * @param context The context object is used to collect and attach additional metric attributes.
+     * @param listener The authentication result handling listener.
+     * @return a new "wrapped" listener which overrides onResponse and onFailure methods in order to collect authentication metrics.
+     * @param <R> The type of authentication result value.
+     * @param <C> The type of context object which is used to attach additional attributes to collected authentication metrics.
+     */
+    public static <R, C> ActionListener<AuthenticationResult<R>> wrapForAuthc(
+        final SecurityMetrics<C> metrics,
+        final C context,
+        final ActionListener<AuthenticationResult<R>> listener
+    ) {
+        assert metrics.type().group() == SecurityMetricGroup.AUTHC;
+        final long startTimeNano = metrics.relativeTimeInNanos();
+        return ActionListener.runBefore(ActionListener.wrap(result -> {
+            if (result.isAuthenticated()) {
+                metrics.recordSuccess(context);
+            } else {
+                metrics.recordFailure(context, result.getMessage());
+            }
+            listener.onResponse(result);
+        }, e -> {
+            metrics.recordFailure(context, e.getMessage());
+            listener.onFailure(e);
+        }), () -> metrics.recordTime(context, startTimeNano));
+    }
+
+}
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/SecurityMetricAttributesBuilder.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/SecurityMetricAttributesBuilder.java
new file mode 100644
index 0000000000000..37dd1b75eec2d
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/SecurityMetricAttributesBuilder.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.security.metric;
+
+import java.util.Map;
+
+@FunctionalInterface
+public interface SecurityMetricAttributesBuilder<C> {
+
+    Map<String, Object> build(C context, String failureReason);
+
+    default Map<String, Object> build(C context) {
+        return build(context, null);
+    }
+
+}
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/SecurityMetricGroup.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/SecurityMetricGroup.java
new file mode 100644
index 0000000000000..9d334a603a21b
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/SecurityMetricGroup.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.security.metric;
+
+/**
+ * Enumerates all metric groups we want to collect.
+ */
+public enum SecurityMetricGroup {
+
+    AUTHC,
+
+    AUTHZ,
+
+    ;
+
+}
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/SecurityMetricInfo.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/SecurityMetricInfo.java
new file mode 100644
index 0000000000000..d80d0f581ea5b
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/SecurityMetricInfo.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.security.metric;
+
+import org.elasticsearch.telemetry.metric.LongCounter;
+import org.elasticsearch.telemetry.metric.LongHistogram;
+import org.elasticsearch.telemetry.metric.MeterRegistry;
+
+import java.util.Objects;
+
+/**
+ * Holds all metric information needed to register a metric in {@link MeterRegistry}.
+ *
+ * @param name The unique metric name.
+ * @param description The brief metric description.
+ * @param unit The metric unit (e.g. count).
+ */
+public record SecurityMetricInfo(String name, String description, String unit) {
+
+    public SecurityMetricInfo(String name, String description, String unit) {
+        this.name = Objects.requireNonNull(name);
+        this.description = Objects.requireNonNull(description);
+        this.unit = Objects.requireNonNull(unit);
+    }
+
+    public LongCounter registerAsLongCounter(MeterRegistry meterRegistry) {
+        return meterRegistry.registerLongCounter(this.name(), this.description(), this.unit());
+    }
+
+    public LongHistogram registerAsLongHistogram(MeterRegistry meterRegistry) {
+        return meterRegistry.registerLongHistogram(this.name(), this.description(), this.unit());
+    }
+
+}
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/SecurityMetricType.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/SecurityMetricType.java
new file mode 100644
index 0000000000000..c44c33f8e64b6
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/SecurityMetricType.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.security.metric;
+
+/**
+ * Defines all security metric types that can be collected.
+ */
+public enum SecurityMetricType {
+
+    AUTHC_API_KEY(
+        SecurityMetricGroup.AUTHC,
+        new SecurityMetricInfo("es.security.authc.api_key.success.total", "Number of successful API key authentications.", "count"),
+        new SecurityMetricInfo("es.security.authc.api_key.failures.total", "Number of failed API key authentications.", "count"),
+        new SecurityMetricInfo("es.security.authc.api_key.time", "Time it took (in nanoseconds) to execute API key authentication.", "ns")
+    ),
+
+    ;
+
+    private final SecurityMetricGroup group;
+    private final SecurityMetricInfo successMetricInfo;
+    private final SecurityMetricInfo failuresMetricInfo;
+    private final SecurityMetricInfo timeMetricInfo;
+
+    SecurityMetricType(
+        SecurityMetricGroup group,
+        SecurityMetricInfo successMetricInfo,
+        SecurityMetricInfo failuresMetricInfo,
+        SecurityMetricInfo timeMetricInfo
+    ) {
+        this.group = group;
+        this.successMetricInfo = successMetricInfo;
+        this.failuresMetricInfo = failuresMetricInfo;
+        this.timeMetricInfo = timeMetricInfo;
+    }
+
+    public SecurityMetricGroup group() {
+        return this.group;
+    }
+
+    public SecurityMetricInfo successMetricInfo() {
+        return successMetricInfo;
+    }
+
+    public SecurityMetricInfo failuresMetricInfo() {
+        return failuresMetricInfo;
+    }
+
+    public SecurityMetricInfo timeMetricInfo() {
+        return timeMetricInfo;
+    }
+
+}
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/SecurityMetrics.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/SecurityMetrics.java
new file mode 100644
index 0000000000000..bf993516bf964
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/SecurityMetrics.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.security.metric;
+
+import org.elasticsearch.telemetry.metric.LongCounter;
+import org.elasticsearch.telemetry.metric.LongHistogram;
+import org.elasticsearch.telemetry.metric.MeterRegistry;
+
+import java.util.Objects;
+import java.util.function.LongSupplier;
+
+/**
+ * This class provides a common way for registering and collecting different types of security metrics.
+ * It allows for recording the number of successful and failed executions as well as the execution time.
+ *
+ * @param <C> The type of context object which is used to attach additional attributes to collected metrics.
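+ *
+ * <p>A minimal usage sketch (the {@code String} context type, the attribute names, and the {@code meterRegistry}
+ * variable are illustrative assumptions, not part of this class):
+ * <pre>{@code
+ * SecurityMetrics<String> metrics = new SecurityMetrics<>(
+ *     SecurityMetricType.AUTHC_API_KEY,
+ *     meterRegistry,
+ *     (context, failureReason) -> failureReason == null
+ *         ? Map.of("es.security.context", context)
+ *         : Map.of("es.security.context", context, "es.security.failure_reason", failureReason),
+ *     System::nanoTime
+ * );
+ * metrics.recordSuccess("my-context");
+ * metrics.recordFailure("my-context", "invalid credentials");
+ * }</pre>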
+ */
+public final class SecurityMetrics<C> {
+
+    private final LongCounter successCounter;
+    private final LongCounter failuresCounter;
+    private final LongHistogram timeHistogram;
+
+    private final SecurityMetricAttributesBuilder<C> attributesBuilder;
+    private final LongSupplier nanoTimeSupplier;
+    private final SecurityMetricType metricType;
+
+    public SecurityMetrics(
+        final SecurityMetricType metricType,
+        final MeterRegistry meterRegistry,
+        final SecurityMetricAttributesBuilder<C> attributesBuilder,
+        final LongSupplier nanoTimeSupplier
+    ) {
+        this.metricType = Objects.requireNonNull(metricType);
+        this.successCounter = metricType.successMetricInfo().registerAsLongCounter(meterRegistry);
+        this.failuresCounter = metricType.failuresMetricInfo().registerAsLongCounter(meterRegistry);
+        this.timeHistogram = metricType.timeMetricInfo().registerAsLongHistogram(meterRegistry);
+        this.attributesBuilder = Objects.requireNonNull(attributesBuilder);
+        this.nanoTimeSupplier = Objects.requireNonNull(nanoTimeSupplier);
+    }
+
+    public SecurityMetricType type() {
+        return this.metricType;
+    }
+
+    /**
+     * Returns a value of nanoseconds that may be used for relative time calculations.
+     * This method should only be used for calculating time deltas.
+     */
+    public long relativeTimeInNanos() {
+        return nanoTimeSupplier.getAsLong();
+    }
+
+    /**
+     * Records a single successful execution.
+     *
+     * @param context The context object which is used to attach additional attributes to the success metric.
+     */
+    public void recordSuccess(final C context) {
+        this.successCounter.incrementBy(1L, attributesBuilder.build(context));
+    }
+
+    /**
+     * Records a single failed execution.
+     *
+     * @param context The context object which is used to attach additional attributes to the failure metric.
+     * @param failureReason The optional failure reason which is stored as an attribute with the recorded failure metric.
+     */
+    public void recordFailure(final C context, final String failureReason) {
+        this.failuresCounter.incrementBy(1L, attributesBuilder.build(context, failureReason));
+    }
+
+    /**
+     * Records an execution time in nanoseconds. This method should be called after the execution, with the provided
+     * start time. The {@link #relativeTimeInNanos()} method should be used to obtain the start time.
+     *
+     * @param context The context object which is used to attach additional attributes to the collected metric.
+     * @param startTimeNano The start time (in nanoseconds) captured before the execution.
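+     *
+     * <p>Typical timing pattern (sketch; {@code metrics} and {@code context} stand for whatever the caller tracks):
+     * <pre>{@code
+     * long startTimeNano = metrics.relativeTimeInNanos();
+     * // ... perform the instrumented execution ...
+     * metrics.recordTime(context, startTimeNano);
+     * }</pre>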
+ */ + public void recordTime(final C context, final long startTimeNano) { + final long timeInNanos = relativeTimeInNanos() - startTimeNano; + this.timeHistogram.record(timeInNanos, this.attributesBuilder.build(context)); + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/DefaultOperatorOnlyRegistry.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/DefaultOperatorOnlyRegistry.java index e335bd8583a88..6cc17e418314a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/DefaultOperatorOnlyRegistry.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/DefaultOperatorOnlyRegistry.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.license.DeleteLicenseAction; import org.elasticsearch.license.PutLicenseAction; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestHandler; @@ -37,7 +36,7 @@ public class DefaultOperatorOnlyRegistry implements OperatorOnlyRegistry { TransportAddVotingConfigExclusionsAction.TYPE.name(), TransportClearVotingConfigExclusionsAction.TYPE.name(), PutLicenseAction.NAME, - DeleteLicenseAction.NAME, + "cluster:admin/xpack/license/delete", // Autoscaling does not publish its actions to core, literal strings are needed. "cluster:admin/autoscaling/put_autoscaling_policy", "cluster:admin/autoscaling/delete_autoscaling_policy", diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestBulkUpdateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestBulkUpdateApiKeyAction.java index c436370d67579..584ad08704ddd 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestBulkUpdateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestBulkUpdateApiKeyAction.java @@ -9,6 +9,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; @@ -35,7 +36,12 @@ public final class RestBulkUpdateApiKeyAction extends ApiKeyBaseRestHandler { @SuppressWarnings("unchecked") static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "bulk_update_api_key_request", - a -> new BulkUpdateApiKeyRequest((List) a[0], (List) a[1], (Map) a[2]) + a -> new BulkUpdateApiKeyRequest( + (List) a[0], + (List) a[1], + (Map) a[2], + TimeValue.parseTimeValue((String) a[3], null, "expiration") + ) ); static { @@ -45,6 +51,7 @@ public final class RestBulkUpdateApiKeyAction extends ApiKeyBaseRestHandler { return RoleDescriptor.parse(n, p, false); }, new ParseField("role_descriptors")); PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new ParseField("metadata")); + PARSER.declareString(optionalConstructorArg(), new ParseField("expiration")); } public RestBulkUpdateApiKeyAction(final Settings settings, final XPackLicenseState licenseState) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyAction.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyAction.java index 16c323eaca76e..d64e7f4007387 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyAction.java @@ -9,6 +9,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; @@ -33,7 +34,11 @@ public final class RestUpdateApiKeyAction extends ApiKeyBaseRestHandler { @SuppressWarnings("unchecked") static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "update_api_key_request_payload", - a -> new Payload((List) a[0], (Map) a[1]) + a -> new Payload( + (List) a[0], + (Map) a[1], + TimeValue.parseTimeValue((String) a[2], null, "expiration") + ) ); static { @@ -42,6 +47,7 @@ public final class RestUpdateApiKeyAction extends ApiKeyBaseRestHandler { return RoleDescriptor.parse(n, p, false); }, new ParseField("role_descriptors")); PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new ParseField("metadata")); + PARSER.declareString(optionalConstructorArg(), new ParseField("expiration")); } public RestUpdateApiKeyAction(final Settings settings, final XPackLicenseState licenseState) { @@ -64,13 +70,13 @@ protected RestChannelConsumer innerPrepareRequest(final RestRequest request, fin // `RestClearApiKeyCacheAction` and our current REST implementation requires that path params have the same wildcard if their paths // share a prefix final var apiKeyId = request.param("ids"); - final var payload = request.hasContent() == false ? new Payload(null, null) : PARSER.parse(request.contentParser(), null); + final var payload = request.hasContent() == false ? 
new Payload(null, null, null) : PARSER.parse(request.contentParser(), null); return channel -> client.execute( UpdateApiKeyAction.INSTANCE, - new UpdateApiKeyRequest(apiKeyId, payload.roleDescriptors, payload.metadata), + new UpdateApiKeyRequest(apiKeyId, payload.roleDescriptors, payload.metadata, payload.expiration), new RestToXContentListener<>(channel) ); } - record Payload(List roleDescriptors, Map metadata) {} + record Payload(List roleDescriptors, Map metadata, TimeValue expiration) {} } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyAction.java index a642723667639..e9244eaea0ec5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyAction.java @@ -9,6 +9,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestRequest; @@ -32,12 +33,17 @@ public final class RestUpdateCrossClusterApiKeyAction extends ApiKeyBaseRestHand @SuppressWarnings("unchecked") static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "update_cross_cluster_api_key_request_payload", - a -> new Payload((CrossClusterApiKeyRoleDescriptorBuilder) a[0], (Map) a[1]) + a -> new Payload( + (CrossClusterApiKeyRoleDescriptorBuilder) a[0], + (Map) a[1], + TimeValue.parseTimeValue((String) a[2], null, "expiration") + ) ); static { PARSER.declareObject(optionalConstructorArg(), CrossClusterApiKeyRoleDescriptorBuilder.PARSER, new ParseField("access")); PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.map(), new ParseField("metadata")); + PARSER.declareString(optionalConstructorArg(), new ParseField("expiration")); } public RestUpdateCrossClusterApiKeyAction(final Settings settings, final XPackLicenseState licenseState) { @@ -61,7 +67,7 @@ protected RestChannelConsumer innerPrepareRequest(final RestRequest request, fin return channel -> client.execute( UpdateCrossClusterApiKeyAction.INSTANCE, - new UpdateCrossClusterApiKeyRequest(apiKeyId, payload.builder, payload.metadata), + new UpdateCrossClusterApiKeyRequest(apiKeyId, payload.builder, payload.metadata, payload.expiration), new RestToXContentListener<>(channel) ); } @@ -75,5 +81,5 @@ protected Exception innerCheckFeatureAvailable(RestRequest request) { } } - record Payload(CrossClusterApiKeyRoleDescriptorBuilder builder, Map metadata) {} + record Payload(CrossClusterApiKeyRoleDescriptorBuilder builder, Map metadata, TimeValue expiration) {} } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/profile/RestUpdateProfileDataAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/profile/RestUpdateProfileDataAction.java index 6e178f30fe1b3..899d68063cf3b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/profile/RestUpdateProfileDataAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/profile/RestUpdateProfileDataAction.java @@ 
-63,7 +63,10 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClien final long ifPrimaryTerm = request.paramAsLong("if_primary_term", -1); final long ifSeqNo = request.paramAsLong("if_seq_no", -1); final RefreshPolicy refreshPolicy = RefreshPolicy.parse(request.param("refresh", "wait_for")); - final Payload payload = PARSER.parse(request.contentParser(), null); + final Payload payload; + try (var parser = request.contentParser()) { + payload = PARSER.parse(parser, null); + } final UpdateProfileDataRequest updateProfileDataRequest = new UpdateProfileDataRequest( uid, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/settings/RestUpdateSecuritySettingsAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/settings/RestUpdateSecuritySettingsAction.java index 7ee8ea5d41a63..b2e8719b25c24 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/settings/RestUpdateSecuritySettingsAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/settings/RestUpdateSecuritySettingsAction.java @@ -36,7 +36,10 @@ public List routes() { @Override protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { - UpdateSecuritySettingsAction.Request req = UpdateSecuritySettingsAction.Request.parse(request.contentParser()); + UpdateSecuritySettingsAction.Request req; + try (var parser = request.contentParser()) { + req = UpdateSecuritySettingsAction.Request.parse(parser); + } return restChannel -> client.execute(UpdateSecuritySettingsAction.INSTANCE, req, new RestToXContentListener<>(restChannel)); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolver.java deleted file mode 100644 index 93735a700bf92..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolver.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
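The two `try (var parser = request.contentParser())` hunks above share one motive: `contentParser()` hands the handler an `XContentParser` it owns, so parsing must release it even when it throws. A condensed sketch of the pattern, reusing the hypothetical `ExamplePayload`/`ExpirationParsing` sketch from earlier:

```java
// Sketch of the try-with-resources parsing pattern adopted above.
import java.io.IOException;

import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.xcontent.XContentParser;

class ParseWithCleanup {
    static ExamplePayload parse(RestRequest request) throws IOException {
        try (XContentParser parser = request.contentParser()) {
            return ExpirationParsing.PARSER.parse(parser, null); // parser closed on all paths
        }
    }
}
```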
- */ - -package org.elasticsearch.xpack.security.transport; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.security.authc.ApiKeyService; - -import java.util.Map; -import java.util.Optional; - -import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_CREDENTIALS; - -public class RemoteClusterCredentialsResolver { - - private static final Logger logger = LogManager.getLogger(RemoteClusterCredentialsResolver.class); - - private final Map clusterCredentials; - - public RemoteClusterCredentialsResolver(final Settings settings) { - this.clusterCredentials = REMOTE_CLUSTER_CREDENTIALS.getAsMap(settings); - logger.debug( - "Read cluster credentials for remote clusters [{}]", - Strings.collectionToCommaDelimitedString(clusterCredentials.keySet()) - ); - } - - public Optional resolve(final String clusterAlias) { - final SecureString apiKey = clusterCredentials.get(clusterAlias); - if (apiKey == null) { - return Optional.empty(); - } else { - return Optional.of(new RemoteClusterCredentials(clusterAlias, ApiKeyService.withApiKeyPrefix(apiKey.toString()))); - } - } - - record RemoteClusterCredentials(String clusterAlias, String credentials) { - @Override - public String toString() { - return "RemoteClusterCredentials{clusterAlias='" + clusterAlias + "', credentials='::es_redacted::'}"; - } - } -} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java index 53dd31fe46793..162cabf5297ce 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.support.DestructiveOperations; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.ssl.SslConfiguration; import org.elasticsearch.common.util.Maps; @@ -24,6 +25,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteConnectionManager; +import org.elasticsearch.transport.RemoteConnectionManager.RemoteClusterAliasWithCredentials; import org.elasticsearch.transport.SendRequestTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportChannel; @@ -46,6 +48,7 @@ import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.Security; import org.elasticsearch.xpack.security.audit.AuditUtil; +import org.elasticsearch.xpack.security.authc.ApiKeyService; import org.elasticsearch.xpack.security.authc.AuthenticationService; import org.elasticsearch.xpack.security.authc.CrossClusterAccessAuthenticationService; import org.elasticsearch.xpack.security.authc.CrossClusterAccessHeaders; @@ -63,7 +66,6 @@ import static org.elasticsearch.transport.RemoteClusterPortSettings.REMOTE_CLUSTER_PROFILE; import static 
org.elasticsearch.transport.RemoteClusterPortSettings.REMOTE_CLUSTER_SERVER_ENABLED; import static org.elasticsearch.transport.RemoteClusterPortSettings.TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY; -import static org.elasticsearch.xpack.security.transport.RemoteClusterCredentialsResolver.RemoteClusterCredentials; public class SecurityServerTransportInterceptor implements TransportInterceptor { @@ -85,8 +87,7 @@ public class SecurityServerTransportInterceptor implements TransportInterceptor private final Settings settings; private final SecurityContext securityContext; private final CrossClusterAccessAuthenticationService crossClusterAccessAuthcService; - private final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver; - private final Function<Transport.Connection, Optional<String>> remoteClusterAliasResolver; + private final Function<Transport.Connection, Optional<RemoteClusterAliasWithCredentials>> remoteClusterCredentialsResolver; private final XPackLicenseState licenseState; public SecurityServerTransportInterceptor( @@ -98,7 +99,6 @@ public SecurityServerTransportInterceptor( SecurityContext securityContext, DestructiveOperations destructiveOperations, CrossClusterAccessAuthenticationService crossClusterAccessAuthcService, - RemoteClusterCredentialsResolver remoteClusterCredentialsResolver, XPackLicenseState licenseState ) { this( @@ -110,9 +110,8 @@ public SecurityServerTransportInterceptor( securityContext, destructiveOperations, crossClusterAccessAuthcService, - remoteClusterCredentialsResolver, licenseState, - RemoteConnectionManager::resolveRemoteClusterAlias + RemoteConnectionManager::resolveRemoteClusterAliasWithCredentials ); } @@ -125,10 +124,9 @@ public SecurityServerTransportInterceptor( SecurityContext securityContext, DestructiveOperations destructiveOperations, CrossClusterAccessAuthenticationService crossClusterAccessAuthcService, - RemoteClusterCredentialsResolver remoteClusterCredentialsResolver, XPackLicenseState licenseState, // Inject for simplified testing - Function<Transport.Connection, Optional<String>> remoteClusterAliasResolver + Function<Transport.Connection, Optional<RemoteClusterAliasWithCredentials>> remoteClusterCredentialsResolver ) { this.settings = settings; this.threadPool = threadPool; @@ -139,7 +137,6 @@ public SecurityServerTransportInterceptor( this.crossClusterAccessAuthcService = crossClusterAccessAuthcService; this.licenseState = licenseState; this.remoteClusterCredentialsResolver = remoteClusterCredentialsResolver; - this.remoteClusterAliasResolver = remoteClusterAliasResolver; this.profileFilters = initializeProfileFilters(destructiveOperations); } @@ -159,7 +156,8 @@ public void sendRequest( TransportResponseHandler<T> handler ) { assertNoCrossClusterAccessHeadersInContext(); - final Optional<String> remoteClusterAlias = remoteClusterAliasResolver.apply(connection); + final Optional<String> remoteClusterAlias = remoteClusterCredentialsResolver.apply(connection) + .map(RemoteClusterAliasWithCredentials::clusterAlias); if (PreAuthorizationUtils.shouldRemoveParentAuthorizationFromThreadContext(remoteClusterAlias, action, securityContext)) { securityContext.executeAfterRemovingParentAuthorization(original -> { sendRequestInner( @@ -278,22 +276,23 @@ public void sendRequest( * Returns cluster credentials if the connection is remote, and cluster credentials are set up for the target cluster.
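As that comment says, the hunk that follows resolves credentials for remote connections. With the standalone resolver class deleted above, alias and credentials now arrive together from a single `RemoteClusterAliasWithCredentials` lookup. A compact sketch of the resulting flow (the wrapper class is hypothetical; the method names are the ones in the diff):

```java
// Sketch only: collapses the getRemoteClusterCredentials logic below into an Optional chain.
import java.util.Optional;
import java.util.function.Function;

import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.transport.RemoteConnectionManager.RemoteClusterAliasWithCredentials;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.xpack.security.authc.ApiKeyService;

class CredentialsFlowSketch {
    // Injected in production as RemoteConnectionManager::resolveRemoteClusterAliasWithCredentials
    private final Function<Transport.Connection, Optional<RemoteClusterAliasWithCredentials>> resolver;

    CredentialsFlowSketch(Function<Transport.Connection, Optional<RemoteClusterAliasWithCredentials>> resolver) {
        this.resolver = resolver;
    }

    Optional<String> apiKeyHeaderValue(Transport.Connection connection) {
        return resolver.apply(connection)                        // empty: not a remote connection
            .map(RemoteClusterAliasWithCredentials::credentials) // empty: credentials() was null
            .map(SecureString::toString)
            .map(ApiKeyService::withApiKeyPrefix);               // prepend the API key auth scheme
    }
}
```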
*/ private Optional<RemoteClusterCredentials> getRemoteClusterCredentials(Transport.Connection connection) { - final Optional<String> optionalRemoteClusterAlias = remoteClusterAliasResolver.apply(connection); - if (optionalRemoteClusterAlias.isEmpty()) { + final Optional<RemoteClusterAliasWithCredentials> remoteClusterAliasWithCredentials = remoteClusterCredentialsResolver + .apply(connection); + if (remoteClusterAliasWithCredentials.isEmpty()) { logger.trace("Connection is not remote"); return Optional.empty(); } - final String remoteClusterAlias = optionalRemoteClusterAlias.get(); - final Optional<RemoteClusterCredentials> remoteClusterCredentials = remoteClusterCredentialsResolver.resolve( - remoteClusterAlias - ); - if (remoteClusterCredentials.isEmpty()) { + final String remoteClusterAlias = remoteClusterAliasWithCredentials.get().clusterAlias(); + final SecureString remoteClusterCredentials = remoteClusterAliasWithCredentials.get().credentials(); + if (remoteClusterCredentials == null) { logger.trace("No cluster credentials are configured for remote cluster [{}]", remoteClusterAlias); return Optional.empty(); } - return remoteClusterCredentials; + return Optional.of( + new RemoteClusterCredentials(remoteClusterAlias, ApiKeyService.withApiKeyPrefix(remoteClusterCredentials.toString())) + ); } private void sendWithCrossClusterAccessHeaders( @@ -442,7 +441,7 @@ private void sendWithUser( throw new IllegalStateException("there should always be a user when sending a message for action [" + action + "]"); } - assert securityContext.getParentAuthorization() == null || remoteClusterAliasResolver.apply(connection).isPresent() == false + assert securityContext.getParentAuthorization() == null || remoteClusterCredentialsResolver.apply(connection).isEmpty() : "parent authorization header should not be set for remote cluster requests"; try { @@ -663,4 +662,12 @@ public void onFailure(Exception e) { } } } + + record RemoteClusterCredentials(String clusterAlias, String credentials) { + + @Override + public String toString() { + return "RemoteClusterCredentials{clusterAlias='" + clusterAlias + "', credentials='::es_redacted::'}"; + } + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalStateSecurity.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalStateSecurity.java index d44e7c27d760e..a2aa04e0f56c3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalStateSecurity.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalStateSecurity.java @@ -16,6 +16,7 @@ import org.elasticsearch.license.LicenseService; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.protocol.xpack.XPackInfoRequest; import org.elasticsearch.protocol.xpack.XPackInfoResponse; import org.elasticsearch.protocol.xpack.XPackUsageRequest; @@ -36,7 +37,7 @@ import java.util.Collections; import java.util.List; -public class LocalStateSecurity extends LocalStateCompositeXPackPlugin { +public class LocalStateSecurity extends LocalStateCompositeXPackPlugin implements ReloadablePlugin { public static class SecurityTransportXPackUsageAction extends TransportXPackUsageAction { @Inject @@ -130,4 +131,15 @@ protected Class> public List<Plugin> plugins() { return plugins; } + + @Override + public void reload(Settings settings) throws Exception { + plugins.stream().filter(p -> p instanceof ReloadablePlugin).forEach(p -> { + try { + ((ReloadablePlugin) p).reload(settings); + } catch (Exception
e) { + throw new RuntimeException(e); + } + }); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 6773da137ac96..1735b9443c78f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -9,10 +9,13 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionModule; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -55,6 +58,7 @@ import org.elasticsearch.rest.RestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; @@ -72,6 +76,7 @@ import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.SecurityExtension; import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.security.action.ActionTypes; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper; import org.elasticsearch.xpack.core.security.authc.Realm; @@ -116,8 +121,11 @@ import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.emptyMap; +import static org.elasticsearch.test.LambdaMatchers.falseWith; +import static org.elasticsearch.test.LambdaMatchers.trueWith; import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; import static org.elasticsearch.xpack.security.operator.OperatorPrivileges.NOOP_OPERATOR_PRIVILEGES_SERVICE; import static org.elasticsearch.xpack.security.operator.OperatorPrivileges.OPERATOR_PRIVILEGES_ENABLED; @@ -133,7 +141,10 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class SecurityTests extends ESTestCase { @@ -214,7 +225,8 @@ private Collection createComponentsUtil(Settings settings) throws Except xContentRegistry(), env, nodeMetadata, - TestIndexNameExpressionResolver.newInstance(threadContext) + TestIndexNameExpressionResolver.newInstance(threadContext), + TelemetryProvider.NOOP ); } @@ -475,10 +487,10 @@ public void testGetFieldFilterSecurityEnabled() throws Exception { IndicesAccessControl indicesAccessControl = new IndicesAccessControl(true, permissionsMap); securityContext.putIndicesAccessControl(indicesAccessControl); - 
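The `LocalStateSecurity` change above makes the composite test plugin reloadable by fanning the `reload` call out to every wrapped plugin that supports it. The same delegation in isolation (a sketch; the class name is made up):

```java
// Sketch of the reload fan-out used by the test plugin above.
import java.util.List;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.ReloadablePlugin;

class ReloadFanOut {
    // reload(Settings) is declared to throw Exception, hence the wrap-and-rethrow.
    static void reloadAll(List<Plugin> plugins, Settings settings) {
        plugins.stream().filter(p -> p instanceof ReloadablePlugin).forEach(p -> {
            try {
                ((ReloadablePlugin) p).reload(settings);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
    }
}
```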
assertTrue(fieldFilter.apply("index_granted").test("field_granted")); - assertFalse(fieldFilter.apply("index_granted").test(randomAlphaOfLengthBetween(3, 10))); + assertThat(fieldFilter.apply("index_granted"), trueWith("field_granted")); + assertThat(fieldFilter.apply("index_granted"), falseWith(randomAlphaOfLengthBetween(3, 10))); assertEquals(MapperPlugin.NOOP_FIELD_PREDICATE, fieldFilter.apply("index_granted_all_permissions")); - assertTrue(fieldFilter.apply("index_granted_all_permissions").test(randomAlphaOfLengthBetween(3, 10))); + assertThat(fieldFilter.apply("index_granted_all_permissions"), trueWith(randomAlphaOfLengthBetween(3, 10))); assertEquals(MapperPlugin.NOOP_FIELD_PREDICATE, fieldFilter.apply("index_other")); } @@ -574,6 +586,32 @@ public void testValidateForFipsInvalidPasswordHashingAlgorithm() { assertThat(iae.getMessage(), containsString("Only PBKDF2 is allowed for stored credential hashing in a FIPS 140 JVM.")); } + public void testValidateForFipsRequiredProvider() { + final Settings settings = Settings.builder() + .put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true) + .putList(XPackSettings.FIPS_REQUIRED_PROVIDERS.getKey(), List.of("BCFIPS")) + .build(); + if (inFipsJvm()) { + Security.validateForFips(settings); + // no exceptions since gradle has wired in the bouncy castle FIPS provider + } else { + final IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Security.validateForFips(settings)); + assertThat(iae.getMessage(), containsString("Could not find required FIPS security provider: [bcfips]")); + } + + final Settings settings2 = Settings.builder() + .put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true) + .putList(XPackSettings.FIPS_REQUIRED_PROVIDERS.getKey(), List.of("junk0", "BCFIPS", "junk1", "junk2")) + .build(); + if (inFipsJvm()) { + final IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Security.validateForFips(settings2)); + assertThat(iae.getMessage(), containsString("Could not find required FIPS security provider: [junk0, junk1, junk2]")); + } else { + final IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Security.validateForFips(settings2)); + assertThat(iae.getMessage(), containsString("Could not find required FIPS security provider: [junk0, bcfips, junk1, junk2]")); + } + } + public void testValidateForFipsMultipleValidationErrors() { final Settings settings = Settings.builder() .put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true) @@ -877,6 +915,23 @@ public void testSecurityMustBeEnableToConnectRemoteClusterWithCredentials() { + "Please either enable security or remove these settings from the keystore." ) ); + + // Security off, remote cluster with credentials on reload call + final MockSecureSettings secureSettings5 = new MockSecureSettings(); + secureSettings5.setString("cluster.remote.my1.credentials", randomAlphaOfLength(20)); + secureSettings5.setString("cluster.remote.my2.credentials", randomAlphaOfLength(20)); + final Settings.Builder builder5 = Settings.builder().setSecureSettings(secureSettings5); + // Use builder with security disabled to construct valid Security instance + final var security = new Security(builder2.build()); + final IllegalArgumentException e5 = expectThrows(IllegalArgumentException.class, () -> security.reload(builder5.build())); + assertThat( + e5.getMessage(), + containsString( + "Found [2] remote clusters with credentials [cluster.remote.my1.credentials,cluster.remote.my2.credentials]. 
" + + "Security [xpack.security.enabled] must be enabled to connect to them. " + + "Please either enable security or remove these settings from the keystore." + ) + ); } public void testLoadExtensions() throws Exception { @@ -905,6 +960,98 @@ public List loadExtensions(Class extensionPointType) { assertThat(registry, instanceOf(DummyOperatorOnlyRegistry.class)); } + public void testReload() throws Exception { + final Settings settings = Settings.builder().put("xpack.security.enabled", true).put("path.home", createTempDir()).build(); + + final PlainActionFuture value = new PlainActionFuture<>(); + final Client mockedClient = mock(Client.class); + + final Realms mockedRealms = mock(Realms.class); + when(mockedRealms.stream()).thenReturn(Stream.of()); + + doAnswer((inv) -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) inv.getArguments()[2]; + listener.onResponse(ActionResponse.Empty.INSTANCE); + return null; + }).when(mockedClient).execute(eq(ActionTypes.RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION), any(), any()); + + security = new Security(settings, Collections.emptyList()) { + @Override + protected Client getClient() { + return mockedClient; + } + + @Override + protected Realms getRealms() { + return mockedRealms; + } + }; + + final Settings inputSettings = Settings.EMPTY; + security.reload(inputSettings); + + verify(mockedClient).execute(eq(ActionTypes.RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION), any(), any()); + verify(mockedRealms).stream(); + } + + public void testReloadWithFailures() { + final Settings settings = Settings.builder().put("xpack.security.enabled", true).put("path.home", createTempDir()).build(); + + final boolean failRemoteClusterCredentialsReload = randomBoolean(); + final Client mockedClient = mock(Client.class); + if (failRemoteClusterCredentialsReload) { + doAnswer((inv) -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) inv.getArguments()[2]; + listener.onFailure(new RuntimeException("failed remote cluster credentials reload")); + return null; + }).when(mockedClient).execute(eq(ActionTypes.RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION), any(), any()); + } else { + doAnswer((inv) -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) inv.getArguments()[2]; + listener.onResponse(ActionResponse.Empty.INSTANCE); + return null; + }).when(mockedClient).execute(eq(ActionTypes.RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION), any(), any()); + } + + final Realms mockedRealms = mock(Realms.class); + final boolean failRealmsReload = (false == failRemoteClusterCredentialsReload) || randomBoolean(); + if (failRealmsReload) { + when(mockedRealms.stream()).thenThrow(new RuntimeException("failed jwt realms reload")); + } else { + when(mockedRealms.stream()).thenReturn(Stream.of()); + } + security = new Security(settings, Collections.emptyList()) { + @Override + protected Client getClient() { + return mockedClient; + } + + @Override + protected Realms getRealms() { + return mockedRealms; + } + }; + + final Settings inputSettings = Settings.EMPTY; + final var exception = expectThrows(ElasticsearchException.class, () -> security.reload(inputSettings)); + + assertThat(exception.getMessage(), containsString("secure settings reload failed for one or more security component")); + if (failRemoteClusterCredentialsReload) { + assertThat(exception.getSuppressed()[0].getMessage(), containsString("failed remote cluster credentials reload")); + if (failRealmsReload) { + 
assertThat(exception.getSuppressed()[1].getMessage(), containsString("failed jwt realms reload")); + } + } else { + assertThat(exception.getSuppressed()[0].getMessage(), containsString("failed jwt realms reload")); + } + // Verify both called despite failure + verify(mockedClient).execute(eq(ActionTypes.RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION), any(), any()); + verify(mockedRealms).stream(); + } + public void testLoadNoExtensions() throws Exception { Settings settings = Settings.builder() .put("xpack.security.enabled", true) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportUpdateCrossClusterApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportUpdateCrossClusterApiKeyActionTests.java index 7ce920506d7d1..70190b70f3f1a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportUpdateCrossClusterApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportUpdateCrossClusterApiKeyActionTests.java @@ -74,7 +74,12 @@ public void testExecute() throws IOException { } final String id = randomAlphaOfLength(10); - final var request = new UpdateCrossClusterApiKeyRequest(id, roleDescriptorBuilder, metadata); + final var request = new UpdateCrossClusterApiKeyRequest( + id, + roleDescriptorBuilder, + metadata, + ApiKeyTests.randomFutureExpirationTime() + ); final int updateStatus = randomIntBetween(0, 2); // 0 - success, 1 - noop, 2 - error doAnswer(invocation -> { @@ -129,7 +134,12 @@ public void testAuthenticationCheck() { mock(ApiKeyService.class), securityContext ); - final var request = new UpdateCrossClusterApiKeyRequest(randomAlphaOfLength(10), null, Map.of()); + final var request = new UpdateCrossClusterApiKeyRequest( + randomAlphaOfLength(10), + null, + Map.of(), + ApiKeyTests.randomFutureExpirationTime() + ); // null authentication error when(securityContext.getAuthentication()).thenReturn(null); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java index 8743453d33a35..a088e6c61822a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java @@ -28,7 +28,6 @@ import org.elasticsearch.action.search.ClearScrollResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.TransportClearScrollAction; import org.elasticsearch.action.search.TransportSearchAction; @@ -130,7 +129,7 @@ public class TransportSamlInvalidateSessionActionTests extends SamlTestCase { private List searchRequests; private TransportSamlInvalidateSessionAction action; private SamlLogoutRequestHandler.Result logoutRequest; - private Function searchFunction = ignore -> new SearchHit[0]; + private Function searchFunction = ignore -> SearchHits.EMPTY; @Before public void setup() throws Exception { @@ -201,15 +200,13 @@ protected void 
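A recurring stub in the reload tests above: `client.execute(action, request, listener)` is asynchronous, so the mock completes the listener argument inline. Boiled down to a sketch (hypothetical helper; the argument index matches the diff):

```java
// Sketch of the Mockito doAnswer pattern used by testReload/testReloadWithFailures.
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.client.internal.Client;

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

class ListenerStubSketch {
    static Client clientThatSucceeds() {
        Client client = mock(Client.class);
        doAnswer(inv -> {
            @SuppressWarnings("unchecked")
            var listener = (ActionListener<ActionResponse.Empty>) inv.getArguments()[2];
            listener.onResponse(ActionResponse.Empty.INSTANCE); // complete synchronously
            return null;
        }).when(client).execute(any(), any(), any());
        return client;
    }
}
```

Swapping `onResponse` for `onFailure(new RuntimeException(...))` gives the failure variant these tests randomize between.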
ActionListener.respondAndRelease( listener, (Response) new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), + new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), + null, + null, + false, + false, + null, + 1, "_scrollId1", 1, 1, @@ -221,19 +218,16 @@ protected void ); } else if (TransportSearchScrollAction.TYPE.name().equals(action.name())) { assertThat(request, instanceOf(SearchScrollRequest.class)); - final SearchHit[] hits = new SearchHit[0]; ActionListener.respondAndRelease( listener, (Response) new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), + SearchHits.EMPTY_WITH_TOTAL_HITS, + null, + null, + false, + false, + null, + 1, "_scrollId1", 1, 1, @@ -368,7 +362,7 @@ public void testInvalidateCorrectTokensFromLogoutRequest() throws Exception { .filter(r -> r.id().startsWith("token")) .map(r -> tokenHit(counter.incrementAndGet(), r.source())) .collect(Collectors.toList()) - .toArray(new SearchHit[0]); + .toArray(SearchHits.EMPTY); assertThat(searchHits.length, equalTo(4)); searchFunction = req1 -> { searchFunction = findTokenByRefreshToken(searchHits); @@ -469,7 +463,7 @@ private Function findTokenByRefreshToken(SearchHit[] return new SearchHit[] { hit }; } } - return new SearchHit[0]; + return SearchHits.EMPTY; }; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java index 3385b02147890..9c48354b951d8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java @@ -46,6 +46,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.action.apikey.ApiKeyTests; import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction; @@ -629,7 +630,8 @@ public void testSecurityConfigChangeEventFormattingForRoles() throws IOException final var updateApiKeyRequest = new UpdateApiKeyRequest( keyId, randomBoolean() ? null : keyRoleDescriptors, - metadataWithSerialization.metadata() + metadataWithSerialization.metadata(), + ApiKeyTests.randomFutureExpirationTime() ); auditTrail.accessGranted(requestId, authentication, UpdateApiKeyAction.NAME, updateApiKeyRequest, authorizationInfo); final var expectedUpdateKeyAuditEventString = String.format( @@ -661,7 +663,8 @@ public void testSecurityConfigChangeEventFormattingForRoles() throws IOException final var bulkUpdateApiKeyRequest = new BulkUpdateApiKeyRequest( keyIds, randomBoolean() ? 
null : keyRoleDescriptors, - metadataWithSerialization.metadata() + metadataWithSerialization.metadata(), + ApiKeyTests.randomFutureExpirationTime() ); auditTrail.accessGranted(requestId, authentication, BulkUpdateApiKeyAction.NAME, bulkUpdateApiKeyRequest, authorizationInfo); final var expectedBulkUpdateKeyAuditEventString = String.format( @@ -875,7 +878,8 @@ public void testSecurityConfigChangeEventForCrossClusterApiKeys() throws IOExcep final var updateRequest = new UpdateCrossClusterApiKeyRequest( createRequest.getId(), updateAccess, - updateMetadataWithSerialization.metadata() + updateMetadataWithSerialization.metadata(), + ApiKeyTests.randomFutureExpirationTime() ); auditTrail.accessGranted(requestId, authentication, UpdateCrossClusterApiKeyAction.NAME, updateRequest, authorizationInfo); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyAuthenticatorTests.java index ab11b6bf4e572..e5a7456e79d6d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyAuthenticatorTests.java @@ -14,16 +14,26 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.TestTelemetryPlugin; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.action.apikey.ApiKey; import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.AuthenticationField; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.ApiKeyService.ApiKeyCredentials; import org.elasticsearch.xpack.security.authc.AuthenticationService.AuditableRequest; +import org.elasticsearch.xpack.security.metric.SecurityMetricType; + +import java.util.List; +import java.util.Map; +import java.util.function.LongSupplier; import static org.elasticsearch.test.ActionListenerUtils.anyActionListener; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.sameInstance; import static org.mockito.ArgumentMatchers.any; @@ -37,15 +47,15 @@ public class ApiKeyAuthenticatorTests extends ESTestCase { public void testAuditingOnAuthenticationTermination() { final ApiKeyService apiKeyService = mock(ApiKeyService.class); - final ApiKeyAuthenticator apiKeyAuthenticator = new ApiKeyAuthenticator(apiKeyService, randomAlphaOfLengthBetween(3, 8)); + final ApiKeyAuthenticator apiKeyAuthenticator = new ApiKeyAuthenticator( + apiKeyService, + randomAlphaOfLengthBetween(3, 8), + MeterRegistry.NOOP + ); final Authenticator.Context context = mock(Authenticator.Context.class); - final ApiKeyCredentials apiKeyCredentials = new ApiKeyCredentials( - randomAlphaOfLength(20), - new SecureString(randomAlphaOfLength(20).toCharArray()), - randomFrom(ApiKey.Type.values()) - ); + final ApiKeyCredentials apiKeyCredentials = randomApiKeyCredentials(); 
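The metric tests in this file never touch `System.nanoTime()`: the authenticator takes a `LongSupplier` for time, and each test advances it manually inside the stubbed `tryAuthenticate`, making the recorded histogram value deterministic. The measurement shape, reduced to a sketch (the timer class here is hypothetical, not the production recorder):

```java
// Sketch: elapsed-time measurement against an injectable nanosecond clock.
import java.util.function.LongSupplier;

class AuthTimerSketch {
    private final LongSupplier nanoTime; // System::nanoTime in production, a fake in tests

    AuthTimerSketch(LongSupplier nanoTime) {
        this.nanoTime = nanoTime;
    }

    long timeNanos(Runnable authentication) {
        long start = nanoTime.getAsLong();
        authentication.run(); // a test fake may advance the clock inside this call
        return nanoTime.getAsLong() - start; // value the time histogram records
    }
}
```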
when(context.getMostRecentAuthenticationToken()).thenReturn(apiKeyCredentials); final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); when(context.getThreadContext()).thenReturn(threadContext); @@ -72,4 +82,253 @@ public void testAuditingOnAuthenticationTermination() { } } + public void testRecordingSuccessfulAuthenticationMetrics() { + final TestTelemetryPlugin telemetryPlugin = new TestTelemetryPlugin(); + final long initialNanoTime = randomLongBetween(0, 100); + final TestNanoTimeSupplier nanoTimeSupplier = new TestNanoTimeSupplier(initialNanoTime); + final ApiKeyService apiKeyService = mock(ApiKeyService.class); + final ApiKeyAuthenticator apiKeyAuthenticator = createApiKeyAuthenticator(apiKeyService, telemetryPlugin, nanoTimeSupplier); + + final ApiKeyCredentials apiKeyCredentials = randomApiKeyCredentials(); + final Authenticator.Context context = mockApiKeyAuthenticationContext(apiKeyCredentials); + + final long executionTimeInNanos = randomLongBetween(0, 500); + doAnswer(invocation -> { + final ActionListener> listener = invocation.getArgument(2); + nanoTimeSupplier.advanceTime(executionTimeInNanos); + listener.onResponse( + AuthenticationResult.success( + new User(randomAlphaOfLengthBetween(3, 8)), + Map.ofEntries( + Map.entry(AuthenticationField.API_KEY_ID_KEY, apiKeyCredentials.getId()), + Map.entry(AuthenticationField.API_KEY_TYPE_KEY, apiKeyCredentials.getExpectedType().value()) + ) + ) + ); + return null; + }).when(apiKeyService).tryAuthenticate(any(), same(apiKeyCredentials), anyActionListener()); + + final PlainActionFuture> future = new PlainActionFuture<>(); + apiKeyAuthenticator.authenticate(context, future); + final AuthenticationResult authResult = future.actionGet(); + assertThat(authResult.isAuthenticated(), equalTo(true)); + + List successMetrics = telemetryPlugin.getLongCounterMeasurement( + SecurityMetricType.AUTHC_API_KEY.successMetricInfo().name() + ); + assertThat(successMetrics.size(), equalTo(1)); + + // verify that we always record a single authentication + assertThat(successMetrics.get(0).getLong(), equalTo(1L)); + // and that all attributes are present + assertThat( + successMetrics.get(0).attributes(), + equalTo( + Map.ofEntries( + Map.entry(ApiKeyAuthenticator.ATTRIBUTE_API_KEY_ID, apiKeyCredentials.getId()), + Map.entry(ApiKeyAuthenticator.ATTRIBUTE_API_KEY_TYPE, apiKeyCredentials.getExpectedType().value()) + ) + ) + ); + + // verify that there were no failures recorded + assertZeroFailedAuthMetrics(telemetryPlugin); + + // verify we recorded authentication time + assertAuthenticationTimeMetric(telemetryPlugin, apiKeyCredentials, executionTimeInNanos); + } + + public void testRecordingFailedAuthenticationMetrics() { + final TestTelemetryPlugin telemetryPlugin = new TestTelemetryPlugin(); + final long initialNanoTime = randomLongBetween(1, 100); + final TestNanoTimeSupplier nanoTimeSupplier = new TestNanoTimeSupplier(initialNanoTime); + final ApiKeyService apiKeyService = mock(ApiKeyService.class); + final ApiKeyAuthenticator apiKeyAuthenticator = createApiKeyAuthenticator(apiKeyService, telemetryPlugin, nanoTimeSupplier); + + final ApiKeyCredentials apiKeyCredentials = randomApiKeyCredentials(); + final Authenticator.Context context = mockApiKeyAuthenticationContext(apiKeyCredentials); + + final Exception exception = randomFrom(new ElasticsearchException("API key auth exception"), null); + final boolean failWithTermination = randomBoolean(); + final AuthenticationResult failedAuth; + if (failWithTermination) { + failedAuth = 
AuthenticationResult.terminate("terminated API key auth", exception); + } else { + failedAuth = AuthenticationResult.unsuccessful("unsuccessful API key auth", exception); + } + + final long executionTimeInNanos = randomLongBetween(0, 500); + doAnswer(invocation -> { + nanoTimeSupplier.advanceTime(executionTimeInNanos); + final ActionListener> listener = invocation.getArgument(2); + listener.onResponse(failedAuth); + return Void.TYPE; + }).when(apiKeyService).tryAuthenticate(any(), same(apiKeyCredentials), anyActionListener()); + final PlainActionFuture> future = new PlainActionFuture<>(); + apiKeyAuthenticator.authenticate(context, future); + + if (failWithTermination) { + final Exception e = expectThrows(Exception.class, future::actionGet); + if (exception == null) { + assertThat(e, instanceOf(ElasticsearchSecurityException.class)); + assertThat(e.getMessage(), containsString("terminated API key auth")); + } else { + assertThat(e, sameInstance(exception)); + } + assertSingleFailedAuthMetric(telemetryPlugin, apiKeyCredentials, "terminated API key auth"); + } else { + var authResult = future.actionGet(); + assertThat(authResult.isAuthenticated(), equalTo(false)); + assertSingleFailedAuthMetric(telemetryPlugin, apiKeyCredentials, "unsuccessful API key auth"); + } + + // verify that there were no successes recorded + assertZeroSuccessAuthMetrics(telemetryPlugin); + + // verify we recorded authentication time + assertAuthenticationTimeMetric(telemetryPlugin, apiKeyCredentials, executionTimeInNanos); + } + + public void testRecordingFailedAuthenticationMetricsOnExceptions() { + final TestTelemetryPlugin telemetryPlugin = new TestTelemetryPlugin(); + final long initialNanoTime = randomLongBetween(0, 100); + final TestNanoTimeSupplier nanoTimeSupplier = new TestNanoTimeSupplier(initialNanoTime); + final ApiKeyService apiKeyService = mock(ApiKeyService.class); + final ApiKeyAuthenticator apiKeyAuthenticator = createApiKeyAuthenticator(apiKeyService, telemetryPlugin, nanoTimeSupplier); + + final ApiKeyCredentials apiKeyCredentials = randomApiKeyCredentials(); + final Authenticator.Context context = mockApiKeyAuthenticationContext(apiKeyCredentials); + + final ElasticsearchSecurityException exception = new ElasticsearchSecurityException("API key auth exception"); + when(context.getRequest().exceptionProcessingRequest(same(exception), any())).thenReturn(exception); + + final long executionTimeInNanos = randomLongBetween(0, 500); + doAnswer(invocation -> { + nanoTimeSupplier.advanceTime(executionTimeInNanos); + final ActionListener> listener = invocation.getArgument(2); + listener.onFailure(exception); + return Void.TYPE; + }).when(apiKeyService).tryAuthenticate(any(), same(apiKeyCredentials), anyActionListener()); + + final PlainActionFuture> future = new PlainActionFuture<>(); + apiKeyAuthenticator.authenticate(context, future); + + var e = expectThrows(ElasticsearchSecurityException.class, future::actionGet); + assertThat(e, sameInstance(exception)); + + // expecting single recorded auth failure with message same as the thrown exception + assertSingleFailedAuthMetric(telemetryPlugin, apiKeyCredentials, "API key auth exception"); + + // verify that there were no successes recorded + assertZeroSuccessAuthMetrics(telemetryPlugin); + + // verify we recorded authentication time + assertAuthenticationTimeMetric(telemetryPlugin, apiKeyCredentials, executionTimeInNanos); + } + + private void assertSingleFailedAuthMetric( + TestTelemetryPlugin telemetryPlugin, + ApiKeyCredentials apiKeyCredentials, + String 
failureMessage + ) { + List failuresMetrics = telemetryPlugin.getLongCounterMeasurement( + SecurityMetricType.AUTHC_API_KEY.failuresMetricInfo().name() + ); + assertThat(failuresMetrics.size(), equalTo(1)); + assertThat( + failuresMetrics.get(0).attributes(), + equalTo( + Map.ofEntries( + Map.entry(ApiKeyAuthenticator.ATTRIBUTE_API_KEY_ID, apiKeyCredentials.getId()), + Map.entry(ApiKeyAuthenticator.ATTRIBUTE_API_KEY_TYPE, apiKeyCredentials.getExpectedType().value()), + Map.entry(ApiKeyAuthenticator.ATTRIBUTE_API_KEY_AUTHC_FAILURE_REASON, failureMessage) + ) + ) + ); + } + + private void assertAuthenticationTimeMetric( + TestTelemetryPlugin telemetryPlugin, + ApiKeyCredentials credentials, + long expectedAuthenticationTime + ) { + List authTimeMetrics = telemetryPlugin.getLongHistogramMeasurement( + SecurityMetricType.AUTHC_API_KEY.timeMetricInfo().name() + ); + assertThat(authTimeMetrics.size(), equalTo(1)); + assertThat(authTimeMetrics.get(0).getLong(), equalTo(expectedAuthenticationTime)); + assertThat( + authTimeMetrics.get(0).attributes(), + equalTo( + Map.ofEntries( + Map.entry(ApiKeyAuthenticator.ATTRIBUTE_API_KEY_ID, credentials.getId()), + Map.entry(ApiKeyAuthenticator.ATTRIBUTE_API_KEY_TYPE, credentials.getExpectedType().value()) + ) + ) + ); + } + + private void assertZeroSuccessAuthMetrics(TestTelemetryPlugin telemetryPlugin) { + List successMetrics = telemetryPlugin.getLongCounterMeasurement( + SecurityMetricType.AUTHC_API_KEY.successMetricInfo().name() + ); + assertThat(successMetrics.size(), equalTo(0)); + } + + private void assertZeroFailedAuthMetrics(TestTelemetryPlugin telemetryPlugin) { + List failuresMetrics = telemetryPlugin.getLongCounterMeasurement( + SecurityMetricType.AUTHC_API_KEY.failuresMetricInfo().name() + ); + assertThat(failuresMetrics.size(), equalTo(0)); + } + + private static ApiKeyCredentials randomApiKeyCredentials() { + return new ApiKeyCredentials( + randomAlphaOfLength(12), + new SecureString(randomAlphaOfLength(20).toCharArray()), + randomFrom(ApiKey.Type.values()) + ); + } + + private static ApiKeyAuthenticator createApiKeyAuthenticator( + ApiKeyService apiKeyService, + TestTelemetryPlugin telemetryPlugin, + LongSupplier nanoTimeSupplier + ) { + return new ApiKeyAuthenticator( + apiKeyService, + randomAlphaOfLengthBetween(3, 8), + telemetryPlugin.getTelemetryProvider(Settings.EMPTY).getMeterRegistry(), + nanoTimeSupplier + ); + } + + private static Authenticator.Context mockApiKeyAuthenticationContext(ApiKeyCredentials apiKeyCredentials) { + final Authenticator.Context context = mock(Authenticator.Context.class); + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + when(context.getMostRecentAuthenticationToken()).thenReturn(apiKeyCredentials); + when(context.getThreadContext()).thenReturn(threadContext); + final AuditableRequest auditableRequest = mock(AuditableRequest.class); + when(context.getRequest()).thenReturn(auditableRequest); + return context; + } + + private static class TestNanoTimeSupplier implements LongSupplier { + + private long currentTime; + + TestNanoTimeSupplier(long initialTime) { + this.currentTime = initialTime; + } + + public void advanceTime(long timeToAdd) { + this.currentTime += timeToAdd; + } + + @Override + public long getAsLong() { + return currentTime; + } + } + } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index 
25194ca1e0234..b921fef9fd917 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -60,7 +60,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; @@ -411,25 +410,31 @@ public void testInvalidateApiKeysWillSetInvalidatedFlagAndRecordTimestamp() { builder.map(buildApiKeySourceDoc("some_hash".toCharArray())); searchHit.sourceRef(BytesReference.bytes(builder)); } - final var internalSearchResponse = new InternalSearchResponse( - new SearchHits( - new SearchHit[] { searchHit }, - new TotalHits(1, TotalHits.Relation.EQUAL_TO), - randomFloat(), + ActionListener.respondAndRelease( + listener, + new SearchResponse( + new SearchHits( + new SearchHit[] { searchHit }, + new TotalHits(1, TotalHits.Relation.EQUAL_TO), + randomFloat(), + null, + null, + null + ), null, null, + false, + null, + null, + 0, + randomAlphaOfLengthBetween(3, 8), + 1, + 1, + 0, + 10, + null, null - ), - null, - null, - null, - false, - null, - 0 - ); - ActionListener.respondAndRelease( - listener, - new SearchResponse(internalSearchResponse, randomAlphaOfLengthBetween(3, 8), 1, 1, 0, 10, null, null) + ) ); return null; }).when(client).search(any(SearchRequest.class), anyActionListener()); @@ -753,22 +758,20 @@ public void testCrossClusterApiKeyUsageStats() { ActionListener.respondAndRelease( listener, new SearchResponse( - new InternalSearchResponse( - new SearchHits( - searchHits.toArray(SearchHit[]::new), - new TotalHits(searchHits.size(), TotalHits.Relation.EQUAL_TO), - randomFloat(), - null, - null, - null - ), + new SearchHits( + searchHits.toArray(SearchHit[]::new), + new TotalHits(searchHits.size(), TotalHits.Relation.EQUAL_TO), + randomFloat(), null, null, - null, - false, - null, - 0 + null ), + null, + null, + false, + null, + null, + 0, randomAlphaOfLengthBetween(3, 8), 1, 1, @@ -2116,6 +2119,8 @@ public void testMaybeBuildUpdatedDocument() throws IOException { } else { oldKeyRoles = randomList(3, RoleDescriptorTests::randomRoleDescriptor); } + final long now = randomMillisUpToYear9999(); + when(clock.instant()).thenReturn(Instant.ofEpochMilli(now)); final Map oldMetadata = ApiKeyTests.randomMetadata(); final Version oldVersion = VersionUtils.randomVersion(random()); final ApiKeyDoc oldApiKeyDoc = ApiKeyDoc.fromXContent( @@ -2144,6 +2149,8 @@ public void testMaybeBuildUpdatedDocument() throws IOException { final boolean changeMetadata = randomBoolean(); final boolean changeVersion = randomBoolean(); final boolean changeCreator = randomBoolean(); + final boolean changeExpiration = randomBoolean(); + final Set newUserRoles = changeUserRoles ? randomValueOtherThan(oldUserRoles, () -> randomSet(0, 3, RoleDescriptorTests::randomRoleDescriptor)) : oldUserRoles; @@ -2177,11 +2184,14 @@ public void testMaybeBuildUpdatedDocument() throws IOException { .build(false) ) : oldAuthentication; + final TimeValue newExpiration = changeExpiration ? 
randomFrom(ApiKeyTests.randomFutureExpirationTime()) : null; final String apiKeyId = randomAlphaOfLength(10); final BaseUpdateApiKeyRequest request = mock(BaseUpdateApiKeyRequest.class); when(request.getType()).thenReturn(type); when(request.getRoleDescriptors()).thenReturn(newKeyRoles); when(request.getMetadata()).thenReturn(newMetadata); + when(request.getExpiration()).thenReturn(newExpiration); + final var service = createApiKeyService(); final XContentBuilder builder = ApiKeyService.maybeBuildUpdatedDocument( @@ -2190,10 +2200,16 @@ public void testMaybeBuildUpdatedDocument() throws IOException { newVersion, newAuthentication, request, - newUserRoles + newUserRoles, + clock ); - final boolean noop = (changeCreator || changeMetadata || changeKeyRoles || changeUserRoles || changeVersion) == false; + final boolean noop = (changeCreator + || changeMetadata + || changeKeyRoles + || changeUserRoles + || changeVersion + || changeExpiration) == false; if (noop) { assertNull(builder); } else { @@ -2204,7 +2220,6 @@ public void testMaybeBuildUpdatedDocument() throws IOException { assertEquals(oldApiKeyDoc.type, updatedApiKeyDoc.type); assertEquals(oldApiKeyDoc.name, updatedApiKeyDoc.name); assertEquals(oldApiKeyDoc.hash, updatedApiKeyDoc.hash); - assertEquals(oldApiKeyDoc.expirationTime, updatedApiKeyDoc.expirationTime); assertEquals(oldApiKeyDoc.creationTime, updatedApiKeyDoc.creationTime); assertEquals(oldApiKeyDoc.invalidated, updatedApiKeyDoc.invalidated); assertEquals(newVersion.id, updatedApiKeyDoc.version); @@ -2234,6 +2249,11 @@ public void testMaybeBuildUpdatedDocument() throws IOException { } else { assertEquals(newMetadata, XContentHelper.convertToMap(updatedApiKeyDoc.metadataFlattened, true, XContentType.JSON).v2()); } + if (newExpiration != null) { + assertEquals(clock.instant().plusSeconds(newExpiration.getSeconds()).toEpochMilli(), updatedApiKeyDoc.expirationTime); + } else { + assertEquals(oldApiKeyDoc.expirationTime, updatedApiKeyDoc.expirationTime); + } assertEquals(newAuthentication.getEffectiveSubject().getUser().principal(), updatedApiKeyDoc.creator.get("principal")); assertEquals(newAuthentication.getEffectiveSubject().getUser().fullName(), updatedApiKeyDoc.creator.get("full_name")); assertEquals(newAuthentication.getEffectiveSubject().getUser().email(), updatedApiKeyDoc.creator.get("email")); @@ -2243,13 +2263,11 @@ public void testMaybeBuildUpdatedDocument() throws IOException { assertEquals(realm.getType(), updatedApiKeyDoc.creator.get("realm_type")); if (realm.getDomain() != null) { @SuppressWarnings("unchecked") - final var actualRealmDomain = RealmDomain.fromXContent( - XContentHelper.mapToXContentParser( - XContentParserConfiguration.EMPTY, - (Map) updatedApiKeyDoc.creator.get("realm_domain") - ) - ); - assertEquals(realm.getDomain(), actualRealmDomain); + var m = (Map) updatedApiKeyDoc.creator.get("realm_domain"); + try (var p = XContentHelper.mapToXContentParser(XContentParserConfiguration.EMPTY, m)) { + final var actualRealmDomain = RealmDomain.fromXContent(p); + assertEquals(realm.getDomain(), actualRealmDomain); + } } else { assertFalse(updatedApiKeyDoc.creator.containsKey("realm_domain")); } @@ -2601,7 +2619,8 @@ public void testCreateOrUpdateApiKeyWithWorkflowsRestrictionForUnsupportedVersio final BulkUpdateApiKeyRequest updateRequest = new BulkUpdateApiKeyRequest( randomList(1, 3, () -> randomAlphaOfLengthBetween(3, 5)), roleDescriptorsWithWorkflowsRestriction, - Map.of() + Map.of(), + ApiKeyTests.randomFutureExpirationTime() ); final PlainActionFuture 
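The expiration assertion a few hunks above pins down the semantics of an update: the new expiry is the update-time instant plus the requested TTL, stored as epoch millis. As a worked example under a fixed clock (pure JDK plus `TimeValue`; the class is illustrative):

```java
// Worked example of the expiration arithmetic asserted in testMaybeBuildUpdatedDocument.
import java.time.Clock;
import java.time.Instant;
import java.time.ZoneOffset;

import org.elasticsearch.core.TimeValue;

class ExpirationMath {
    static long expirationTimeMillis(Clock clock, TimeValue expiration) {
        return clock.instant().plusSeconds(expiration.getSeconds()).toEpochMilli();
    }

    public static void main(String[] args) {
        Clock fixed = Clock.fixed(Instant.ofEpochMilli(1_700_000_000_000L), ZoneOffset.UTC);
        // 30 days = 2_592_000 seconds, so this prints 1702592000000
        System.out.println(expirationTimeMillis(fixed, TimeValue.timeValueDays(30)));
    }
}
```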
updateFuture = new PlainActionFuture<>(); service.updateApiKeys(authentication, updateRequest, Set.of(), updateFuture); @@ -2663,7 +2682,8 @@ public void testValidateOwnerUserRoleDescriptorsWithWorkflowsRestriction() { final BulkUpdateApiKeyRequest updateRequest = new BulkUpdateApiKeyRequest( randomList(1, 3, () -> randomAlphaOfLengthBetween(3, 5)), requestRoleDescriptors, - Map.of() + Map.of(), + ApiKeyTests.randomFutureExpirationTime() ); final PlainActionFuture updateFuture = new PlainActionFuture<>(); service.updateApiKeys(authentication, updateRequest, userRoleDescriptorsWithWorkflowsRestriction, updateFuture); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index 6fb0d69175307..c524847e9dbbb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -53,6 +53,7 @@ import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; @@ -364,7 +365,8 @@ public void init() throws Exception { tokenService, apiKeyService, serviceAccountService, - operatorPrivilegesService + operatorPrivilegesService, + MeterRegistry.NOOP ); } @@ -660,7 +662,8 @@ public void testAuthenticateSmartRealmOrderingDisabled() { tokenService, apiKeyService, serviceAccountService, - operatorPrivilegesService + operatorPrivilegesService, + MeterRegistry.NOOP ); User user = new User("_username", "r1"); when(firstRealm.supports(token)).thenReturn(true); @@ -1040,7 +1043,8 @@ public void testAuthenticateTransportContextAndHeader() throws Exception { tokenService, apiKeyService, serviceAccountService, - operatorPrivilegesService + operatorPrivilegesService, + MeterRegistry.NOOP ); boolean requestIdAlreadyPresent = randomBoolean(); SetOnce reqId = new SetOnce<>(); @@ -1090,7 +1094,8 @@ public void testAuthenticateTransportContextAndHeader() throws Exception { tokenService, apiKeyService, serviceAccountService, - operatorPrivilegesService + operatorPrivilegesService, + MeterRegistry.NOOP ); threadContext2.putHeader(AuthenticationField.AUTHENTICATION_KEY, authHeaderRef.get()); @@ -1113,7 +1118,8 @@ public void testAuthenticateTransportContextAndHeader() throws Exception { tokenService, apiKeyService, serviceAccountService, - operatorPrivilegesService + operatorPrivilegesService, + MeterRegistry.NOOP ); service.authenticate("_action", new InternalRequest(), InternalUsers.SYSTEM_USER, ActionListener.wrap(result -> { if (requestIdAlreadyPresent) { @@ -1175,7 +1181,8 @@ public void testWrongTokenDoesNotFallbackToAnonymous() { tokenService, apiKeyService, serviceAccountService, - operatorPrivilegesService + operatorPrivilegesService, + MeterRegistry.NOOP ); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { @@ -1219,7 +1226,8 @@ public void testWrongApiKeyDoesNotFallbackToAnonymous() { tokenService, apiKeyService, serviceAccountService, - operatorPrivilegesService + operatorPrivilegesService, + MeterRegistry.NOOP ); doAnswer(invocationOnMock -> { final GetRequest request = (GetRequest) 
invocationOnMock.getArguments()[0]; @@ -1283,7 +1291,8 @@ public void testAnonymousUserRest() throws Exception { tokenService, apiKeyService, serviceAccountService, - operatorPrivilegesService + operatorPrivilegesService, + MeterRegistry.NOOP ); RestRequest request = new FakeRestRequest(); @@ -1319,7 +1328,8 @@ public void testAuthenticateRestRequestDisallowAnonymous() throws Exception { tokenService, apiKeyService, serviceAccountService, - operatorPrivilegesService + operatorPrivilegesService, + MeterRegistry.NOOP ); RestRequest request = new FakeRestRequest(); @@ -1350,7 +1360,8 @@ public void testAnonymousUserTransportNoDefaultUser() throws Exception { tokenService, apiKeyService, serviceAccountService, - operatorPrivilegesService + operatorPrivilegesService, + MeterRegistry.NOOP ); InternalRequest message = new InternalRequest(); boolean requestIdAlreadyPresent = randomBoolean(); @@ -1385,7 +1396,8 @@ public void testAnonymousUserTransportWithDefaultUser() throws Exception { tokenService, apiKeyService, serviceAccountService, - operatorPrivilegesService + operatorPrivilegesService, + MeterRegistry.NOOP ); InternalRequest message = new InternalRequest(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccountsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccountsTests.java index 772512a7f69d0..ecef71f1c4a68 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccountsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccountsTests.java @@ -12,13 +12,13 @@ import org.elasticsearch.action.admin.indices.create.AutoCreateAction; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; -import org.elasticsearch.action.admin.indices.mapping.put.AutoPutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.TransportAutoPutMappingAction; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; +import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; -import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutIndexTemplateAction; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.delete.TransportDeleteAction; import org.elasticsearch.action.get.TransportGetAction; @@ -32,7 +32,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xpack.core.ilm.action.GetLifecycleAction; -import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkAction; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction; import 
org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; @@ -131,7 +131,7 @@ public void testElasticFleetServerPrivileges() { ".logs-endpoint.action.responses-" + randomAlphaOfLengthBetween(1, 20), ".logs-endpoint.heartbeat-" + randomAlphaOfLengthBetween(1, 20) ).stream().map(this::mockIndexAbstraction).forEach(index -> { - assertThat(role.indices().allowedIndicesMatcher(AutoPutMappingAction.NAME).test(index), is(true)); + assertThat(role.indices().allowedIndicesMatcher(TransportAutoPutMappingAction.TYPE.name()).test(index), is(true)); assertThat(role.indices().allowedIndicesMatcher(AutoCreateAction.NAME).test(index), is(true)); assertThat(role.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(index), is(true)); assertThat(role.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(index), is(true)); @@ -142,11 +142,11 @@ public void testElasticFleetServerPrivileges() { assertThat(role.indices().allowedIndicesMatcher(TransportMultiGetAction.NAME).test(index), is(false)); assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(index), is(false)); assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(index), is(false)); - assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(index), is(false)); + assertThat(role.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(index), is(false)); }); final IndexAbstraction profilingIndex = mockIndexAbstraction("profiling-" + randomAlphaOfLengthBetween(1, 20)); - assertThat(role.indices().allowedIndicesMatcher(AutoPutMappingAction.NAME).test(profilingIndex), is(true)); + assertThat(role.indices().allowedIndicesMatcher(TransportAutoPutMappingAction.TYPE.name()).test(profilingIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(AutoCreateAction.NAME).test(profilingIndex), is(false)); assertThat(role.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(profilingIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(profilingIndex), is(false)); @@ -157,10 +157,10 @@ public void testElasticFleetServerPrivileges() { assertThat(role.indices().allowedIndicesMatcher(TransportMultiGetAction.NAME).test(profilingIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(profilingIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(profilingIndex), is(true)); - assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(profilingIndex), is(false)); + assertThat(role.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(profilingIndex), is(false)); List.of("synthetics-" + randomAlphaOfLengthBetween(1, 20)).stream().map(this::mockIndexAbstraction).forEach(index -> { - assertThat(role.indices().allowedIndicesMatcher(AutoPutMappingAction.NAME).test(index), is(true)); + assertThat(role.indices().allowedIndicesMatcher(TransportAutoPutMappingAction.TYPE.name()).test(index), is(true)); assertThat(role.indices().allowedIndicesMatcher(AutoCreateAction.NAME).test(index), is(true)); assertThat(role.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(index), is(true)); assertThat(role.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(index), is(true)); @@ -171,7 +171,7 @@ public void testElasticFleetServerPrivileges() { 
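
For reference, the rename pattern these hunks apply can be sketched as follows (illustrative only, not part of the patch): the wire-level action name itself does not change; only the class holding the constant moves, from the removed standalone `*Action` classes to the `ActionType` instances on the corresponding `Transport*Action` classes.

```java
import org.elasticsearch.action.admin.indices.mapping.put.TransportAutoPutMappingAction;

class ActionNameMigrationSketch {
    static String autoPutMappingActionName() {
        // Previously referenced as AutoPutMappingAction.NAME; the underlying
        // action name string ("indices:admin/mapping/auto_put") is unchanged.
        return TransportAutoPutMappingAction.TYPE.name();
    }
}
```
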
assertThat(role.indices().allowedIndicesMatcher(TransportMultiGetAction.NAME).test(index), is(true)); assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(index), is(true)); assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(index), is(true)); - assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(index), is(false)); + assertThat(role.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(index), is(false)); }); List.of( @@ -196,7 +196,7 @@ public void testElasticFleetServerPrivileges() { assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(dotFleetIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(IndicesStatsAction.NAME).test(dotFleetIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(TransportDeleteIndexAction.TYPE.name()).test(dotFleetIndex), is(false)); - assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(dotFleetIndex), is(false)); + assertThat(role.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(dotFleetIndex), is(false)); assertThat(role.indices().allowedIndicesMatcher("indices:foo").test(dotFleetIndex), is(false)); }); @@ -211,7 +211,7 @@ public void testElasticFleetServerPrivileges() { assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(dotFleetSecretsIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(IndicesStatsAction.NAME).test(dotFleetSecretsIndex), is(false)); assertThat(role.indices().allowedIndicesMatcher(TransportDeleteIndexAction.TYPE.name()).test(dotFleetSecretsIndex), is(false)); - assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(dotFleetSecretsIndex), is(false)); + assertThat(role.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(dotFleetSecretsIndex), is(false)); assertThat(role.indices().allowedIndicesMatcher("indices:foo").test(dotFleetSecretsIndex), is(false)); final TransportRequest request = mock(TransportRequest.class); @@ -230,7 +230,7 @@ public void testElasticFleetServerPrivileges() { assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(apmSampledTracesIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(IndicesStatsAction.NAME).test(apmSampledTracesIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(TransportDeleteIndexAction.TYPE.name()).test(apmSampledTracesIndex), is(false)); - assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(apmSampledTracesIndex), is(false)); + assertThat(role.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(apmSampledTracesIndex), is(false)); final String privilegeName = randomAlphaOfLengthBetween(3, 16); assertThat( @@ -332,9 +332,9 @@ public void testElasticEnterpriseSearchServerAccount() { assertThat(role.cluster().check(PutRoleAction.NAME, request, authentication), is(true)); // manage_index_templates - assertThat(role.cluster().check(PutIndexTemplateAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(TransportPutIndexTemplateAction.TYPE.name(), request, authentication), is(true)); assertThat(role.cluster().check(GetIndexTemplatesAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(DeleteIndexTemplateAction.NAME, request, authentication), is(true)); + 
assertThat(role.cluster().check(TransportDeleteIndexTemplateAction.TYPE.name(), request, authentication), is(true)); // monitoring assertThat(role.cluster().check(MonitoringBulkAction.NAME, request, authentication), is(true)); @@ -342,7 +342,7 @@ public void testElasticEnterpriseSearchServerAccount() { // manage_ilm assertThat(role.cluster().check(GetLifecycleAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(PutLifecycleAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(ILMActions.PUT.name(), request, authentication), is(true)); List.of( "search-" + randomAlphaOfLengthBetween(1, 20), @@ -375,7 +375,10 @@ public void testElasticEnterpriseSearchServerAccount() { assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(enterpriseSearchIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(enterpriseSearchIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher(IndicesStatsAction.NAME).test(enterpriseSearchIndex), is(true)); - assertThat(role.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(enterpriseSearchIndex), is(true)); + assertThat( + role.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(enterpriseSearchIndex), + is(true) + ); assertThat(role.indices().allowedIndicesMatcher(RefreshAction.NAME).test(enterpriseSearchIndex), is(true)); assertThat(role.indices().allowedIndicesMatcher("indices:foo").test(enterpriseSearchIndex), is(false)); }); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java index 8d5d89b4c5054..3a9fee4288bf2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java @@ -42,7 +42,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.threadpool.ThreadPool; @@ -270,19 +269,24 @@ public void testFindTokensFor() { ) ) .toArray(SearchHit[]::new); - final InternalSearchResponse internalSearchResponse; - internalSearchResponse = new InternalSearchResponse( - new SearchHits(hits, new TotalHits(nhits, TotalHits.Relation.EQUAL_TO), randomFloat(), null, null, null), - null, - null, - null, - false, - null, - 0 - ); ActionListener.respondAndRelease( l, - new SearchResponse(internalSearchResponse, randomAlphaOfLengthBetween(3, 8), 1, 1, 0, 10, null, null) + new SearchResponse( + new SearchHits(hits, new TotalHits(nhits, TotalHits.Relation.EQUAL_TO), randomFloat(), null, null, null), + null, + null, + false, + null, + null, + 0, + randomAlphaOfLengthBetween(3, 8), + 1, + 1, + 0, + 10, + null, + null + ) ); } else if (r instanceof ClearScrollRequest) { l.onResponse(new ClearScrollResponse(true, 1)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/SecondaryAuthenticatorTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/SecondaryAuthenticatorTests.java index b3a6bed9a5a94..9a8bb5764ce2d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/SecondaryAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/SecondaryAuthenticatorTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.license.TestUtils; import org.elasticsearch.license.internal.XPackLicenseStatus; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TestMatchers; import org.elasticsearch.test.rest.FakeRestRequest; @@ -156,7 +157,8 @@ public void setupMocks() throws Exception { tokenService, apiKeyService, serviceAccountService, - OperatorPrivileges.NOOP_OPERATOR_PRIVILEGES_SERVICE + OperatorPrivileges.NOOP_OPERATOR_PRIVILEGES_SERVICE, + MeterRegistry.NOOP ); authenticator = new SecondaryAuthenticator(securityContext, authenticationService, auditTrail); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index 3b52f86c00ba8..169275ccc3ee3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.script.mustache.MustacheScriptEngine; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.ToXContent; @@ -355,25 +354,31 @@ private void doAnswerWithSearchResult(Client client, ExpressionRoleMapping mappi mapping.toXContent(builder, ToXContent.EMPTY_PARAMS); searchHit.sourceRef(BytesReference.bytes(builder)); } - final var internalSearchResponse = new InternalSearchResponse( - new SearchHits( - new SearchHit[] { searchHit }, - new TotalHits(1, TotalHits.Relation.EQUAL_TO), - randomFloat(), + ActionListener.respondAndRelease( + listener, + new SearchResponse( + new SearchHits( + new SearchHit[] { searchHit }, + new TotalHits(1, TotalHits.Relation.EQUAL_TO), + randomFloat(), + null, + null, + null + ), + null, + null, + false, + null, null, + 0, + randomAlphaOfLengthBetween(3, 8), + 1, + 1, + 0, + 10, null, null - ), - null, - null, - null, - false, - null, - 0 - ); - ActionListener.respondAndRelease( - listener, - new SearchResponse(internalSearchResponse, randomAlphaOfLengthBetween(3, 8), 1, 1, 0, 10, null, null) + ) ); return null; }).when(client).search(any(SearchRequest.class), anyActionListener()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index 4cabe5a8ec3ba..74ddd4a90b0ec 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.security.authz; import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.CompositeIndicesRequest; import org.elasticsearch.action.DocWriteRequest; @@ -25,22 +26,22 @@ import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; import org.elasticsearch.action.admin.indices.get.GetIndexAction; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsAction; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; +import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; +import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.elasticsearch.action.admin.indices.template.put.TransportPutIndexTemplateAction; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkItemRequest; import org.elasticsearch.action.bulk.BulkRequest; @@ -61,6 +62,7 @@ import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.OpenPointInTimeRequest; import org.elasticsearch.action.search.ParsedScrollId; +import org.elasticsearch.action.search.SearchContextId; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.SearchTransportService; @@ -101,6 +103,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.bulk.stats.BulkOperationListener; @@ -110,7 +113,12 @@ import org.elasticsearch.license.MockLicenseState; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.search.SearchPhaseResult; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.builder.PointInTimeBuilder; +import 
org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -1229,6 +1237,72 @@ public void testSearchAgainstIndex() { verifyNoMoreInteractions(auditTrail); } + public void testSearchPITAgainstIndex() { + RoleDescriptor role = new RoleDescriptor( + "search_index", + null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("index-*").privileges("read").build() }, + null + ); + roleMap.put(role.getName(), role); + final Authentication authentication = createAuthentication(new User("test search user", role.getName())); + + final String requestId = AuditUtil.getOrGenerateRequestId(threadContext); + final String indexName = "index-" + randomAlphaOfLengthBetween(1, 5); + + final ClusterState clusterState = mockMetadataWithIndex(indexName); + final IndexMetadata indexMetadata = clusterState.metadata().index(indexName); + + PointInTimeBuilder pit = new PointInTimeBuilder(createEncodedPIT(indexMetadata.getIndex())); + SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder().pointInTimeBuilder(pit)) + .allowPartialSearchResults(false); + final ShardSearchRequest shardRequest = new ShardSearchRequest( + new OriginalIndices(new String[] { indexName }, searchRequest.indicesOptions()), + searchRequest, + new ShardId(indexMetadata.getIndex(), 0), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + System.currentTimeMillis(), + null + ); + this.setFakeOriginatingAction = false; + authorize(authentication, TransportSearchAction.TYPE.name(), searchRequest, true, () -> { + verify(rolesStore).getRoles(Mockito.same(authentication), Mockito.any()); + IndicesAccessControl iac = threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); + // Successful search action authorization should set a parent authorization header. + assertThat(securityContext.getParentAuthorization().action(), equalTo(TransportSearchAction.TYPE.name())); + // Within the action handler, execute a child action (the query phase of search) + authorize(authentication, SearchTransportService.QUERY_ACTION_NAME, shardRequest, false, () -> { + // This child action triggers a second interaction with the role store (which is cached) + verify(rolesStore, times(2)).getRoles(Mockito.same(authentication), Mockito.any()); + // But it does not create a new IndicesAccessControl + assertThat(threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY), sameInstance(iac)); + // The parent authorization header should only be present for direct child actions + // and not be carried over for a child of a child action. + // Meaning, only the query phase action should be pre-authorized in this case, and potential sub-actions should not be. + assertThat(securityContext.getParentAuthorization(), nullValue()); + }); + }); + assertThat(searchRequest.indices().length, equalTo(0)); + verify(auditTrail).accessGranted( + eq(requestId), + eq(authentication), + eq(TransportSearchAction.TYPE.name()), + eq(searchRequest), + authzInfoRoles(new String[] { role.getName() }) + ); + verify(auditTrail).accessGranted( + eq(requestId), + eq(authentication), + eq(SearchTransportService.QUERY_ACTION_NAME), + eq(shardRequest), + authzInfoRoles(new String[] { role.getName() }) + ); + verifyNoMoreInteractions(auditTrail); + }
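
The test above hinges on one property of point-in-time (PIT) search: the request itself names no indices, so the authorization layer has to derive them from the encoded PIT id. A minimal sketch (`encodedPitId` is a hypothetical parameter; the test builds one via its `createEncodedPIT` helper):

```java
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.search.builder.PointInTimeBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;

class PitSearchRequestSketch {
    static SearchRequest pitSearch(String encodedPitId) {
        SearchRequest request = new SearchRequest().source(
            new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(encodedPitId))
        ).allowPartialSearchResults(false);
        // No concrete indices on the request; they are resolved from the PIT id,
        // which is why the test expects searchRequest.indices().length == 0.
        return request;
    }
}
```
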
+ public void testScrollRelatedRequestsAllowed() { RoleDescriptor role = new RoleDescriptor( "a_all", @@ -1575,12 +1649,12 @@ public void testDenialErrorMessagesForIndexTemplateAction() { ElasticsearchSecurityException securityException = expectThrows( ElasticsearchSecurityException.class, - () -> authorize(authentication, PutIndexTemplateAction.NAME, request) + () -> authorize(authentication, TransportPutIndexTemplateAction.TYPE.name(), request) ); assertThat( securityException, throwableWithMessage( - containsString("[" + PutIndexTemplateAction.NAME + "] is unauthorized for user [" + user.principal() + "]") + containsString("[" + TransportPutIndexTemplateAction.TYPE.name() + "] is unauthorized for user [" + user.principal() + "]") ) ); assertThat( @@ -2013,7 +2087,7 @@ public void testGrantAllRestrictedUserCannotExecuteOperationAgainstSecurityIndic ); requests.add( new Tuple<>( - UpdateSettingsAction.NAME, + TransportUpdateSettingsAction.TYPE.name(), new UpdateSettingsRequest().indices(randomFrom(SECURITY_MAIN_ALIAS, INTERNAL_SECURITY_MAIN_INDEX_7)) ) ); @@ -2041,7 +2115,7 @@ public void testGrantAllRestrictedUserCannotExecuteOperationAgainstSecurityIndic ); requests.add( new Tuple<>( - IndicesShardStoresAction.NAME, + TransportIndicesShardStoresAction.TYPE.name(), new IndicesShardStoresRequest().indices(randomFrom(SECURITY_MAIN_ALIAS, INTERNAL_SECURITY_MAIN_INDEX_7)) ) ); @@ -2124,7 +2198,9 @@ public void testMonitoringOperationsAgainstSecurityIndexRequireAllowRestricted() requests.add(new Tuple<>(RecoveryAction.NAME, new RecoveryRequest().indices(SECURITY_MAIN_ALIAS))); requests.add(new Tuple<>(IndicesSegmentsAction.NAME, new IndicesSegmentsRequest().indices(SECURITY_MAIN_ALIAS))); requests.add(new Tuple<>(GetSettingsAction.NAME, new GetSettingsRequest().indices(SECURITY_MAIN_ALIAS))); - requests.add(new Tuple<>(IndicesShardStoresAction.NAME, new IndicesShardStoresRequest().indices(SECURITY_MAIN_ALIAS))); + requests.add( + new Tuple<>(TransportIndicesShardStoresAction.TYPE.name(), new IndicesShardStoresRequest().indices(SECURITY_MAIN_ALIAS)) + ); for (final Tuple<String, TransportRequest> requestTuple : requests) { final String action = requestTuple.v1(); @@ -2274,7 +2350,10 @@ public void testSuperusersCannotExecuteWriteOperationAgainstSecurityIndex() { ) ); requests.add( - new Tuple<>(PutMappingAction.NAME, new PutMappingRequest(randomFrom(SECURITY_MAIN_ALIAS, INTERNAL_SECURITY_MAIN_INDEX_7))) + new Tuple<>( + TransportPutMappingAction.TYPE.name(), + new PutMappingRequest(randomFrom(SECURITY_MAIN_ALIAS, INTERNAL_SECURITY_MAIN_INDEX_7)) + ) ); requests.add( new Tuple<>( @@ -3545,6 +3624,26 @@ static AuthorizationInfo authzInfoRoles(String[] expectedRoles) { return ArgumentMatchers.argThat(new RBACAuthorizationInfoRoleMatcher(expectedRoles)); }
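
The denial tests above all follow the same table-driven shape: build a list of action-name/request pairs targeting the security index, then assert that each one is rejected. A condensed sketch of that pattern (the literal index name ".security-7" stands in for the test constants and is an assumption here):

```java
import java.util.ArrayList;
import java.util.List;

import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction;
import org.elasticsearch.core.Tuple;
import org.elasticsearch.transport.TransportRequest;

class SecurityIndexDenialTableSketch {
    static List<Tuple<String, TransportRequest>> deniedRequests() {
        List<Tuple<String, TransportRequest>> requests = new ArrayList<>();
        requests.add(new Tuple<>(TransportPutMappingAction.TYPE.name(), new PutMappingRequest(".security-7")));
        // ... one tuple per action under test; each pair is then authorized and
        // expected to raise an ElasticsearchSecurityException.
        return requests;
    }
}
```
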
+ private static class TestSearchPhaseResult extends SearchPhaseResult { + final DiscoveryNode node; + TestSearchPhaseResult(ShardSearchContextId contextId, DiscoveryNode node) { this.contextId = contextId; + this.node = node; + } + } + + private static String createEncodedPIT(Index index) { + DiscoveryNode node1 = DiscoveryNodeUtils.create("node_1"); + TestSearchPhaseResult testSearchPhaseResult1 = new TestSearchPhaseResult(new ShardSearchContextId("a", 1), node1); + testSearchPhaseResult1.setSearchShardTarget( + new SearchShardTarget("node_1", new ShardId(index.getName(), index.getUUID(), 0), null) + ); + List<SearchPhaseResult> results = new ArrayList<>(); + results.add(testSearchPhaseResult1); + return SearchContextId.encode(results, Collections.emptyMap(), TransportVersion.current()); + } + private static class RBACAuthorizationInfoRoleMatcher implements ArgumentMatcher<AuthorizationInfo> { private final String[] wanted; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java index 45838e75940b4..7a70bb23e0db4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java @@ -17,8 +17,8 @@ import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; @@ -1545,7 +1545,10 @@ public void testNonRemotableRequestDoesNotAllowRemoteIndices() { new DeleteIndexRequest("remote:foo").indicesOptions(options), TransportDeleteIndexAction.TYPE.name() ), - new Tuple<TransportRequest, String>(new PutMappingRequest("remote:foo").indicesOptions(options), PutMappingAction.NAME) + new Tuple<TransportRequest, String>( + new PutMappingRequest("remote:foo").indicesOptions(options), + TransportPutMappingAction.TYPE.name() + ) ); IndexNotFoundException e = expectThrows( IndexNotFoundException.class, @@ -1562,7 +1565,7 @@ public void testNonRemotableRequestDoesNotAllowRemoteWildcardIndices() { new DeleteIndexRequest("*:*").indicesOptions(options), TransportDeleteIndexAction.TYPE.name() ), - new Tuple<TransportRequest, String>(new PutMappingRequest("*:*").indicesOptions(options), PutMappingAction.NAME) + new Tuple<TransportRequest, String>(new PutMappingRequest("*:*").indicesOptions(options), TransportPutMappingAction.TYPE.name()) ); final ResolvedIndices resolved = resolveIndices(tuple.v1(), buildAuthorizedIndices(user, tuple.v2())); assertNoIndices((IndicesRequest.Replaceable) tuple.v1(), resolved); @@ -1765,7 +1768,7 @@ public void testAliasDateMathExpressionNotSupported() { public void testDynamicPutMappingRequestFromAlias() { PutMappingRequest request = new PutMappingRequest(Strings.EMPTY_ARRAY).setConcreteIndex(new Index("foofoo", UUIDs.base64UUID())); User user = new User("alias-writer", "alias_read_write"); - AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, PutMappingAction.NAME); + AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, TransportPutMappingAction.TYPE.name()); String putMappingIndexOrAlias = 
IndicesAndAliasesResolver.getPutMappingIndexOrAlias(request, authorizedIndices::check, metadata); assertEquals("barbaz", putMappingIndexOrAlias); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java index 753f498e2fb90..fa699d8e6e72e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java @@ -12,8 +12,8 @@ import org.elasticsearch.action.admin.cluster.health.TransportClusterHealthAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.action.delete.TransportDeleteAction; import org.elasticsearch.action.index.TransportIndexAction; import org.elasticsearch.action.search.SearchRequest; @@ -1428,7 +1428,7 @@ public void testExplicitMappingUpdatesAreNotGrantedWithIngestPrivileges() { request.source("{ \"properties\": { \"message\": { \"type\": \"text\" } } }", XContentType.JSON); AuthorizedIndices authorizedIndices = RBACEngine.resolveAuthorizedIndicesFromRole( role, - getRequestInfo(request, PutMappingAction.NAME), + getRequestInfo(request, TransportPutMappingAction.TYPE.name()), lookup, () -> ignore -> {} ); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java index afc1d0931547a..ac834911fc4e6 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java @@ -8,8 +8,8 @@ import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.TransportVersion; -import org.elasticsearch.action.admin.indices.mapping.put.AutoPutMappingAction; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.TransportAutoPutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.DataStream; @@ -283,8 +283,8 @@ public void testCorePermissionAuthorize() { assertFalse(iac.hasIndexPermissions("ba")); assertTrue(core.check(TransportSearchAction.TYPE.name())); - assertTrue(core.check(PutMappingAction.NAME)); - assertTrue(core.check(AutoPutMappingAction.NAME)); + assertTrue(core.check(TransportPutMappingAction.TYPE.name())); + assertTrue(core.check(TransportAutoPutMappingAction.TYPE.name())); assertFalse(core.check("unknown")); // test with two indices @@ -326,8 +326,8 @@ public void testCorePermissionAuthorize() { assertTrue(iac.getIndexPermissions("a2").getFieldPermissions().hasFieldLevelSecurity()); assertTrue(core.check(TransportSearchAction.TYPE.name())); - 
assertTrue(core.check(PutMappingAction.NAME)); - assertTrue(core.check(AutoPutMappingAction.NAME)); + assertTrue(core.check(TransportPutMappingAction.TYPE.name())); + assertTrue(core.check(TransportAutoPutMappingAction.TYPE.name())); assertFalse(core.check("unknown")); } @@ -506,7 +506,7 @@ public void testAuthorizationForBackingIndices() { dataStreamName ).build(); iac = indicesPermission.authorize( - randomFrom(PutMappingAction.NAME, AutoPutMappingAction.NAME), + randomFrom(TransportPutMappingAction.TYPE.name(), TransportAutoPutMappingAction.TYPE.name()), Sets.newHashSet(backingIndices.stream().map(im -> im.getIndex().getName()).collect(Collectors.toList())), lookup, fieldPermissionsCache @@ -559,7 +559,7 @@ public void testAuthorizationForMappingUpdates() { ) .build(); IndicesAccessControl iac = core.authorize( - PutMappingAction.NAME, + TransportPutMappingAction.TYPE.name(), Sets.newHashSet("test1", "test_write1"), lookup, fieldPermissionsCache @@ -571,22 +571,27 @@ public void testAuthorizationForMappingUpdates() { assertThat(iac.hasIndexPermissions("test_write1"), is(true)); assertWarnings( "the index privilege [index] allowed the update mapping action [" - + PutMappingAction.NAME + + TransportPutMappingAction.TYPE.name() + "] on " + "index [test1], this privilege will not permit mapping updates in the next major release - " + "users who require access to update mappings must be granted explicit privileges", "the index privilege [index] allowed the update mapping action [" - + PutMappingAction.NAME + + TransportPutMappingAction.TYPE.name() + "] on " + "index [test_write1], this privilege will not permit mapping updates in the next major release - " + "users who require access to update mappings must be granted explicit privileges", "the index privilege [write] allowed the update mapping action [" - + PutMappingAction.NAME + + TransportPutMappingAction.TYPE.name() + "] on " + "index [test_write1], this privilege will not permit mapping updates in the next major release - " + "users who require access to update mappings must be granted explicit privileges" ); - iac = core.authorize(AutoPutMappingAction.NAME, Sets.newHashSet("test1", "test_write1"), lookup, fieldPermissionsCache); + iac = core.authorize( + TransportAutoPutMappingAction.TYPE.name(), + Sets.newHashSet("test1", "test_write1"), + lookup, + fieldPermissionsCache + ); assertThat(iac.isGranted(), is(true)); assertThat(iac.getIndexPermissions("test1"), is(notNullValue())); assertThat(iac.hasIndexPermissions("test1"), is(true)); @@ -594,21 +599,21 @@ public void testAuthorizationForMappingUpdates() { assertThat(iac.hasIndexPermissions("test_write1"), is(true)); assertWarnings( "the index privilege [index] allowed the update mapping action [" - + AutoPutMappingAction.NAME + + TransportAutoPutMappingAction.TYPE.name() + "] on " + "index [test1], this privilege will not permit mapping updates in the next major release - " + "users who require access to update mappings must be granted explicit privileges" ); - iac = core.authorize(AutoPutMappingAction.NAME, Sets.newHashSet("test_write2"), lookup, fieldPermissionsCache); + iac = core.authorize(TransportAutoPutMappingAction.TYPE.name(), Sets.newHashSet("test_write2"), lookup, fieldPermissionsCache); assertThat(iac.isGranted(), is(true)); assertThat(iac.getIndexPermissions("test_write2"), is(notNullValue())); assertThat(iac.hasIndexPermissions("test_write2"), is(true)); - iac = core.authorize(PutMappingAction.NAME, Sets.newHashSet("test_write2"), lookup, fieldPermissionsCache); + iac = 
core.authorize(TransportPutMappingAction.TYPE.name(), Sets.newHashSet("test_write2"), lookup, fieldPermissionsCache); assertThat(iac.getIndexPermissions("test_write2"), is(nullValue())); assertThat(iac.hasIndexPermissions("test_write2"), is(false)); iac = core.authorize( - AutoPutMappingAction.NAME, + TransportAutoPutMappingAction.TYPE.name(), Sets.newHashSet(backingIndices.stream().map(im -> im.getIndex().getName()).collect(Collectors.toList())), lookup, fieldPermissionsCache @@ -619,7 +624,7 @@ public void testAuthorizationForMappingUpdates() { assertThat(iac.hasIndexPermissions(im.getIndex().getName()), is(true)); } iac = core.authorize( - PutMappingAction.NAME, + TransportPutMappingAction.TYPE.name(), Sets.newHashSet(backingIndices.stream().map(im -> im.getIndex().getName()).collect(Collectors.toList())), lookup, fieldPermissionsCache diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/permission/PermissionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/permission/PermissionTests.java index c52409d6e6797..6356cde16b3cc 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/permission/PermissionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/permission/PermissionTests.java @@ -6,8 +6,8 @@ */ package org.elasticsearch.xpack.security.authz.permission; -import org.elasticsearch.action.admin.indices.mapping.put.AutoPutMappingAction; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.TransportAutoPutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.action.get.TransportGetAction; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexAbstraction; @@ -50,7 +50,7 @@ public void testAllowedIndicesMatcherAction() throws Exception { } public void testAllowedIndicesMatcherForMappingUpdates() throws Exception { - for (String mappingUpdateActionName : List.of(PutMappingAction.NAME, AutoPutMappingAction.NAME)) { + for (String mappingUpdateActionName : List.of(TransportPutMappingAction.TYPE.name(), TransportAutoPutMappingAction.TYPE.name())) { IndexAbstraction mockIndexAbstraction = mock(IndexAbstraction.class); IsResourceAuthorizedPredicate indexPredicate = permission.indices().allowedIndicesMatcher(mappingUpdateActionName); // mapping updates are still permitted on indices and aliases diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java index d229124419cb2..0c2f9cefbcffb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; @@ -197,8 +196,7 @@ public 
void testGetSinglePrivilegeByName() throws Exception { public void testGetMissingPrivilege() throws InterruptedException, ExecutionException, TimeoutException { final PlainActionFuture<Collection<ApplicationPrivilegeDescriptor>> future = new PlainActionFuture<>(); store.getPrivileges(List.of("myapp"), List.of("admin"), future); - final SearchHit[] hits = new SearchHit[0]; - ActionListener.respondAndRelease(listener.get(), buildSearchResponse(hits)); + ActionListener.respondAndRelease(listener.get(), buildSearchResponse(SearchHits.EMPTY)); final Collection<ApplicationPrivilegeDescriptor> applicationPrivilegeDescriptors = future.get(1, TimeUnit.SECONDS); assertThat(applicationPrivilegeDescriptors, empty()); @@ -299,8 +297,7 @@ public void testGetPrivilegesByStarApplicationName() throws Exception { assertThat(query, containsString("{\"exists\":{\"field\":\"application\"")); assertThat(query, containsString("{\"term\":{\"type\":{\"value\":\"application-privilege\"")); - final SearchHit[] hits = new SearchHit[0]; - ActionListener.respondAndRelease(listener.get(), buildSearchResponse(hits)); + ActionListener.respondAndRelease(listener.get(), buildSearchResponse(SearchHits.EMPTY)); } public void testGetAllPrivileges() throws Exception { @@ -822,15 +819,13 @@ private SearchHit[] buildHits(List sourcePrivile private static SearchResponse buildSearchResponse(SearchHit[] hits) { return new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), + new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), + null, + null, + false, + false, + null, + 1, "_scrollId1", 1, 1, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java index 3512ac4b613d5..e4ccc635e3be2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java @@ -55,7 +55,6 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; @@ -1031,7 +1030,7 @@ public void testUsageStats() { } else { final var searchResponse = mock(SearchResponse.class); when(searchResponse.getHits()).thenReturn( - new SearchHits(new SearchHit[0], new TotalHits(metrics.get(name), TotalHits.Relation.EQUAL_TO), 1) + SearchHits.empty(new TotalHits(metrics.get(name), TotalHits.Relation.EQUAL_TO), 1) ); return new MultiSearchResponse.Item(searchResponse, null); }
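
Both files in this span move off the removed `SearchResponseSections`/`InternalSearchResponse` wrappers: the hits, aggregations and suggest sections are now passed straight to the `SearchResponse` constructor, and empty hit arrays become `SearchHits.EMPTY`/`SearchHits.empty(...)`. A sketch of the flattened constructor, with the argument order as used in this patch:

```java
import org.apache.lucene.search.TotalHits;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.search.SearchHits;

class EmptySearchResponseSketch {
    static SearchResponse emptyResponse() {
        return new SearchResponse(
            SearchHits.empty(new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0f),
            null,         // aggregations
            null,         // suggest
            false,        // timed out
            false,        // terminated early
            null,         // profile results
            1,            // number of reduce phases
            "_scrollId1",
            1,            // total shards
            1,            // successful shards
            0,            // skipped shards
            10,           // took, in milliseconds
            null,         // shard failures
            null          // clusters
        );
    }
}
```
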
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java index 69884cd1e6dbd..d3b46f5847636 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java @@ -230,13 +230,17 @@ public Set<String> getFilteredFields() { assertEquals(restRequest, handlerRequest.get()); assertEquals(restRequest.content(), handlerRequest.get().content()); - Map<String, Object> original = XContentType.JSON.xContent() - .createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - handlerRequest.get().content().streamInput() - ) - .map(); + Map<String, Object> original; + try ( + var parser = XContentType.JSON.xContent() + .createParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + handlerRequest.get().content().streamInput() + ) + ) { + original = parser.map(); + } assertEquals(2, original.size()); assertEquals(SecuritySettingsSourceField.TEST_PASSWORD, original.get("password")); assertEquals("bar", original.get("foo")); @@ -244,13 +248,17 @@ public Set<String> getFilteredFields() { assertNotEquals(restRequest, auditTrailRequest.get()); assertNotEquals(restRequest.content(), auditTrailRequest.get().content()); - Map<String, Object> map = XContentType.JSON.xContent() - .createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - auditTrailRequest.get().content().streamInput() - ) - .map(); + Map<String, Object> map; + try ( + var parser = XContentType.JSON.xContent() + .createParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + auditTrailRequest.get().content().streamInput() + ) + ) { + map = parser.map(); + } assertEquals(1, map.size()); assertEquals("bar", map.get("foo")); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/ApiKeyBoolQueryBuilderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/ApiKeyBoolQueryBuilderTests.java index d7f994d7499fe..477409f22369f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/ApiKeyBoolQueryBuilderTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/ApiKeyBoolQueryBuilderTests.java @@ -37,6 +37,8 @@ import java.util.List; import java.util.function.Predicate; +import static org.elasticsearch.test.LambdaMatchers.falseWith; +import static org.elasticsearch.test.LambdaMatchers.trueWith; import static org.elasticsearch.xpack.security.support.ApiKeyFieldNameTranslators.FIELD_NAME_TRANSLATORS; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; @@ -318,10 +320,10 @@ private void testAllowedIndexFieldName(Predicate<String> predicate) { "metadata_flattened." + randomAlphaOfLengthBetween(1, 10), "creator." + randomAlphaOfLengthBetween(1, 10) ); - assertTrue(predicate.test(allowedField)); + assertThat(predicate, trueWith(allowedField)); final String disallowedField = randomBoolean() ? (randomAlphaOfLengthBetween(1, 3) + allowedField) : (allowedField.substring(1)); - assertFalse(predicate.test(disallowedField)); + assertThat(predicate, falseWith(disallowedField)); } private void assertCommonFilterQueries(ApiKeyBoolQueryBuilder qb, Authentication authentication) {
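
The SecurityRestFilterTests hunk above illustrates a cleanup this patch applies when reading request bodies: `XContent` parsers are `Closeable`, so they are now opened in try-with-resources instead of being leaked after `.map()`. The same shape, extracted as a standalone sketch:

```java
import java.io.IOException;
import java.util.Map;

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.xcontent.DeprecationHandler;
import org.elasticsearch.xcontent.NamedXContentRegistry;
import org.elasticsearch.xcontent.XContentType;

class ParserCleanupSketch {
    static Map<String, Object> parseJson(BytesReference content) throws IOException {
        try (
            var parser = XContentType.JSON.xContent()
                .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, content.streamInput())
        ) {
            return parser.map(); // the parser is closed even if map() throws
        }
    }
}
```
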
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolverTests.java deleted file mode 100644 index debb50384e217..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolverTests.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.security.transport; - -import org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.security.authc.ApiKeyService; - -import java.util.Optional; - -import static org.elasticsearch.xpack.security.transport.RemoteClusterCredentialsResolver.RemoteClusterCredentials; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; - -public class RemoteClusterCredentialsResolverTests extends ESTestCase { - - public void testResolveRemoteClusterCredentials() { - final String clusterNameA = "clusterA"; - final String clusterDoesNotExist = randomAlphaOfLength(10); - final Settings.Builder builder = Settings.builder(); - - final String secret = randomAlphaOfLength(20); - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("cluster.remote." + clusterNameA + ".credentials", secret); - final Settings settings = builder.setSecureSettings(secureSettings).build(); - RemoteClusterCredentialsResolver remoteClusterAuthorizationResolver = new RemoteClusterCredentialsResolver(settings); - final Optional<RemoteClusterCredentials> remoteClusterCredentials = remoteClusterAuthorizationResolver.resolve(clusterNameA); - assertThat(remoteClusterCredentials.isPresent(), is(true)); - assertThat(remoteClusterCredentials.get().clusterAlias(), equalTo(clusterNameA)); - assertThat(remoteClusterCredentials.get().credentials(), equalTo(ApiKeyService.withApiKeyPrefix(secret))); - assertThat(remoteClusterAuthorizationResolver.resolve(clusterDoesNotExist), is(Optional.empty())); - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java index 57e48581d159c..46b0fac78ad8e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.ssl.SslClientAuthenticationMode; import org.elasticsearch.common.ssl.SslConfiguration; @@ -33,6 +34,7 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterPortSettings; +import org.elasticsearch.transport.RemoteConnectionManager.RemoteClusterAliasWithCredentials; import org.elasticsearch.transport.SendRequestTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.Transport.Connection; @@ -77,6 +79,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import java.util.function.Function; import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.DATA_STREAM_LIFECYCLE_ORIGIN; import static 
org.elasticsearch.test.ActionListenerUtils.anyActionListener; @@ -87,7 +90,6 @@ import static org.elasticsearch.xpack.core.security.authc.CrossClusterAccessSubjectInfo.CROSS_CLUSTER_ACCESS_SUBJECT_INFO_HEADER_KEY; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTests.randomUniquelyNamedRoleDescriptors; import static org.elasticsearch.xpack.security.authc.CrossClusterAccessHeaders.CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY; -import static org.elasticsearch.xpack.security.transport.RemoteClusterCredentialsResolver.RemoteClusterCredentials; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -153,7 +155,6 @@ public void testSendAsync() throws Exception { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); ClusterServiceUtils.setState(clusterService, clusterService.state()); // force state update to trigger listener @@ -205,7 +206,6 @@ public void testSendAsyncSwitchToSystem() throws Exception { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); ClusterServiceUtils.setState(clusterService, clusterService.state()); // force state update to trigger listener @@ -250,7 +250,6 @@ public void testSendWithoutUser() throws Exception { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ) { @Override @@ -313,7 +312,6 @@ public void testSendToNewerVersionSetsCorrectVersion() throws Exception { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); ClusterServiceUtils.setState(clusterService, clusterService.state()); // force state update to trigger listener @@ -382,7 +380,6 @@ public void testSendToOlderVersionSetsCorrectVersion() throws Exception { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); ClusterServiceUtils.setState(clusterService, clusterService.state()); // force state update to trigger listener @@ -449,7 +446,6 @@ public void testSetUserBasedOnActionOrigin() { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); @@ -604,7 +600,6 @@ public void testSendWithCrossClusterAccessHeadersWithUnsupportedLicense() throws AuthenticationTestHelper.builder().build().writeToContext(threadContext); final String remoteClusterAlias = randomAlphaOfLengthBetween(5, 10); - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = mockRemoteClusterCredentialsResolver(remoteClusterAlias); final SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor( settings, @@ -618,9 +613,8 @@ public void 
testSendWithCrossClusterAccessHeadersWithUnsupportedLicense() throws new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - remoteClusterCredentialsResolver, unsupportedLicenseState, - ignored -> Optional.of(remoteClusterAlias) + mockRemoteClusterCredentialsResolver(remoteClusterAlias) ); final AsyncSender sender = interceptor.interceptSender(mock(AsyncSender.class, ignored -> { @@ -661,18 +655,16 @@ public TransportResponse read(StreamInput in) { actualException.get().getCause().getMessage(), equalTo("current license is non-compliant for [" + Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE.getName() + "]") ); - verify(remoteClusterCredentialsResolver, times(1)).resolve(eq(remoteClusterAlias)); assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_SUBJECT_INFO_HEADER_KEY), nullValue()); assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY), nullValue()); } - private RemoteClusterCredentialsResolver mockRemoteClusterCredentialsResolver(String remoteClusterAlias) { - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = mock(RemoteClusterCredentialsResolver.class); - final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(randomAlphaOfLengthBetween(10, 42)); - when(remoteClusterCredentialsResolver.resolve(any())).thenReturn( - Optional.of(new RemoteClusterCredentials(remoteClusterAlias, remoteClusterCredential)) + private Function<Connection, Optional<RemoteClusterAliasWithCredentials>> mockRemoteClusterCredentialsResolver( + String remoteClusterAlias + ) { + return connection -> Optional.of( + new RemoteClusterAliasWithCredentials(remoteClusterAlias, new SecureString(randomAlphaOfLengthBetween(10, 42).toCharArray())) ); - return remoteClusterCredentialsResolver; } public void testSendWithCrossClusterAccessHeadersForSystemUserRegularAction() throws Exception { @@ -736,12 +728,9 @@ private void doTestSendWithCrossClusterAccessHeaders( ) throws IOException { authentication.writeToContext(threadContext); final String expectedRequestId = AuditUtil.getOrGenerateRequestId(threadContext); - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = mock(RemoteClusterCredentialsResolver.class); final String remoteClusterAlias = randomAlphaOfLengthBetween(5, 10); - final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(randomAlphaOfLengthBetween(10, 42)); - when(remoteClusterCredentialsResolver.resolve(any())).thenReturn( - Optional.of(new RemoteClusterCredentials(remoteClusterAlias, remoteClusterCredential)) - ); + final String encodedApiKey = randomAlphaOfLengthBetween(10, 42); + final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(encodedApiKey); final AuthorizationService authzService = mock(AuthorizationService.class); // We capture the listener so that we can complete the full flow, by calling onResponse further down @SuppressWarnings("unchecked") @@ -760,9 +749,8 @@ private void doTestSendWithCrossClusterAccessHeaders( new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - remoteClusterCredentialsResolver, mockLicenseState, - ignored -> Optional.of(remoteClusterAlias) + ignored -> Optional.of(new RemoteClusterAliasWithCredentials(remoteClusterAlias, new SecureString(encodedApiKey.toCharArray()))) ); final AtomicBoolean calledWrappedSender = new AtomicBoolean(false); @@ -861,7 +849,6 @@ 
public TransportResponse read(StreamInput in) { } assertThat(sentCredential.get(), equalTo(remoteClusterCredential)); verify(securityContext, never()).executeAsInternalUser(any(), any(), anyConsumer()); - verify(remoteClusterCredentialsResolver, times(1)).resolve(eq(remoteClusterAlias)); assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_SUBJECT_INFO_HEADER_KEY), nullValue()); assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY), nullValue()); assertThat(AuditUtil.extractRequestId(securityContext.getThreadContext()), equalTo(expectedRequestId)); @@ -874,15 +861,9 @@ public void testSendWithUserIfCrossClusterAccessHeadersConditionNotMet() throws if (false == (notRemoteConnection || noCredential)) { noCredential = true; } + final boolean finalNoCredential = noCredential; final String remoteClusterAlias = randomAlphaOfLengthBetween(5, 10); - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = mock(RemoteClusterCredentialsResolver.class); - when(remoteClusterCredentialsResolver.resolve(any())).thenReturn( - noCredential - ? Optional.empty() - : Optional.of( - new RemoteClusterCredentials(remoteClusterAlias, ApiKeyService.withApiKeyPrefix(randomAlphaOfLengthBetween(10, 42))) - ) - ); + final String encodedApiKey = randomAlphaOfLengthBetween(10, 42); final AuthenticationTestHelper.AuthenticationTestBuilder builder = AuthenticationTestHelper.builder(); final Authentication authentication = randomFrom( builder.apiKey().build(), @@ -904,9 +885,12 @@ public void testSendWithUserIfCrossClusterAccessHeadersConditionNotMet() throws new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - remoteClusterCredentialsResolver, mockLicenseState, - ignored -> notRemoteConnection ? Optional.empty() : Optional.of(remoteClusterAlias) + ignored -> notRemoteConnection + ? Optional.empty() + : (finalNoCredential + ? 
Optional.of(new RemoteClusterAliasWithCredentials(remoteClusterAlias, null)) + : Optional.of(new RemoteClusterAliasWithCredentials(remoteClusterAlias, new SecureString(encodedApiKey.toCharArray())))) ); final AtomicBoolean calledWrappedSender = new AtomicBoolean(false); @@ -944,12 +928,9 @@ public void testSendWithCrossClusterAccessHeadersThrowsOnOldConnection() throws .realm() .build(); authentication.writeToContext(threadContext); - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = mock(RemoteClusterCredentialsResolver.class); final String remoteClusterAlias = randomAlphaOfLengthBetween(5, 10); - final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(randomAlphaOfLengthBetween(10, 42)); - when(remoteClusterCredentialsResolver.resolve(any())).thenReturn( - Optional.of(new RemoteClusterCredentials(remoteClusterAlias, remoteClusterCredential)) - ); + final String encodedApiKey = randomAlphaOfLengthBetween(10, 42); + final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(encodedApiKey); final SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor( settings, @@ -963,9 +944,8 @@ public void testSendWithCrossClusterAccessHeadersThrowsOnOldConnection() throws new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - remoteClusterCredentialsResolver, mockLicenseState, - ignored -> Optional.of(remoteClusterAlias) + ignored -> Optional.of(new RemoteClusterAliasWithCredentials(remoteClusterAlias, new SecureString(encodedApiKey.toCharArray()))) ); final AsyncSender sender = interceptor.interceptSender(new AsyncSender() { @@ -1029,7 +1009,6 @@ public TransportResponse read(StreamInput in) { + "] does not support receiving them" ) ); - verify(remoteClusterCredentialsResolver, times(1)).resolve(eq(remoteClusterAlias)); assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_SUBJECT_INFO_HEADER_KEY), nullValue()); assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY), nullValue()); } @@ -1040,12 +1019,9 @@ public void testSendRemoteRequestFailsIfUserHasNoRemoteIndicesPrivileges() throw .realm() .build(); authentication.writeToContext(threadContext); - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = mock(RemoteClusterCredentialsResolver.class); final String remoteClusterAlias = randomAlphaOfLengthBetween(5, 10); - final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(randomAlphaOfLengthBetween(10, 42)); - when(remoteClusterCredentialsResolver.resolve(any())).thenReturn( - Optional.of(new RemoteClusterCredentials(remoteClusterAlias, remoteClusterCredential)) - ); + final String encodedApiKey = randomAlphaOfLengthBetween(10, 42); + final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(encodedApiKey); final AuthorizationService authzService = mock(AuthorizationService.class); doAnswer(invocation -> { @@ -1067,9 +1043,8 @@ public void testSendRemoteRequestFailsIfUserHasNoRemoteIndicesPrivileges() throw new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - remoteClusterCredentialsResolver, mockLicenseState, - ignored -> Optional.of(remoteClusterAlias) + ignored -> Optional.of(new RemoteClusterAliasWithCredentials(remoteClusterAlias, new SecureString(encodedApiKey.toCharArray()))) ); final 
AsyncSender sender = interceptor.interceptSender(new AsyncSender() { @@ -1171,7 +1146,6 @@ public void testProfileFiltersCreatedDifferentlyForDifferentTransportAndRemoteCl new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); @@ -1225,7 +1199,6 @@ public void testNoProfileFilterForRemoteClusterWhenTheFeatureIsDisabled() { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestGetShutdownStatusAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestGetShutdownStatusAction.java index 5d10f1a3d517e..babe2174b0952 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestGetShutdownStatusAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestGetShutdownStatusAction.java @@ -14,7 +14,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.util.List; @@ -40,7 +40,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).execute( GetShutdownStatusAction.INSTANCE, new GetShutdownStatusAction.Request(nodeIds), - new RestChunkedToXContentListener<>(channel) + new RestRefCountedChunkedToXContentListener<>(channel) ); } } diff --git a/x-pack/plugin/slm/src/main/java/module-info.java b/x-pack/plugin/slm/src/main/java/module-info.java index 77fa369c7ef4b..bdfdbd85a434e 100644 --- a/x-pack/plugin/slm/src/main/java/module-info.java +++ b/x-pack/plugin/slm/src/main/java/module-info.java @@ -18,4 +18,6 @@ provides org.elasticsearch.reservedstate.ReservedClusterStateHandlerProvider with org.elasticsearch.xpack.slm.ReservedLifecycleStateHandlerProvider; + + provides org.elasticsearch.features.FeatureSpecification with org.elasticsearch.xpack.slm.SnapshotLifecycleFeatures; } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java index 74094c83d4bcb..0233db5af081f 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java @@ -124,6 +124,7 @@ public Collection createComponents(PluginServices services) { SnapshotLifecycleTemplateRegistry templateRegistry = new SnapshotLifecycleTemplateRegistry( settings, clusterService, + services.featureService(), threadPool, client, services.xContentRegistry() @@ -214,7 +215,7 @@ public List getRestHandlers( new ActionHandler<>(ExecuteSnapshotLifecycleAction.INSTANCE, TransportExecuteSnapshotLifecycleAction.class), new ActionHandler<>(GetSnapshotLifecycleStatsAction.INSTANCE, TransportGetSnapshotLifecycleStatsAction.class), new ActionHandler<>(ExecuteSnapshotRetentionAction.INSTANCE, TransportExecuteSnapshotRetentionAction.class), - new 
ActionHandler<>(SLMGetExpiredSnapshotsAction.INSTANCE, SLMGetExpiredSnapshotsAction.LocalAction.class), + new ActionHandler<>(TransportSLMGetExpiredSnapshotsAction.INSTANCE, TransportSLMGetExpiredSnapshotsAction.class), new ActionHandler<>(StartSLMAction.INSTANCE, TransportStartSLMAction.class), new ActionHandler<>(StopSLMAction.INSTANCE, TransportStopSLMAction.class), new ActionHandler<>(GetSLMStatusAction.INSTANCE, TransportGetSLMStatusAction.class) diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java new file mode 100644 index 0000000000000..f3dfe4fb26f65 --- /dev/null +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.slm; + +import org.elasticsearch.Version; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.xpack.slm.history.SnapshotLifecycleTemplateRegistry; + +import java.util.Map; + +public class SnapshotLifecycleFeatures implements FeatureSpecification { + @Override + public Map<NodeFeature, Version> getHistoricalFeatures() { + return Map.of(SnapshotLifecycleTemplateRegistry.MANAGED_BY_DATA_STREAM_LIFECYCLE, Version.V_8_12_0); + } +} diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java index d063941aa42d5..71afcb4548a06 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java @@ -184,8 +184,8 @@ void getSnapshotsEligibleForDeletion( ActionListener<Map<String, List<Tuple<SnapshotId, String>>>> listener ) { client.execute( - SLMGetExpiredSnapshotsAction.INSTANCE, - new SLMGetExpiredSnapshotsAction.Request(repositories, policies), + TransportSLMGetExpiredSnapshotsAction.INSTANCE, + new TransportSLMGetExpiredSnapshotsAction.Request(repositories, policies), listener.delegateFailureAndWrap((l, m) -> l.onResponse(m.snapshotsToDelete())) ); }
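The hunk above is the calling side of this refactor: SnapshotRetentionTask now goes through `client.execute` rather than reaching into `RepositoriesService` itself. For orientation, here is a minimal sketch of the local-only action pattern the rename below adopts. It is not the PR's code: `ExampleRequest` and `ExampleResponse` are hypothetical placeholders (in a real action they would extend `ActionRequest`/`ActionResponse`), and only `ActionType.localOnly`, the `TransportAction` constructor, and `doExecute` are taken from the diff.

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportAction;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.transport.TransportService;

// Minimal sketch of a local-only transport action. ExampleRequest and
// ExampleResponse are hypothetical stand-ins, not classes from the PR.
public class TransportExampleAction extends TransportAction<ExampleRequest, ExampleResponse> {

    // localOnly() registers an action name with no wire serialization, so the
    // action can only run on the node that invokes it.
    public static final ActionType<ExampleResponse> INSTANCE = ActionType.localOnly("cluster:admin/example/do_thing");

    @Inject
    public TransportExampleAction(TransportService transportService, ActionFilters actionFilters) {
        super(INSTANCE.name(), actionFilters, transportService.getTaskManager());
    }

    @Override
    protected void doExecute(Task task, ExampleRequest request, ActionListener<ExampleResponse> listener) {
        // Runs directly on the node that called client.execute(INSTANCE, request, listener).
        listener.onResponse(new ExampleResponse());
    }
}
```

The payoff, visible in the rename that follows, is that the action class can take node-local services such as `RepositoriesService` as constructor dependencies without ever needing a `Writeable.Reader`.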
diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SLMGetExpiredSnapshotsAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/TransportSLMGetExpiredSnapshotsAction.java similarity index 64% rename from x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SLMGetExpiredSnapshotsAction.java rename to x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/TransportSLMGetExpiredSnapshotsAction.java index 550410d1d59aa..050562f0162c9 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SLMGetExpiredSnapshotsAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/TransportSLMGetExpiredSnapshotsAction.java @@ -23,9 +23,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Tuple; import org.elasticsearch.repositories.GetSnapshotInfoContext; import org.elasticsearch.repositories.RepositoriesService; @@ -56,100 +54,96 @@ * Computes the expired snapshots for SLM. Called by {@link SnapshotRetentionTask}, but made into a separate (local-only) transport action * so that it can access the {@link RepositoriesService} directly. */ -public class SLMGetExpiredSnapshotsAction extends ActionType<SLMGetExpiredSnapshotsAction.Response> { +public class TransportSLMGetExpiredSnapshotsAction extends TransportAction< + TransportSLMGetExpiredSnapshotsAction.Request, + TransportSLMGetExpiredSnapshotsAction.Response> { - public static final SLMGetExpiredSnapshotsAction INSTANCE = new SLMGetExpiredSnapshotsAction(); + public static final ActionType<Response> INSTANCE = ActionType.localOnly("cluster:admin/slm/execute/get_expired_snapshots"); - private static final Logger logger = LogManager.getLogger(SLMGetExpiredSnapshotsAction.class); + private static final Logger logger = LogManager.getLogger(TransportSLMGetExpiredSnapshotsAction.class); - private SLMGetExpiredSnapshotsAction() { - super("cluster:admin/slm/execute/get_expired_snapshots", Writeable.Reader.localOnly()); - } + private final RepositoriesService repositoriesService; + private final Executor retentionExecutor; - public static class LocalAction extends TransportAction<Request, Response> { - private final RepositoriesService repositoriesService; - private final Executor retentionExecutor; - private final ThreadContext threadContext; + @Inject + public TransportSLMGetExpiredSnapshotsAction( + TransportService transportService, + RepositoriesService repositoriesService, + ActionFilters actionFilters + ) { + super(INSTANCE.name(), actionFilters, transportService.getTaskManager()); + this.repositoriesService = repositoriesService; + this.retentionExecutor = transportService.getThreadPool().executor(ThreadPool.Names.MANAGEMENT); + } - private static final Logger logger = SLMGetExpiredSnapshotsAction.logger; + private static class ResultsBuilder { + private final Map<String, List<Tuple<SnapshotId, String>>> resultsByRepository = ConcurrentCollections.newConcurrentMap(); - @Inject - public LocalAction(TransportService transportService, RepositoriesService repositoriesService, ActionFilters actionFilters) { - super(INSTANCE.name(), actionFilters, transportService.getTaskManager()); - this.repositoriesService = repositoriesService; - final var threadPool = transportService.getThreadPool(); - this.retentionExecutor = threadPool.executor(ThreadPool.Names.MANAGEMENT); - this.threadContext = threadPool.getThreadContext(); + Response getResponse() { + // copyOf just so we aren't returning the CHM + return new Response(Map.copyOf(resultsByRepository)); } - private static class ResultsBuilder { - private final Map<String, List<Tuple<SnapshotId, String>>> resultsByRepository = ConcurrentCollections.newConcurrentMap(); - - Response getResponse() { - // copyOf just so we aren't returning the CHM - return new Response(Map.copyOf(resultsByRepository)); + void addResult(String repository, List<Tuple<SnapshotId, String>> snapshotsToDelete) { + // snapshotsToDelete is immutable because it comes from a Stream#toList() so no further copying needed + if (snapshotsToDelete.isEmpty()) { + assert resultsByRepository.containsKey(repository) == false; + } else { + final var previousValue = resultsByRepository.put(repository, snapshotsToDelete); + assert previousValue == null : repository + ": " + previousValue + " vs " + snapshotsToDelete; } + } + } - void addResult(String repository, List<Tuple<SnapshotId, String>> snapshotsToDelete) { - // snapshotsToDelete is immutable because it comes from a Stream#toList() so no further copying needed - if
(snapshotsToDelete.isEmpty()) { - assert resultsByRepository.containsKey(repository) == false; - } else { - final var previousValue = resultsByRepository.put(repository, snapshotsToDelete); - assert previousValue == null : repository + ": " + previousValue + " vs " + snapshotsToDelete; + @Override + protected void doExecute(Task task, Request request, ActionListener<Response> listener) { + final var resultsBuilder = new ResultsBuilder(); + try (var refs = new RefCountingRunnable(() -> listener.onResponse(resultsBuilder.getResponse()))) { + for (final var repositoryName : request.repositories()) { + + final Repository repository; + try { + repository = repositoriesService.repository(repositoryName); + } catch (RepositoryMissingException e) { + logger.debug("[{}]: repository not found", repositoryName); + continue; } - } - } - @Override - protected void doExecute(Task task, Request request, ActionListener<Response> listener) { - final var resultsBuilder = new ResultsBuilder(); - try (var refs = new RefCountingRunnable(() -> listener.onResponse(resultsBuilder.getResponse()))) { - for (final var repositoryName : request.repositories()) { - - final Repository repository; - try { - repository = repositoriesService.repository(repositoryName); - } catch (RepositoryMissingException e) { - logger.debug("[{}]: repository not found", repositoryName); - continue; - } + if (repository.isReadOnly()) { + logger.debug("[{}]: skipping readonly repository", repositoryName); + continue; + } - if (repository.isReadOnly()) { - logger.debug("[{}]: skipping readonly repository", repositoryName); - continue; + retentionExecutor.execute(ActionRunnable.wrap(ActionListener.releaseAfter(new ActionListener<Void>() { + @Override + public void onResponse(Void unused) {} + + @Override + public void onFailure(Exception e) { + logger.debug(Strings.format("[%s]: could not compute expired snapshots", repositoryName), e); } + }, refs.acquire()), + perRepositoryListener -> SubscribableListener + + // Get repository data + .<RepositoryData>newForked(l -> repository.getRepositoryData(retentionExecutor, l)) - retentionExecutor.execute( - ActionRunnable.wrap( - refs.acquireListener(), - perRepositoryListener -> SubscribableListener - - // Get repository data - .<RepositoryData>newForked(l -> repository.getRepositoryData(retentionExecutor, l)) - - // Collect snapshot details by policy, and get any missing details by reading SnapshotInfo - .andThen( - (l, repositoryData) -> getSnapshotDetailsByPolicy(retentionExecutor, repository, repositoryData, l) - ) - - // Compute snapshots to delete for each (relevant) policy - .andThen((l, snapshotDetailsByPolicy) -> ActionListener.completeWith(l, () -> { - resultsBuilder.addResult( - repositoryName, - getSnapshotsToDelete(repositoryName, request.policies(), snapshotDetailsByPolicy) - ); - return null; - })) - - // And notify this repository's listener on completion - .addListener(perRepositoryListener.delegateResponse((l, e) -> { - logger.debug(Strings.format("[%s]: could not compute expired snapshots", repositoryName), e); - l.onResponse(null); - })) + // Collect snapshot details by policy, and get any missing details by reading SnapshotInfo + .andThen( + (l, repositoryData) -> getSnapshotDetailsByPolicy(retentionExecutor, repository, repositoryData, l) ) - ); - } + + // Compute snapshots to delete for each (relevant) policy + .andThenAccept(snapshotDetailsByPolicy -> { + resultsBuilder.addResult( + repositoryName, + getSnapshotsToDelete(repositoryName, request.policies(), snapshotDetailsByPolicy) + ); + }) + + // And notify this repository's
listener on completion + .addListener(perRepositoryListener) + )); } } } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryItem.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryItem.java index 93c20840bba6e..60fdba2051041 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryItem.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryItem.java @@ -168,7 +168,7 @@ public SnapshotHistoryItem(StreamInput in) throws IOException { this.snapshotName = in.readString(); this.operation = in.readString(); this.success = in.readBoolean(); - this.snapshotConfiguration = in.readMap(); + this.snapshotConfiguration = in.readGenericMap(); this.errorDetails = in.readOptionalString(); } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistry.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistry.java index a72283682b258..f40ea5a56463a 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistry.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistry.java @@ -8,10 +8,13 @@ package org.elasticsearch.xpack.slm.history; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.ilm.IndexLifecycleMetadata; @@ -42,12 +45,15 @@ public class SnapshotLifecycleTemplateRegistry extends IndexTemplateRegistry { // version 4:converted data stream // version 5: add `allow_auto_create` setting // version 6: manage by data stream lifecycle - public static final int INDEX_TEMPLATE_VERSION = 6; + // version 7: version the index template name so we can upgrade existing deployments + public static final int INDEX_TEMPLATE_VERSION = 7; + public static final NodeFeature MANAGED_BY_DATA_STREAM_LIFECYCLE = new NodeFeature("slm-history-managed-by-dsl"); public static final String SLM_TEMPLATE_VERSION_VARIABLE = "xpack.slm.template.version"; - public static final String SLM_TEMPLATE_NAME = ".slm-history"; + public static final String SLM_TEMPLATE_NAME = ".slm-history-" + INDEX_TEMPLATE_VERSION; public static final String SLM_POLICY_NAME = "slm-history-ilm-policy"; + private final FeatureService featureService; @Override protected boolean requiresMasterNode() { @@ -59,11 +65,13 @@ protected boolean requiresMasterNode() { public SnapshotLifecycleTemplateRegistry( Settings nodeSettings, ClusterService clusterService, + FeatureService featureService, ThreadPool threadPool, Client client, NamedXContentRegistry xContentRegistry ) { super(nodeSettings, clusterService, threadPool, client, xContentRegistry); + this.featureService = featureService; slmHistoryEnabled = SLM_HISTORY_INDEX_ENABLED_SETTING.get(nodeSettings); } @@ -114,4 +122,9 @@ public boolean validate(ClusterState state) { boolean allPoliciesPresent = maybePolicies.map(policies -> 
policies.keySet().containsAll(policyNames)).orElse(false); return allTemplatesPresent && allPoliciesPresent; } + + @Override + protected boolean isClusterReady(ClusterChangedEvent event) { + return featureService.clusterHasFeature(event.state(), MANAGED_BY_DATA_STREAM_LIFECYCLE); + } } diff --git a/x-pack/plugin/slm/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/x-pack/plugin/slm/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification new file mode 100644 index 0000000000000..e8a2270235898 --- /dev/null +++ b/x-pack/plugin/slm/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification @@ -0,0 +1,8 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0; you may not use this file except in compliance with the Elastic License +# 2.0. +# + +org.elasticsearch.xpack.slm.SnapshotLifecycleFeatures
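The new resource file above is the SPI hook that lets the `FeatureService` discover `SnapshotLifecycleFeatures` at startup; the `isClusterReady` override then defers template installation until every node in the cluster is known to have the feature. A rough sketch of that wiring, using a hypothetical feature id and class name rather than the real ones:

```java
import java.util.Map;

import org.elasticsearch.Version;
import org.elasticsearch.features.FeatureSpecification;
import org.elasticsearch.features.NodeFeature;

// Hypothetical plugin feature specification; the id and version below are
// placeholders, not values taken from this PR.
public class MyPluginFeatures implements FeatureSpecification {
    public static final NodeFeature MY_FEATURE = new NodeFeature("my-plugin-feature");

    @Override
    public Map<NodeFeature, Version> getHistoricalFeatures() {
        // A "historical" feature: any node at or above this version is assumed to have it,
        // so mixed-version clusters only report it once the oldest node is new enough.
        return Map.of(MY_FEATURE, Version.V_8_12_0);
    }
}
```

For discovery to work, the implementing class must be listed in `META-INF/services/org.elasticsearch.features.FeatureSpecification`, exactly as the new resource file does for `SnapshotLifecycleFeatures`, and callers gate behavior with `featureService.clusterHasFeature(state, MY_FEATURE)` as in the `isClusterReady` override above.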
diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java index 5ba343de28752..1384cd4499624 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java @@ -243,9 +243,9 @@ protected void Request request, ActionListener<Response> listener ) { - if (action == SLMGetExpiredSnapshotsAction.INSTANCE) { + if (action == TransportSLMGetExpiredSnapshotsAction.INSTANCE) { logger.info("--> called"); - listener.onResponse((Response) new SLMGetExpiredSnapshotsAction.Response(Map.of())); + listener.onResponse((Response) new TransportSLMGetExpiredSnapshotsAction.Response(Map.of())); } else { super.doExecute(action, request, listener); } diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SLMGetExpiredSnapshotsActionTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/TransportSLMGetExpiredSnapshotsActionTests.java similarity index 86% rename from x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SLMGetExpiredSnapshotsActionTests.java rename to x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/TransportSLMGetExpiredSnapshotsActionTests.java index eda0e4f8ae39c..1a49ad114f33f 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SLMGetExpiredSnapshotsActionTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/TransportSLMGetExpiredSnapshotsActionTests.java @@ -56,7 +56,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class SLMGetExpiredSnapshotsActionTests extends ESTestCase { +public class TransportSLMGetExpiredSnapshotsActionTests extends ESTestCase { public void testEmpty() { runActionTest(List.of(), Set.of()); @@ -137,8 +137,8 @@ private static void runActionTest(List<SnapshotInfo> snapshotInfos, Set } }); - final var action = new SLMGetExpiredSnapshotsAction.LocalAction(transportService, repositoriesService, new ActionFilters(Set.of())); - final var task = new Task(1, "direct", SLMGetExpiredSnapshotsAction.INSTANCE.name(), "", TaskId.EMPTY_TASK_ID, Map.of()); + final var action = new TransportSLMGetExpiredSnapshotsAction(transportService, repositoriesService, new ActionFilters(Set.of())); + final var task = new Task(1, "direct", TransportSLMGetExpiredSnapshotsAction.INSTANCE.name(), "", TaskId.EMPTY_TASK_ID, Map.of()); final var policyMap = createPolicies( snapshotInfos.stream() @@ -148,8 +148,8 @@ private static void runActionTest(List<SnapshotInfo> snapshotInfos, Set<SnapshotId> snapshotsToDelete ); - final var responseFuture = new PlainActionFuture<SLMGetExpiredSnapshotsAction.Response>(); - action.doExecute(task, new SLMGetExpiredSnapshotsAction.Request(List.of(REPO_NAME), policyMap), responseFuture); + final var responseFuture = new PlainActionFuture<TransportSLMGetExpiredSnapshotsAction.Response>(); + action.doExecute(task, new TransportSLMGetExpiredSnapshotsAction.Request(List.of(REPO_NAME), policyMap), responseFuture); deterministicTaskQueue.runAllTasks(); assertTrue(responseFuture.isDone()); final var deletedSnapshots = responseFuture.actionGet().snapshotsToDelete(); @@ -175,15 +175,20 @@ record SeenSnapshotInfo(SnapshotId snapshotId, String policyId) {} .map(si -> new SeenSnapshotInfo(si.snapshotId(), RepositoryData.SnapshotDetails.fromSnapshotInfo(si).getSlmPolicy())) .collect(Collectors.toSet()); - SubscribableListener + final var testListener = SubscribableListener .<RepositoryData>newForked(l -> repository.getRepositoryData(EsExecutors.DIRECT_EXECUTOR_SERVICE, l)) - .andThen( - (l, rd) -> SLMGetExpiredSnapshotsAction.getSnapshotDetailsByPolicy(EsExecutors.DIRECT_EXECUTOR_SERVICE, repository, rd, l) + .andThen( + (l, rd) -> TransportSLMGetExpiredSnapshotsAction.getSnapshotDetailsByPolicy( + EsExecutors.DIRECT_EXECUTOR_SERVICE, + repository, + rd, + l + ) ) - .andThen((l, snapshotDetailsByPolicy) -> { + .andThenAccept(snapshotDetailsByPolicy -> { snapshotDetailsByPolicy.flatMap((policyId, snapshotsMap) -> snapshotsMap.entrySet().stream().map(entry -> { assertThat(policyId, oneOf(policyNames)); assertEquals(policyId, entry.getValue().getSlmPolicy()); @@ -192,43 +197,60 @@ record SeenSnapshotInfo(SnapshotId snapshotId, String policyId) {} }); deterministicTaskQueue.runAllTasks(); + assertTrue(testListener.isDone()); assertThat(seenSnapshotInfos, empty()); } public void testGetSnapshotsToDelete() { - final var snapshotDetailsByPolicy = new SLMGetExpiredSnapshotsAction.SnapshotDetailsByPolicy(); + final var snapshotDetailsByPolicy = new TransportSLMGetExpiredSnapshotsAction.SnapshotDetailsByPolicy(); assertEquals( List.of(), - SLMGetExpiredSnapshotsAction.getSnapshotsToDelete(REPO_NAME, createPolicies(Set.of(), Set.of()), snapshotDetailsByPolicy) + TransportSLMGetExpiredSnapshotsAction.getSnapshotsToDelete( + REPO_NAME, + createPolicies(Set.of(), Set.of()), + snapshotDetailsByPolicy + ) ); snapshotDetailsByPolicy.add(mkId("snapshot-with-unknown-policy"), mkDetails("unknown-policy-id")); assertEquals( List.of(), - SLMGetExpiredSnapshotsAction.getSnapshotsToDelete(REPO_NAME, createPolicies(Set.of(), Set.of()), snapshotDetailsByPolicy) + TransportSLMGetExpiredSnapshotsAction.getSnapshotsToDelete( + REPO_NAME, + createPolicies(Set.of(), Set.of()), + snapshotDetailsByPolicy + ) ); snapshotDetailsByPolicy.add(mkId("no-retention"), mkDetails(NO_RETENTION_POLICY_ID)); assertEquals( List.of(), - SLMGetExpiredSnapshotsAction.getSnapshotsToDelete(REPO_NAME, createPolicies(Set.of(), Set.of()), snapshotDetailsByPolicy) + TransportSLMGetExpiredSnapshotsAction.getSnapshotsToDelete( + REPO_NAME, + createPolicies(Set.of(), Set.of()), + snapshotDetailsByPolicy + ) ); snapshotDetailsByPolicy.add(mkId("other-repo-policy"), mkDetails(OTHER_REPO_POLICY_ID)); assertEquals( List.of(), - SLMGetExpiredSnapshotsAction.getSnapshotsToDelete(REPO_NAME, createPolicies(Set.of(), Set.of()), snapshotDetailsByPolicy) + TransportSLMGetExpiredSnapshotsAction.getSnapshotsToDelete( + REPO_NAME, + createPolicies(Set.of(), Set.of()),
snapshotDetailsByPolicy + ) ); snapshotDetailsByPolicy.add(mkId("expiry-candidate"), mkDetails(POLICY_ID)); assertEquals( List.of(), - SLMGetExpiredSnapshotsAction.getSnapshotsToDelete( + TransportSLMGetExpiredSnapshotsAction.getSnapshotsToDelete( REPO_NAME, createPolicies(Set.of(mkId("expiry-candidate")), Set.of()), snapshotDetailsByPolicy @@ -237,7 +259,7 @@ public void testGetSnapshotsToDelete() { assertEquals( List.of(Tuple.tuple(mkId("expiry-candidate"), POLICY_ID)), - SLMGetExpiredSnapshotsAction.getSnapshotsToDelete( + TransportSLMGetExpiredSnapshotsAction.getSnapshotsToDelete( REPO_NAME, createPolicies(Set.of(mkId("expiry-candidate")), Set.of(mkId("expiry-candidate"))), snapshotDetailsByPolicy diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/TransportStopSLMActionTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/TransportStopSLMActionTests.java index 5d0e002c49036..64ede24c9f65a 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/TransportStopSLMActionTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/TransportStopSLMActionTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.test.MockUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ilm.action.StopILMAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; import org.elasticsearch.xpack.core.slm.action.StopSLMAction; import org.mockito.ArgumentMatcher; @@ -46,7 +46,7 @@ public void testStopILMClusterStatePriorityIsImmediate() { Task task = new Task( randomLong(), "transport", - StopILMAction.NAME, + ILMActions.STOP.name(), "description", new TaskId(randomLong() + ":" + randomLong()), emptyMap() diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistryTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistryTests.java index 813d239ba9099..d5a8faea1c0a0 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistryTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistryTests.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterModule; @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; @@ -45,7 +46,9 @@ import org.elasticsearch.xpack.core.ilm.OperationMode; import org.elasticsearch.xpack.core.ilm.RolloverAction; import org.elasticsearch.xpack.core.ilm.TimeseriesLifecycleType; -import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; +import 
org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; +import org.elasticsearch.xpack.slm.SnapshotLifecycleFeatures; import org.junit.After; import org.junit.Before; @@ -64,10 +67,12 @@ import static org.elasticsearch.xpack.slm.history.SnapshotLifecycleTemplateRegistry.SLM_POLICY_NAME; import static org.elasticsearch.xpack.slm.history.SnapshotLifecycleTemplateRegistry.SLM_TEMPLATE_NAME; import static org.hamcrest.Matchers.anEmptyMap; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; @@ -97,7 +102,14 @@ public void createRegistryAndClient() { ) ); xContentRegistry = new NamedXContentRegistry(entries); - registry = new SnapshotLifecycleTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client, xContentRegistry); + registry = new SnapshotLifecycleTemplateRegistry( + Settings.EMPTY, + clusterService, + new FeatureService(List.of(new SnapshotLifecycleFeatures())), + threadPool, + client, + xContentRegistry + ); } @After @@ -112,6 +124,7 @@ public void testDisabledDoesNotAddTemplates() { SnapshotLifecycleTemplateRegistry disabledRegistry = new SnapshotLifecycleTemplateRegistry( settings, clusterService, + new FeatureService(List.of(new SnapshotLifecycleFeatures())), threadPool, client, xContentRegistry @@ -151,15 +164,14 @@ public void testThatNonExistingPoliciesAreAddedImmediately() throws Exception { AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, request, listener) -> { - if (action instanceof PutLifecycleAction) { + if (action == ILMActions.PUT) { calledTimes.incrementAndGet(); - assertThat(action, instanceOf(PutLifecycleAction.class)); - assertThat(request, instanceOf(PutLifecycleAction.Request.class)); - final PutLifecycleAction.Request putRequest = (PutLifecycleAction.Request) request; + assertThat(request, instanceOf(PutLifecycleRequest.class)); + final PutLifecycleRequest putRequest = (PutLifecycleRequest) request; assertThat(putRequest.getPolicy().getName(), equalTo(SLM_POLICY_NAME)); assertNotNull(listener); return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return new TestPutIndexTemplateResponse(true); } else { @@ -184,10 +196,10 @@ public void testPolicyAlreadyExists() { policyMap.put(policy.getName(), policy); client.setVerifier((action, request, listener) -> { - if (action instanceof PutComposableIndexTemplateAction) { + if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return new TestPutIndexTemplateResponse(true); - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { fail("if the policy already exists it should be re-put"); } else { fail("client called with unexpected request:" + request.toString()); @@ -210,10 +222,10 @@ public void testPolicyAlreadyExistsButDiffers() throws IOException { LifecyclePolicy policy = policies.get(0); client.setVerifier((action, request, listener) -> { - if (action instanceof PutComposableIndexTemplateAction) { + if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, 
it's verified in another test return new TestPutIndexTemplateResponse(true); - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { fail("if the policy already exists it should be re-put"); } else { fail("client called with unexpected request:" + request.toString()); @@ -267,10 +279,10 @@ public void testSameOrHigherVersionTemplateNotUpgraded() throws Exception { ); AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, request, listener) -> { - if (action instanceof PutComposableIndexTemplateAction) { + if (action == TransportPutComposableIndexTemplateAction.TYPE) { fail("template should not have been re-installed"); return null; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; } else { @@ -317,6 +329,10 @@ public void testValidate() { ); } + public void testTemplateNameIsVersioned() { + assertThat(SLM_TEMPLATE_NAME, endsWith("-" + INDEX_TEMPLATE_VERSION)); + } + // ------------- /** @@ -359,16 +375,17 @@ private ActionResponse verifyTemplateInstalled( ActionRequest request, ActionListener<?> listener ) { - if (action instanceof PutComposableIndexTemplateAction) { + if (action == TransportPutComposableIndexTemplateAction.TYPE) { calledTimes.incrementAndGet(); - assertThat(action, instanceOf(PutComposableIndexTemplateAction.class)); - assertThat(request, instanceOf(PutComposableIndexTemplateAction.Request.class)); - final PutComposableIndexTemplateAction.Request putRequest = (PutComposableIndexTemplateAction.Request) request; + assertThat(action, sameInstance(TransportPutComposableIndexTemplateAction.TYPE)); + assertThat(request, instanceOf(TransportPutComposableIndexTemplateAction.Request.class)); + final TransportPutComposableIndexTemplateAction.Request putRequest = + (TransportPutComposableIndexTemplateAction.Request) request; assertThat(putRequest.name(), equalTo(SLM_TEMPLATE_NAME)); assertThat(putRequest.indexTemplate().version(), equalTo((long) INDEX_TEMPLATE_VERSION)); assertNotNull(listener); return new TestPutIndexTemplateResponse(true); - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; } else { diff --git a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java index d19747578b537..88d7968e1660e 100644 --- a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java +++ b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java @@ -694,11 +694,6 @@ public String getProfileName() { return channel.getProfileName(); } - @Override - public String getChannelType() { - return channel.getChannelType(); - } - @Override public void sendResponse(TransportResponse response) { fail("recovery should not succeed"); } diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeQueryTestCase.java
b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeQueryTestCase.java index ca7f59b703f28..dd90112439cb9 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeQueryTestCase.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/CartesianShapeQueryTestCase.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.geo.GeometryNormalizer; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.GeometryCollection; import org.elasticsearch.geometry.Line; @@ -17,7 +18,6 @@ import org.elasticsearch.search.geo.BaseShapeQueryTestCase; import org.elasticsearch.search.geo.SpatialQueryBuilders; import org.elasticsearch.xpack.spatial.index.query.ShapeQueryBuilder; -import org.elasticsearch.xpack.spatial.util.ShapeTestUtils; import java.util.ArrayList; import java.util.Collections; diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverShapeTests.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverShapeTests.java index 5e4b778d6c093..554c9ff2904dc 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverShapeTests.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverShapeTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; +import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.GeometryCollection; import org.elasticsearch.geometry.MultiPoint; @@ -25,7 +26,6 @@ import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.spatial.index.query.ShapeQueryBuilder; -import org.elasticsearch.xpack.spatial.util.ShapeTestUtils; import java.io.IOException; import java.util.List; diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsIT.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsIT.java index 85e371023348f..b66daeaa820b5 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsIT.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.spatial.search.aggregations.metrics; import org.elasticsearch.common.geo.SpatialPoint; +import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.geometry.Point; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.aggregations.metrics.SpatialBounds; @@ -15,7 +16,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xpack.spatial.LocalStateSpatialPlugin; import org.elasticsearch.xpack.spatial.common.CartesianPoint; -import org.elasticsearch.xpack.spatial.util.ShapeTestUtils; import java.util.Collection; import java.util.Collections; diff --git 
a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianCentroidIT.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianCentroidIT.java index 7e1458f9d63b1..cc372eade8c90 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianCentroidIT.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianCentroidIT.java @@ -8,13 +8,13 @@ package org.elasticsearch.xpack.spatial.search.aggregations.metrics; import org.elasticsearch.common.geo.SpatialPoint; +import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.geometry.Point; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.aggregations.metrics.CentroidAggregationTestBase; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xpack.spatial.LocalStateSpatialPlugin; import org.elasticsearch.xpack.spatial.common.CartesianPoint; -import org.elasticsearch.xpack.spatial.util.ShapeTestUtils; import java.util.Collection; import java.util.Collections; diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/common/CartesianBoundingBox.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/common/CartesianBoundingBox.java index 53d1144069723..eb9c1432c4775 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/common/CartesianBoundingBox.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/common/CartesianBoundingBox.java @@ -7,17 +7,13 @@ package org.elasticsearch.xpack.spatial.common; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.geo.BoundingBox; -import org.elasticsearch.common.geo.GeoUtils; -import org.elasticsearch.common.geo.SpatialPoint; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -67,37 +63,4 @@ public final String getWriteableName() { public final TransportVersion getMinimalSupportedVersion() { return TransportVersions.GENERIC_NAMED_WRITABLE_ADDED; } - - protected static class CartesianBoundsParser extends BoundsParser<CartesianBoundingBox> { - CartesianBoundsParser(XContentParser parser) { - super(parser); - } - - @Override - protected CartesianBoundingBox createWithEnvelope() { - CartesianPoint topLeft = new CartesianPoint(envelope.getMinLon(), envelope.getMaxLat()); - CartesianPoint bottomRight = new CartesianPoint(envelope.getMaxLon(), envelope.getMinLat()); - return new CartesianBoundingBox(topLeft, bottomRight); - } - - @Override - protected CartesianBoundingBox createWithBounds() { - CartesianPoint topLeft = new CartesianPoint(left, top); - CartesianPoint bottomRight = new CartesianPoint(right, bottom); - return new CartesianBoundingBox(topLeft, bottomRight); - } - - @Override - protected SpatialPoint parsePointWith(XContentParser parser, GeoUtils.EffectivePoint effectivePoint) throws IOException { - return CartesianPoint.parsePoint(parser, false); - } - } - - /** - * Parses the bounding box and returns bottom, top, left, right coordinates - */ - public static
CartesianBoundingBox parseBoundingBox(XContentParser parser) throws IOException, ElasticsearchParseException { - CartesianBoundsParser bounds = new CartesianBoundsParser(parser); - return bounds.parseBoundingBox(); - } } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java index 6804e5a857f57..01a2b5f0e5598 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java @@ -21,9 +21,6 @@ import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.mapper.AbstractPointGeometryFieldMapper; -import org.elasticsearch.index.mapper.BlockDocValuesReader; -import org.elasticsearch.index.mapper.BlockLoader; -import org.elasticsearch.index.mapper.BlockSourceReader; import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.GeoShapeFieldMapper; @@ -183,10 +180,9 @@ public FieldMapper.Builder getMergeBuilder() { return new Builder(simpleName(), builder.ignoreMalformed.getDefaultValue().value()).init(this); } - public static class PointFieldType extends AbstractGeometryFieldType<CartesianPoint> implements ShapeQueryable { + public static class PointFieldType extends AbstractPointFieldType<CartesianPoint> implements ShapeQueryable { private final ShapeQueryPointProcessor queryProcessor; - private final CartesianPoint nullValue; private PointFieldType( String name, @@ -197,8 +193,7 @@ private PointFieldType( CartesianPoint nullValue, Map<String, Object> meta ) { - super(name, indexed, stored, hasDocValues, parser, meta); - this.nullValue = nullValue; + super(name, indexed, stored, hasDocValues, parser, nullValue, meta); this.queryProcessor = new ShapeQueryPointProcessor(); } @@ -231,17 +226,6 @@ public Query shapeQuery(Geometry shape, String fieldName, ShapeRelation relation protected Function<List<CartesianPoint>, List<Object>> getFormatter(String format) { return GeometryFormatterFactory.getFormatter(format, p -> new Point(p.getX(), p.getY())); } - - @Override - public BlockLoader blockLoader(BlockLoaderContext blContext) { - if (hasDocValues()) { - return new BlockDocValuesReader.LongsBlockLoader(name()); - } - // TODO: Currently we use longs in the compute engine and render to WKT in ESQL - return new BlockSourceReader.LongsBlockLoader( - valueFetcher(blContext.sourcePaths(name()), nullValue, GeometryFormatterFactory.WKT) - ); - } } /** CartesianPoint parser implementation */ diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/CartesianCentroidCalculatorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/CartesianCentroidCalculatorTests.java index b776e27ce4cfb..4ce533515d20e 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/CartesianCentroidCalculatorTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/CartesianCentroidCalculatorTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.spatial.index.fielddata; +import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.geometry.Line; import org.elasticsearch.geometry.MultiLine; import org.elasticsearch.geometry.MultiPoint; @@ -14,7
+15,6 @@ import org.elasticsearch.geometry.Point; import org.elasticsearch.geometry.Polygon; import org.elasticsearch.geometry.Rectangle; -import org.elasticsearch.xpack.spatial.util.ShapeTestUtils; public class CartesianCentroidCalculatorTests extends CentroidCalculatorTests { protected Point randomPoint() { diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/CartesianShapeDocValuesQueryTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/CartesianShapeDocValuesQueryTests.java index 6eab9964c6ea8..ae5a6f182274b 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/CartesianShapeDocValuesQueryTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/CartesianShapeDocValuesQueryTests.java @@ -10,7 +10,6 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.ShapeField; import org.apache.lucene.document.XYShape; -import org.apache.lucene.geo.XShapeTestUtil; import org.apache.lucene.geo.XYGeometry; import org.apache.lucene.geo.XYPolygon; import org.apache.lucene.geo.XYRectangle; @@ -26,11 +25,12 @@ import org.apache.lucene.tests.search.CheckHits; import org.apache.lucene.tests.search.QueryUtils; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.geo.ShapeTestUtils; +import org.elasticsearch.geo.XShapeTestUtil; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.spatial.common.ShapeUtils; import org.elasticsearch.xpack.spatial.index.fielddata.CoordinateEncoder; -import org.elasticsearch.xpack.spatial.util.ShapeTestUtils; import java.io.IOException; import java.util.List; diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldTypeTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldTypeTests.java index 8364ad3d4c027..7c5fa5053222b 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldTypeTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldTypeTests.java @@ -25,16 +25,18 @@ import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.xpack.vectortile.SpatialGeometryFormatterExtension; import org.elasticsearch.xpack.vectortile.feature.FeatureFactory; -import org.hamcrest.Matchers; import java.io.IOException; import java.nio.ByteOrder; import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + public class GeoShapeWithDocValuesFieldTypeTests extends FieldTypeTestCase { - public void testFetchSourceValue() throws IOException { + public void testFetchSourceValue() throws Exception { final GeoFormatterFactory<Geometry> geoFormatterFactory = new GeoFormatterFactory<>( new SpatialGeometryFormatterExtension().getGeometryFormatterFactories() ); @@ -53,26 +55,43 @@ public void testFetchSourceValue() throws IOException { String wktLineString = "LINESTRING (42.0 27.1, 30.0 50.0)"; String wktPoint = "POINT (14.0 15.0)"; String wktMalformed = "POINT foo"; + byte[] wkbLine = WellKnownBinary.toWKB( + WellKnownText.fromWKT(StandardValidator.NOOP, false, wktLineString), + ByteOrder.LITTLE_ENDIAN + ); + byte[] wkbPoint = WellKnownBinary.toWKB(WellKnownText.fromWKT(StandardValidator.NOOP,
false, wktPoint), ByteOrder.LITTLE_ENDIAN); // Test a single shape in geojson format. Object sourceValue = jsonLineString; assertEquals(List.of(jsonLineString), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktLineString), fetchSourceValue(mapper, sourceValue, "wkt")); + List<?> wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(1)); + assertThat(wkb.get(0), equalTo(wkbLine)); // Test a malformed single shape in geojson format sourceValue = jsonMalformed; assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, "wkt")); + assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, "wkb")); // Test a list of shapes in geojson format. sourceValue = List.of(jsonLineString, jsonPoint); assertEquals(List.of(jsonLineString, jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktLineString, wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(2)); + assertThat(wkb.get(0), equalTo(wkbLine)); + assertThat(wkb.get(1), equalTo(wkbPoint)); // Test a list of shapes including one malformed in geojson format sourceValue = List.of(jsonLineString, jsonMalformed, jsonPoint); assertEquals(List.of(jsonLineString, jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktLineString, wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(2)); + assertThat(wkb.get(0), equalTo(wkbLine)); + assertThat(wkb.get(1), equalTo(wkbPoint)); // Test a single shape in wkt format. sourceValue = wktLineString; @@ -109,26 +128,31 @@ public void testFetchStoredValue() throws IOException { geoFormatterFactory ).setStored(true).build(MapperBuilderContext.root(randomBoolean(), false)).fieldType(); - ByteOrder byteOrder = randomBoolean() ? ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN; Map<String, Object> jsonLineString = Map.of("type", "LineString", "coordinates", List.of(List.of(42.0, 27.1), List.of(30.0, 50.0))); Map<String, Object> jsonPoint = Map.of("type", "Point", "coordinates", List.of(14.0, 15.0)); String wktLineString = "LINESTRING (42.0 27.1, 30.0 50.0)"; String wktPoint = "POINT (14.0 15.0)"; BytesRef wkbLineString = new BytesRef( - WellKnownBinary.toWKB(new Line(new double[] { 42.0, 30.0 }, new double[] { 27.1, 50.0 }), byteOrder) + WellKnownBinary.toWKB(new Line(new double[] { 42.0, 30.0 }, new double[] { 27.1, 50.0 }), ByteOrder.LITTLE_ENDIAN) ); - BytesRef wkbPoint = new BytesRef(WellKnownBinary.toWKB(new Point(14.0, 15.0), byteOrder)); + BytesRef wkbPoint = new BytesRef(WellKnownBinary.toWKB(new Point(14.0, 15.0), ByteOrder.LITTLE_ENDIAN)); // Test a single shape in wkb format. List<Object> storedValues = List.of(wkbLineString); assertEquals(List.of(jsonLineString), fetchStoredValue(mapper, storedValues, null)); assertEquals(List.of(wktLineString), fetchStoredValue(mapper, storedValues, "wkt")); + List<?> wkb = fetchStoredValue(mapper, storedValues, "wkb"); + assertThat(wkb.size(), equalTo(1)); + assertThat(wkb.get(0), equalTo(wkbLineString.bytes));
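A side note on the byteOrder change just above: the expected values now pin `ByteOrder.LITTLE_ENDIAN` rather than randomizing, since the fetch path emits little-endian WKB. WKB is self-describing in this respect, its first byte is an order marker, which makes the choice easy to verify. A small standalone check, an illustration rather than part of the PR, relying only on the `WellKnownBinary.toWKB` signature already used above:

```java
import java.nio.ByteOrder;

import org.elasticsearch.geometry.Point;
import org.elasticsearch.geometry.utils.WellKnownBinary;

public class WkbByteOrderCheck {
    public static void main(String[] args) {
        // Per the WKB format, byte 0 is the byte-order marker:
        // 1 = little-endian (NDR), 0 = big-endian (XDR).
        byte[] ndr = WellKnownBinary.toWKB(new Point(14.0, 15.0), ByteOrder.LITTLE_ENDIAN);
        byte[] xdr = WellKnownBinary.toWKB(new Point(14.0, 15.0), ByteOrder.BIG_ENDIAN);
        System.out.println("NDR marker=" + ndr[0] + ", XDR marker=" + xdr[0]); // expect 1 and 0
    }
}
```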
// Test a list of shapes in wkb format. storedValues = List.of(wkbLineString, wkbPoint); assertEquals(List.of(jsonLineString, jsonPoint), fetchStoredValue(mapper, storedValues, null)); assertEquals(List.of(wktLineString, wktPoint), fetchStoredValue(mapper, storedValues, "wkt")); + wkb = fetchStoredValue(mapper, storedValues, "wkb"); + assertThat(wkb.size(), equalTo(2)); + assertThat(wkb.get(0), equalTo(wkbLineString.bytes)); + assertThat(wkb.get(1), equalTo(wkbPoint.bytes)); } public void testFetchVectorTile() throws IOException { @@ -180,9 +204,9 @@ private void fetchVectorTile(Geometry geometry) throws IOException { // happen that the geometry is out of range (close to the poles). features = List.of(); } - assertThat(features.size(), Matchers.equalTo(sourceValue.size())); + assertThat(features.size(), equalTo(sourceValue.size())); for (int i = 0; i < features.size(); i++) { - assertThat(sourceValue.get(i), Matchers.equalTo(features.get(i))); + assertThat(sourceValue.get(i), equalTo(features.get(i))); } } @@ -308,10 +332,10 @@ private void assertFetchSourceMVT(Object sourceValue, String mvtEquivalentAsWKT) final int extent = randomIntBetween(256, 4096); List<?> mvtExpected = fetchSourceValue(mapper, mvtEquivalentAsWKT, "mvt(0/0/0@" + extent + ")"); List<?> mvt = fetchSourceValue(mapper, sourceValue, "mvt(0/0/0@" + extent + ")"); - assertThat(mvt.size(), Matchers.equalTo(1)); - assertThat(mvt.size(), Matchers.equalTo(mvtExpected.size())); - assertThat(mvtExpected.get(0), Matchers.instanceOf(byte[].class)); - assertThat(mvt.get(0), Matchers.instanceOf(byte[].class)); - assertThat((byte[]) mvt.get(0), Matchers.equalTo((byte[]) mvtExpected.get(0))); + assertThat(mvt.size(), equalTo(1)); + assertThat(mvt.size(), equalTo(mvtExpected.size())); + assertThat(mvtExpected.get(0), instanceOf(byte[].class)); + assertThat(mvt.get(0), instanceOf(byte[].class)); + assertThat((byte[]) mvt.get(0), equalTo((byte[]) mvtExpected.get(0))); } } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapperTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapperTests.java index 0e26d9ba0aaf2..d9309cfb16a4c 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapperTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapperTests.java @@ -426,4 +426,10 @@ protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) protected IngestScriptSupport ingestScriptSupport() { throw new AssumptionViolatedException("not supported"); } + + @Override + protected boolean supportsColumnAtATimeReader(MapperService mapper, MappedFieldType ft) { + // Currently ESQL support for cartesian_point is limited to source values + return false; + } } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldTypeTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldTypeTests.java index ed902b0f8cfe1..6524860e9438c 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldTypeTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldTypeTests.java @@ -7,14 +7,19 @@ package org.elasticsearch.xpack.spatial.index.mapper; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.utils.WellKnownBinary; import org.elasticsearch.index.mapper.FieldTypeTestCase;
import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; import java.io.IOException; +import java.nio.ByteOrder; import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; + public class PointFieldTypeTests extends FieldTypeTestCase { public void testFetchSourceValue() throws IOException { @@ -24,26 +29,39 @@ public void testFetchSourceValue() throws IOException { String wktPoint = "POINT (42.0 27.1)"; Map otherJsonPoint = Map.of("type", "Point", "coordinates", List.of(30.0, 50.0)); String otherWktPoint = "POINT (30.0 50.0)"; + byte[] wkbPoint = WellKnownBinary.toWKB(new Point(42.0, 27.1), ByteOrder.LITTLE_ENDIAN); + byte[] otherWkbPoint = WellKnownBinary.toWKB(new Point(30.0, 50.0), ByteOrder.LITTLE_ENDIAN); // Test a single point in [x, y] array format. Object sourceValue = List.of(42.0, 27.1); assertEquals(List.of(jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + List wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(1)); + assertThat(wkb.get(0), equalTo(wkbPoint)); // Test a single point in "x, y" string format. sourceValue = "42.0,27.1"; assertEquals(List.of(jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(1)); + assertThat(wkb.get(0), equalTo(wkbPoint)); // Test a malformed single point sourceValue = "foo"; assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, "wkt")); + assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, "wkb")); // Test a list of points in [x, y] array format. sourceValue = List.of(List.of(42.0, 27.1), List.of(30.0, 50.0)); assertEquals(List.of(jsonPoint, otherJsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktPoint, otherWktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(2)); + assertThat(wkb.get(0), equalTo(wkbPoint)); + assertThat(wkb.get(1), equalTo(otherWkbPoint)); // Test a single point in well-known text format. 
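(One detail behind the switch from a random byte order to a fixed `ByteOrder.LITTLE_ENDIAN` in these expectations: the first byte of a WKB payload encodes the byte order, so the two encodings of the same point differ from byte zero onward. A small sketch, again using only the utilities already imported above:)

```java
import java.nio.ByteOrder;

import org.elasticsearch.geometry.Point;
import org.elasticsearch.geometry.utils.WellKnownBinary;

public class WkbByteOrderSketch {
    public static void main(String[] args) {
        Point p = new Point(42.0, 27.1);
        byte[] little = WellKnownBinary.toWKB(p, ByteOrder.LITTLE_ENDIAN);
        byte[] big = WellKnownBinary.toWKB(p, ByteOrder.BIG_ENDIAN);
        // Per the WKB spec, byte 0 is the order marker: 1 = little-endian (NDR), 0 = big-endian (XDR).
        System.out.println(little[0]); // 1
        System.out.println(big[0]);    // 0
    }
}
```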
sourceValue = "POINT (42.0 27.1)"; diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldTypeTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldTypeTests.java index 1050c9acef11a..c7d87a6c6e8f5 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldTypeTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldTypeTests.java @@ -7,18 +7,23 @@ package org.elasticsearch.xpack.spatial.index.mapper; +import org.elasticsearch.geometry.utils.StandardValidator; +import org.elasticsearch.geometry.utils.WellKnownBinary; +import org.elasticsearch.geometry.utils.WellKnownText; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; -import java.io.IOException; +import java.nio.ByteOrder; import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; + public class ShapeFieldTypeTests extends FieldTypeTestCase { - public void testFetchSourceValue() throws IOException { + public void testFetchSourceValue() throws Exception { MappedFieldType mapper = new ShapeFieldMapper.Builder("field", IndexVersion.current(), false, true).build( MapperBuilderContext.root(false, false) ).fieldType(); @@ -29,26 +34,43 @@ public void testFetchSourceValue() throws IOException { String wktLineString = "LINESTRING (42.0 27.1, 30.0 50.0)"; String wktPoint = "POINT (14.3 15.0)"; String wktMalformed = "POINT foo"; + byte[] wkbLine = WellKnownBinary.toWKB( + WellKnownText.fromWKT(StandardValidator.NOOP, false, wktLineString), + ByteOrder.LITTLE_ENDIAN + ); + byte[] wkbPoint = WellKnownBinary.toWKB(WellKnownText.fromWKT(StandardValidator.NOOP, false, wktPoint), ByteOrder.LITTLE_ENDIAN); // Test a single shape in geojson format. Object sourceValue = jsonLineString; assertEquals(List.of(jsonLineString), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktLineString), fetchSourceValue(mapper, sourceValue, "wkt")); + List wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(1)); + assertThat(wkb.get(0), equalTo(wkbLine)); // Test a malformed single shape in geojson format sourceValue = jsonMalformed; assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, "wkt")); + assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, "wkb")); // Test a list of shapes in geojson format. 
sourceValue = List.of(jsonLineString, jsonPoint); assertEquals(List.of(jsonLineString, jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktLineString, wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(2)); + assertThat(wkb.get(0), equalTo(wkbLine)); + assertThat(wkb.get(1), equalTo(wkbPoint)); // Test a list of shapes including one malformed in geojson format sourceValue = List.of(jsonLineString, jsonMalformed, jsonPoint); assertEquals(List.of(jsonLineString, jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktLineString, wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(2)); + assertThat(wkb.get(0), equalTo(wkbLine)); + assertThat(wkb.get(1), equalTo(wkbPoint)); // Test a single shape in wkt format. sourceValue = wktLineString; diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverPointTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverPointTests.java index 90b57b95b03e3..db67b1f1e998b 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverPointTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverPointTests.java @@ -10,10 +10,10 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.geo.ShapeRelation; +import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.ShapeType; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.xpack.spatial.util.ShapeTestUtils; import java.io.IOException; diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverShapeTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverShapeTests.java index f890947698a97..aa5ae72df2b9e 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverShapeTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverShapeTests.java @@ -10,12 +10,12 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.geo.ShapeRelation; +import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.ShapeType; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.xpack.spatial.util.ShapeTestUtils; import java.io.IOException; diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsAggregatorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsAggregatorTests.java index 8be04619b5d6f..f8285fdd0eef5 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsAggregatorTests.java +++ 
b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsAggregatorTests.java @@ -15,6 +15,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.geometry.Point; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.plugins.SearchPlugin; @@ -26,7 +27,6 @@ import org.elasticsearch.xpack.spatial.common.CartesianPoint; import org.elasticsearch.xpack.spatial.index.mapper.PointFieldMapper; import org.elasticsearch.xpack.spatial.search.aggregations.support.CartesianPointValuesSourceType; -import org.elasticsearch.xpack.spatial.util.ShapeTestUtils; import java.io.IOException; import java.util.List; diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianCentroidAggregatorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianCentroidAggregatorTests.java index 6079452c9ca72..4e391c7dd236d 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianCentroidAggregatorTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianCentroidAggregatorTests.java @@ -14,6 +14,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.elasticsearch.common.geo.SpatialPoint; +import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.geometry.Point; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.plugins.SearchPlugin; @@ -25,7 +26,6 @@ import org.elasticsearch.xpack.spatial.common.CartesianPoint; import org.elasticsearch.xpack.spatial.index.mapper.PointFieldMapper; import org.elasticsearch.xpack.spatial.search.aggregations.support.CartesianPointValuesSourceType; -import org.elasticsearch.xpack.spatial.util.ShapeTestUtils; import java.io.IOException; import java.util.List; diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianShapeBoundsAggregatorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianShapeBoundsAggregatorTests.java index f2ceea6c2e87c..8f479c7ed22c3 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianShapeBoundsAggregatorTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianShapeBoundsAggregatorTests.java @@ -16,6 +16,7 @@ import org.apache.lucene.tests.index.RandomIndexWriter; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.MultiPoint; import org.elasticsearch.geometry.Point; @@ -30,7 +31,6 @@ import org.elasticsearch.xpack.spatial.search.aggregations.support.CartesianPointValuesSourceType; import org.elasticsearch.xpack.spatial.search.aggregations.support.CartesianShapeValuesSourceType; import org.elasticsearch.xpack.spatial.util.GeoTestUtils; -import org.elasticsearch.xpack.spatial.util.ShapeTestUtils; import java.io.IOException; import java.util.ArrayList; diff --git 
a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianShapeCentroidAggregatorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianShapeCentroidAggregatorTests.java index d8f00edd7873d..8ade6d8e5695a 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianShapeCentroidAggregatorTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianShapeCentroidAggregatorTests.java @@ -14,6 +14,7 @@ import org.apache.lucene.tests.index.RandomIndexWriter; import org.elasticsearch.common.geo.Orientation; import org.elasticsearch.common.geo.SpatialPoint; +import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.plugins.SearchPlugin; @@ -31,7 +32,6 @@ import org.elasticsearch.xpack.spatial.search.aggregations.support.CartesianPointValuesSourceType; import org.elasticsearch.xpack.spatial.search.aggregations.support.CartesianShapeValuesSourceType; import org.elasticsearch.xpack.spatial.util.GeoTestUtils; -import org.elasticsearch.xpack.spatial.util.ShapeTestUtils; import java.io.IOException; import java.util.ArrayList; @@ -162,8 +162,9 @@ public void testSingleValuedField() throws Exception { w.addDocument(document); if (targetShapeType.compareTo(calculator.getDimensionalShapeType()) == 0) { double weight = calculator.sumWeight(); - compensatedSumLat.add(weight * calculator.getY()); - compensatedSumLon.add(weight * calculator.getX()); + // compute the centroid of centroids in float space + compensatedSumLat.add(weight * (float) calculator.getY()); + compensatedSumLon.add(weight * (float) calculator.getX()); compensatedSumWeight.add(weight); } } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/InternalCartesianCentroidTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/InternalCartesianCentroidTests.java index 5a22706becf06..d033b3e51e31b 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/InternalCartesianCentroidTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/InternalCartesianCentroidTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.geo.SpatialPoint; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.geometry.Point; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.aggregations.Aggregation; @@ -21,7 +22,6 @@ import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xpack.spatial.LocalStateSpatialPlugin; import org.elasticsearch.xpack.spatial.common.CartesianPoint; -import org.elasticsearch.xpack.spatial.util.ShapeTestUtils; import java.util.Collections; import java.util.HashMap; diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/util/ShapeUtilTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/util/ShapeUtilTests.java index 442c134c648b2..2b4bf58dd2211 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/util/ShapeUtilTests.java +++ 
b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/util/ShapeUtilTests.java @@ -7,9 +7,10 @@ package org.elasticsearch.xpack.spatial.util; -import org.apache.lucene.geo.XShapeTestUtil; import org.apache.lucene.geo.XYPolygon; import org.apache.lucene.geo.XYRectangle; +import org.elasticsearch.geo.ShapeTestUtils; +import org.elasticsearch.geo.XShapeTestUtil; import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.greaterThan; diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlQueryRequest.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlQueryRequest.java index 0f92b8905ef69..d711538ad1d09 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlQueryRequest.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlQueryRequest.java @@ -438,7 +438,7 @@ public AbstractSqlQueryRequest(StreamInput in) throws IOException { pageTimeout = in.readTimeValue(); filter = in.readOptionalNamedWriteable(QueryBuilder.class); if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_13_0)) { - runtimeMappings = in.readMap(); + runtimeMappings = in.readGenericMap(); } } diff --git a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlTestUtils.java b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlTestUtils.java index bafdbeed8f1a4..42f42e2a26c03 100644 --- a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlTestUtils.java +++ b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlTestUtils.java @@ -103,10 +103,13 @@ static XContentBuilder toXContentBuilder(XContentBuilder builder, CheckedConsume objectGenerator.accept(generator); generator.close(); // System.out.println(out.toString(StandardCharsets.UTF_8)); - XContentParser parser = builder.contentType() - .xContent() - .createParser(XContentParserConfiguration.EMPTY, new ByteArrayInputStream(out.toByteArray())); - builder.copyCurrentStructure(parser); + try ( + XContentParser parser = builder.contentType() + .xContent() + .createParser(XContentParserConfiguration.EMPTY, new ByteArrayInputStream(out.toByteArray())) + ) { + builder.copyCurrentStructure(parser); + } builder.flush(); ByteArrayOutputStream stream = (ByteArrayOutputStream) builder.getOutputStream(); assertEquals("serialized objects differ", out.toString(StandardCharsets.UTF_8), stream.toString(StandardCharsets.UTF_8)); diff --git a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AsyncSqlSearchActionIT.java b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AsyncSqlSearchActionIT.java index a3acd545730c9..55b88a633a58a 100644 --- a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AsyncSqlSearchActionIT.java +++ b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AsyncSqlSearchActionIT.java @@ -31,14 +31,13 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.async.AsyncExecutionId; -import org.elasticsearch.xpack.core.async.DeleteAsyncResultAction; import org.elasticsearch.xpack.core.async.DeleteAsyncResultRequest; import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; import org.elasticsearch.xpack.core.async.StoredAsyncResponse; +import 
org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction; import org.elasticsearch.xpack.sql.plugin.SqlAsyncGetResultsAction; import org.junit.After; -import java.io.InputStream; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Base64; @@ -148,7 +147,7 @@ public void testBasicAsyncExecution() throws Exception { assertThat(ex.getCause().getMessage(), containsString("by zero")); } AcknowledgedResponse deleteResponse = client().execute( - DeleteAsyncResultAction.INSTANCE, + TransportDeleteAsyncResultAction.TYPE, new DeleteAsyncResultRequest(response.id()) ).actionGet(); assertThat(deleteResponse.isAcknowledged(), equalTo(true)); @@ -241,13 +240,13 @@ public void testAsyncCancellation() throws Exception { logger.trace("Block is established"); ActionFuture deleteResponse = client().execute( - DeleteAsyncResultAction.INSTANCE, + TransportDeleteAsyncResultAction.TYPE, new DeleteAsyncResultRequest(response.id()) ); disableBlocks(plugins); assertThat(deleteResponse.actionGet().isAcknowledged(), equalTo(true)); - deleteResponse = client().execute(DeleteAsyncResultAction.INSTANCE, new DeleteAsyncResultRequest(response.id())); + deleteResponse = client().execute(TransportDeleteAsyncResultAction.TYPE, new DeleteAsyncResultRequest(response.id())); assertFutureThrows(deleteResponse, ResourceNotFoundException.class); } @@ -283,7 +282,7 @@ public void testFinishingBeforeTimeout() throws Exception { assertThat(storedResponse, equalTo(response)); AcknowledgedResponse deleteResponse = client().execute( - DeleteAsyncResultAction.INSTANCE, + TransportDeleteAsyncResultAction.TYPE, new DeleteAsyncResultRequest(response.id()) ).actionGet(); assertThat(deleteResponse.isAcknowledged(), equalTo(true)); @@ -301,8 +300,12 @@ public StoredAsyncResponse getStoredRecord(String id) throws E String value = doc.getSource().get("result").toString(); try (ByteBufferStreamInput buf = new ByteBufferStreamInput(ByteBuffer.wrap(Base64.getDecoder().decode(value)))) { TransportVersion version = TransportVersion.readVersion(buf); - final InputStream compressedIn = CompressorFactory.COMPRESSOR.threadLocalInputStream(buf); - try (StreamInput in = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(compressedIn), registry)) { + try ( + StreamInput in = new NamedWriteableAwareStreamInput( + new InputStreamStreamInput(CompressorFactory.COMPRESSOR.threadLocalStreamInput(buf)), + registry + ) + ) { in.setTransportVersion(version); return new StoredAsyncResponse<>(SqlQueryResponse::new, in); } diff --git a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java index 5892c5ed967b4..3b7da36eebe9e 100644 --- a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java +++ b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java @@ -13,7 +13,6 @@ import org.elasticsearch.xpack.sql.proto.Mode; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -61,6 +60,6 @@ public void testSqlActionOutdatedVersion() { SqlQueryRequestBuilder request = new SqlQueryRequestBuilder(client()).query("SELECT true") .mode(randomFrom(Mode.CLI, Mode.JDBC)) .version("1.2.3"); - 
assertRequestBuilderThrows(request, org.elasticsearch.action.ActionRequestValidationException.class); + expectThrows(org.elasticsearch.action.ActionRequestValidationException.class, request); } } diff --git a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java index c16b0554d8738..a24d5ada5746e 100644 --- a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java +++ b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java @@ -111,7 +111,7 @@ public void testSqlQueryActionLicense() throws Exception { ElasticsearchSecurityException e = expectThrows( ElasticsearchSecurityException.class, - () -> new SqlQueryRequestBuilder(client()).query("SELECT * FROM test").get() + new SqlQueryRequestBuilder(client()).query("SELECT * FROM test") ); assertThat(e.getMessage(), equalTo("current license is non-compliant for [sql]")); enableSqlLicensing(); @@ -126,7 +126,7 @@ public void testSqlQueryActionJdbcModeLicense() throws Exception { ElasticsearchSecurityException e = expectThrows( ElasticsearchSecurityException.class, - () -> new SqlQueryRequestBuilder(client()).query("SELECT * FROM test").mode("jdbc").get() + new SqlQueryRequestBuilder(client()).query("SELECT * FROM test").mode("jdbc") ); assertThat(e.getMessage(), equalTo("current license is non-compliant for [jdbc]")); enableJdbcLicensing(); @@ -141,7 +141,7 @@ public void testSqlTranslateActionLicense() throws Exception { ElasticsearchSecurityException e = expectThrows( ElasticsearchSecurityException.class, - () -> new SqlTranslateRequestBuilder(client()).query("SELECT * FROM test").get() + new SqlTranslateRequestBuilder(client()).query("SELECT * FROM test") ); assertThat(e.getMessage(), equalTo("current license is non-compliant for [sql]")); enableSqlLicensing(); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/SqlServerException.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/SqlServerException.java deleted file mode 100644 index e3a85b03e7a00..0000000000000 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/SqlServerException.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.sql; - -import org.elasticsearch.xpack.ql.QlServerException; - -public abstract class SqlServerException extends QlServerException { - - protected SqlServerException(String message, Object... args) { - super(message, args); - } - - protected SqlServerException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { - super(message, cause, enableSuppression, writableStackTrace); - } - - protected SqlServerException(String message, Throwable cause) { - super(message, cause); - } - - protected SqlServerException(Throwable cause, String message, Object... 
args) { - super(cause, message, args); - } - - protected SqlServerException(Throwable cause) { - super(cause); - } -} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/common/io/SqlStreamInput.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/common/io/SqlStreamInput.java index 456067fba6b04..0c6074357975c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/common/io/SqlStreamInput.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/common/io/SqlStreamInput.java @@ -10,7 +10,6 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; @@ -32,8 +31,7 @@ public static SqlStreamInput fromString(String base64encoded, NamedWriteableRegi StreamInput in = StreamInput.wrap(bytes); TransportVersion inVersion = TransportVersion.readVersion(in); validateStreamVersion(version, inVersion); - InputStreamStreamInput uncompressingIn = new InputStreamStreamInput(CompressorFactory.COMPRESSOR.threadLocalInputStream(in)); - return new SqlStreamInput(uncompressingIn, namedWriteableRegistry, inVersion); + return new SqlStreamInput(CompressorFactory.COMPRESSOR.threadLocalStreamInput(in), namedWriteableRegistry, inVersion); } /** diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PivotCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PivotCursor.java index 2deb79151c0b8..b400aa2a8d6ec 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PivotCursor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PivotCursor.java @@ -44,7 +44,7 @@ public class PivotCursor extends CompositeAggCursor { public PivotCursor(StreamInput in) throws IOException { super(in); - previousKey = in.readBoolean() ? in.readMap() : null; + previousKey = in.readBoolean() ? in.readGenericMap() : null; } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java index a0da67f3006a3..936f4aa23cd57 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java @@ -161,6 +161,7 @@ private void searchWithPointInTime(SearchRequest search, ActionListener { String pitId = openPointInTimeResponse.getPointInTimeId(); + search.indicesOptions(SearchRequest.DEFAULT_INDICES_OPTIONS); search.indices(Strings.EMPTY_ARRAY); search.source().pointInTimeBuilder(new PointInTimeBuilder(pitId)); ActionListener closePitOnErrorListener = wrap(searchResponse -> { @@ -201,13 +202,14 @@ public static SearchRequest prepareRequest(SearchSourceBuilder source, SqlConfig source.timeout(cfg.requestTimeout()); SearchRequest searchRequest = new SearchRequest(INTRODUCING_UNSIGNED_LONG); - searchRequest.indices(indices); + if (source.pointInTimeBuilder() == null) { + searchRequest.indices(indices); + searchRequest.indicesOptions( + includeFrozen ? 
IndexResolver.FIELD_CAPS_FROZEN_INDICES_OPTIONS : IndexResolver.FIELD_CAPS_INDICES_OPTIONS + ); + } searchRequest.source(source); searchRequest.allowPartialSearchResults(cfg.allowPartialSearchResults()); - searchRequest.indicesOptions( - includeFrozen ? IndexResolver.FIELD_CAPS_FROZEN_INDICES_OPTIONS : IndexResolver.FIELD_CAPS_INDICES_OPTIONS - ); - return searchRequest; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaDelegatingRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaDelegatingRowSet.java deleted file mode 100644 index f7097b7bebfae..0000000000000 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaDelegatingRowSet.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.sql.execution.search; - -import org.elasticsearch.xpack.ql.type.Schema; -import org.elasticsearch.xpack.sql.session.RowSet; -import org.elasticsearch.xpack.sql.session.SchemaRowSet; - -class SchemaDelegatingRowSet implements SchemaRowSet { - - private final Schema schema; - private final RowSet delegate; - - SchemaDelegatingRowSet(Schema schema, RowSet delegate) { - this.schema = schema; - this.delegate = delegate; - } - - @Override - public Schema schema() { - return schema; - } - - @Override - public boolean hasCurrentRow() { - return delegate.hasCurrentRow(); - } - - @Override - public boolean advanceRow() { - return delegate.advanceRow(); - } - - @Override - public int size() { - return delegate.size(); - } - - @Override - public void reset() { - delegate.reset(); - } - - @Override - public Object column(int index) { - return delegate.column(index); - } -} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/geo/GeoShape.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/geo/GeoShape.java index 6e5070a409035..68ac433d470f9 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/geo/GeoShape.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/geo/GeoShape.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.geometry.Circle; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.GeometryCollection; @@ -25,17 +26,16 @@ import org.elasticsearch.geometry.Polygon; import org.elasticsearch.geometry.Rectangle; import org.elasticsearch.geometry.utils.WellKnownText; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.expression.gen.processor.ConstantNamedWriteable; import java.io.IOException; -import 
java.io.InputStream; import java.text.ParseException; import java.util.Objects; @@ -215,11 +215,10 @@ private static Geometry parse(Object value) throws IOException, ParseException { content.endObject(); try ( - InputStream stream = BytesReference.bytes(content).streamInput(); - XContentParser parser = JsonXContent.jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - stream + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + BytesReference.bytes(content), + XContentType.JSON ) ) { parser.nextToken(); // start object diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlAsyncDeleteResultsAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlAsyncDeleteResultsAction.java index 8f77eb240c98a..a73a18db816f2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlAsyncDeleteResultsAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlAsyncDeleteResultsAction.java @@ -12,8 +12,8 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.async.DeleteAsyncResultAction; import org.elasticsearch.xpack.core.async.DeleteAsyncResultRequest; +import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction; import java.util.List; @@ -36,6 +36,6 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { DeleteAsyncResultRequest delete = new DeleteAsyncResultRequest(request.param(ID_NAME)); - return channel -> client.execute(DeleteAsyncResultAction.INSTANCE, delete, new RestToXContentListener<>(channel)); + return channel -> client.execute(TransportDeleteAsyncResultAction.TYPE, delete, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/RowView.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/RowView.java index 5f3ecd4abdf00..e9301e36d001f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/RowView.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/RowView.java @@ -29,7 +29,7 @@ default T column(int index, Class type) { @Override default void forEach(Consumer action) { - forEachColumn(action::accept); + forEachColumn(action); } default void forEachColumn(Consumer action) { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractorTests.java index b36f286645efc..b7f123f82cf98 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractorTests.java @@ -72,8 +72,7 @@ public void testNoAggs() { public void testZeroNullValue() { TopHitsAggExtractor extractor = randomTopHitsAggExtractor(); - TotalHits totalHits = new TotalHits(0, TotalHits.Relation.EQUAL_TO); - Aggregation agg = new InternalTopHits(extractor.name(), 0, 0, null, new SearchHits(null, totalHits, 0.0f), null); + Aggregation agg = new InternalTopHits(extractor.name(), 0, 0, null, 
SearchHits.EMPTY_WITH_TOTAL_HITS, null); Bucket bucket = new TestBucket(emptyMap(), 0, new Aggregations(singletonList(agg))); assertNull(extractor.extract(bucket)); } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/10_basic.yml index b575ddccb449a..09f08d59049ec 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/10_basic.yml @@ -191,6 +191,7 @@ teardown: Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" # api_key_user security.get_api_key: owner: true + active_only: true - length: { "api_keys" : 1 } - match: { "api_keys.0.username": "api_key_user" } - match: { "api_keys.0.invalidated": false } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/counted_keyword/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/counted_keyword/10_basic.yml index d0f7c7636582f..e5ad63fa31153 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/counted_keyword/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/counted_keyword/10_basic.yml @@ -29,6 +29,16 @@ setup: - do: indices.refresh: { } +--- +"Counted keyword is searchable by default": + - do: + field_caps: + index: test-events + fields: [ events ] + + - match: { fields.events.counted_keyword.searchable: true } + - match: { fields.events.counted_keyword.aggregatable: true } + --- "Counted Terms agg": @@ -49,3 +59,22 @@ setup: - match: { aggregations.event_terms.buckets.2.key: "c" } - match: { aggregations.event_terms.buckets.2.doc_count: 2 } - length: { aggregations.event_terms.buckets: 3 } + +--- +# Use a probability of 1.0 to ensure a consistent bucket count. 
+"Sampled Counted Terms agg": + - do: + search: + index: test-events + body: + size: 0 + aggs: + sample: + random_sampler: + probability: 1.0 + aggs: + event_terms: + counted_terms: + field: events + + - length: { aggregations.sample.event_terms.buckets: 3 } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/counted_keyword/20_no_index.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/counted_keyword/20_no_index.yml new file mode 100644 index 0000000000000..1fe48207b5586 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/counted_keyword/20_no_index.yml @@ -0,0 +1,54 @@ +setup: + + - skip: + version: " - 8.12.99" + reason: "index option on counted_keyword was added in 8.13" + + - do: + indices.create: + index: test-events-no-index + body: + mappings: + properties: + events: + type: counted_keyword + index: false + + - do: + index: + index: test-events-no-index + id: "1" + body: { "events": [ "a", "a", "b" ] } + + + - do: + indices.refresh: { } + +--- +"Counted keyword with index false is not searchable": + - do: + field_caps: + index: test-events-no-index + fields: [ events ] + + - match: { fields.events.counted_keyword.searchable: false } + - match: { fields.events.counted_keyword.aggregatable: true } + +--- +"Counted Terms agg only relies on doc values": +# although the field is not indexed, the counted_terms agg should still work + - do: + search: + index: test-events-no-index + body: + size: 0 + aggs: + event_terms: + counted_terms: + field: events + + - match: { aggregations.event_terms.buckets.0.key: "a" } + - match: { aggregations.event_terms.buckets.0.doc_count: 2 } + - match: { aggregations.event_terms.buckets.1.key: "b" } + - match: { aggregations.event_terms.buckets.1.doc_count: 1 } + - length: { aggregations.event_terms.buckets: 2 } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml index e768a6b348959..6cbc9a225588b 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml @@ -1,10 +1,8 @@ --- "Coalesce and to_ip functions": - skip: - version: all - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/102871" - # version: " - 8.11.99" - # reason: "fixes in 8.12 or later" + version: " - 8.11.99" + reason: "fixes in 8.12 or later" features: warnings - do: bulk: @@ -126,3 +124,71 @@ - match: { values.2.0: null } - match: { values.2.1: null } - match: { values.2.2: index1 } + + +--- +"null MappedFieldType on single value detection #103141": + - skip: + version: " - 8.12.99" + reason: "fixes in 8.13 or later" + - do: + indices.create: + index: npe_single_value_1 + body: + mappings: + properties: + field1: + type: long + - do: + indices.create: + index: npe_single_value_2 + body: + mappings: + properties: + field2: + type: long + - do: + indices.create: + index: npe_single_value_3 + body: + mappings: + properties: + field3: + type: long + + - do: + bulk: + refresh: true + body: + - { "index": { "_index": "npe_single_value_1" } } + - { "field1": 10 } + - { "index": { "_index": "npe_single_value_2" } } + - { "field2": 20 } + - { "index": { "_index": "npe_single_value_3" } } + - { "field3": 30 } + - do: + esql.query: + body: + query: 'from npe_single_value* | stats x = avg(field1) | limit 10' + - match: { columns.0.name: x } + - match: { columns.0.type: double } 
+ - length: { values: 1 } + - match: { values.0.0: 10.0 } + + - do: + esql.query: + body: + query: 'from npe_single_value* | stats x = avg(field2) | limit 10' + - match: { columns.0.name: x } + - match: { columns.0.type: double } + - length: { values: 1 } + - match: { values.0.0: 20.0 } + + - do: + esql.query: + body: + query: 'from npe_single_value* | stats x = avg(field3) | limit 10' + - match: { columns.0.name: x } + - match: { columns.0.type: double } + - length: { values: 1 } + - match: { values.0.0: 30.0 } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml index 30e56cd9cc748..4d30f3a39afcc 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml @@ -868,6 +868,9 @@ synthetic _source text with parent keyword: --- geo_point: + - skip: + version: " - 8.12.99" + reason: "geo_point precision changed in 8.13.0" - do: indices.create: index: test @@ -894,10 +897,13 @@ geo_point: - match: { columns.0.name: location } - match: { columns.0.type: geo_point } - length: { values: 1 } - - match: { values.0.0: "POINT (0.9999999403953552 -1.000000024214387)" } + - match: { values.0.0: "POINT (1.0 -1.0)" } --- cartesian_point: + - skip: + version: " - 8.12.99" + reason: "cartesian_point precision changed in 8.13.0" - do: indices.create: index: test diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_unsupported_types.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_unsupported_types.yml index 181cf52b66c7c..06fc2c8a3fa99 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_unsupported_types.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_unsupported_types.yml @@ -110,8 +110,8 @@ setup: --- unsupported: - skip: - version: " - 8.11.99" - reason: "Latest types supported in ESQL starting with 8.12.0" + version: " - 8.12.99" + reason: "Latest types supported in ESQL as of 8.13.0" - do: allowed_warnings_regex: @@ -189,8 +189,8 @@ unsupported: - match: { values.0.5: null } - match: { values.0.6: null } - match: { values.0.7: null } - - match: { values.0.8: "POINT (9.999999990686774 11.999999997206032)" } - - match: { values.0.9: "POINT (9.999999990686774 11.999999997206032)" } + - match: { values.0.8: "POINT (10.0 12.0)" } + - match: { values.0.9: "POINT (10.0 12.0)" } - match: { values.0.10: null } - match: { values.0.11: null } - match: { values.0.12: null } @@ -198,7 +198,7 @@ unsupported: - match: { values.0.14: null } - match: { values.0.15: "foo bar baz" } - match: { values.0.16: Alice } - - match: { values.0.17: "POINT (-97.15447235107422 25.996152877807617)" } + - match: { values.0.17: "POINT (-97.15447 25.9961525)" } - match: { values.0.18: null } - match: { values.0.19: null } - match: { values.0.20: null } @@ -290,8 +290,8 @@ unsupported: --- unsupported with sort: - skip: - version: " - 8.11.99" - reason: "Latest types supported in ESQL starting with 8.12.0" + version: " - 8.12.99" + reason: "Latest types supported in ESQL as of 8.13.0" - do: allowed_warnings_regex: @@ -369,8 +369,8 @@ unsupported with sort: - match: { values.0.5: null } - match: { values.0.6: null } - match: { values.0.7: null } - - match: { values.0.8: "POINT (9.999999990686774 11.999999997206032)" } - - match: { values.0.9: "POINT (9.999999990686774 11.999999997206032)" } + - match: 
{ values.0.8: "POINT (10.0 12.0)" } + - match: { values.0.9: "POINT (10.0 12.0)" } - match: { values.0.10: null } - match: { values.0.11: null } - match: { values.0.12: null } @@ -378,7 +378,7 @@ unsupported with sort: - match: { values.0.14: null } - match: { values.0.15: "foo bar baz" } - match: { values.0.16: Alice } - - match: { values.0.17: "POINT (-97.15447235107422 25.996152877807617)" } + - match: { values.0.17: "POINT (-97.15447 25.9961525)" } - match: { values.0.18: null } - match: { values.0.19: null } - match: { values.0.20: null } @@ -390,35 +390,3 @@ unsupported with sort: - match: { values.0.26: xy } - match: { values.0.27: "foo bar" } - match: { values.0.28: 3 } - ---- -spatial types unsupported in 8.11: - - skip: - version: " - 8.10.99, 8.12.0 - " - reason: "Elasticsearch 8.11 did not support any spatial types" - - - do: - allowed_warnings_regex: - - "Field \\[.*\\] cannot be retrieved, it is unsupported or not indexed; returning null" - - "No limit defined, adding default limit of \\[.*\\]" - esql.query: - body: - query: 'from test | keep geo_point, geo_point_alias, point, geo_shape, shape' - - - match: { columns.0.name: geo_point } - - match: { columns.0.type: unsupported } - - match: { columns.1.name: geo_point_alias } - - match: { columns.1.type: unsupported } - - match: { columns.2.name: point } - - match: { columns.2.type: unsupported } - - match: { columns.3.name: geo_shape } - - match: { columns.3.type: unsupported } - - match: { columns.4.name: shape } - - match: { columns.4.type: unsupported } - - - length: { values: 1 } - - match: { values.0.0: null } - - match: { values.0.1: null } - - match: { values.0.2: null } - - match: { values.0.3: null } - - match: { values.0.4: null } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/81_text_exact_subfields.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/81_text_exact_subfields.yml new file mode 100644 index 0000000000000..64d4665e3cfe7 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/81_text_exact_subfields.yml @@ -0,0 +1,471 @@ +--- +setup: + - skip: + version: " - 8.12.99" + reason: "Exact subfields fixed in v 8.13" + features: allowed_warnings_regex + - do: + indices.create: + index: test + body: + settings: + analysis: + normalizer: + my_normalizer: + type: "custom" + filter: [ "lowercase", "asciifolding" ] + mappings: + properties: + emp_no: + type: long + text_ignore_above: + type: text + fields: + raw: + type: keyword + ignore_above: 4 + text_normalizer: + type: text + fields: + raw: + type: keyword + normalizer: my_normalizer + non_indexed: + type: text + fields: + raw: + type: "keyword" + index: false + doc_values: false + store: true + + - do: + bulk: + index: test + refresh: true + body: + - { "index": { } } + - { "emp_no": 10, "text_ignore_above":"this is a long text", "text_normalizer": "CamelCase", "non_indexed": "foo"} + - { "index": { } } + - { "emp_no": 20, "text_ignore_above":"this", "text_normalizer": "abc", "non_indexed": "bar"} + +--- +"extract": + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | sort emp_no | keep text_ignore_above, text_ignore_above.raw, text_normalizer, text_normalizer.raw, non_indexed, non_indexed.raw' + + - match: { columns.0.name: "text_ignore_above" } + - match: { columns.0.type: "text" } + - match: { columns.1.name: "text_ignore_above.raw" } + - match: { columns.1.type: "keyword" } + - match: { 
columns.2.name: "text_normalizer" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "text_normalizer.raw" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "non_indexed" } + - match: { columns.4.type: "text" } + - match: { columns.5.name: "non_indexed.raw" } + - match: { columns.5.type: "keyword" } + + - length: { values: 2 } + - match: { values.0: [ "this is a long text", null, "CamelCase", "camelcase", "foo", "foo"] } + - match: { values.1: [ "this", "this", "abc", "abc", "bar", "bar" ]} + +--- +"filter ignore above": + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | where text_ignore_above == "this" | keep text_ignore_above, text_ignore_above.raw, text_normalizer, text_normalizer.raw, non_indexed, non_indexed.raw' + + - match: { columns.0.name: "text_ignore_above" } + - match: { columns.0.type: "text" } + - match: { columns.1.name: "text_ignore_above.raw" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "text_normalizer" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "text_normalizer.raw" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "non_indexed" } + - match: { columns.4.type: "text" } + - match: { columns.5.name: "non_indexed.raw" } + - match: { columns.5.type: "keyword" } + + - length: { values: 1 } + - match: { values.0: [ "this", "this", "abc", "abc", "bar", "bar" ]} + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | where text_ignore_above == "this is a long text" | keep text_ignore_above, text_ignore_above.raw, text_normalizer, text_normalizer.raw, non_indexed, non_indexed.raw' + + - match: { columns.0.name: "text_ignore_above" } + - match: { columns.0.type: "text" } + - match: { columns.1.name: "text_ignore_above.raw" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "text_normalizer" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "text_normalizer.raw" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "non_indexed" } + - match: { columns.4.type: "text" } + - match: { columns.5.name: "non_indexed.raw" } + - match: { columns.5.type: "keyword" } + + - length: { values: 1 } + - match: { values.0: [ "this is a long text", null, "CamelCase", "camelcase", "foo", "foo"] } + + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | where text_ignore_above is null | keep text_ignore_above, text_ignore_above.raw, text_normalizer, text_normalizer.raw, non_indexed, non_indexed.raw' + + - match: { columns.0.name: "text_ignore_above" } + - match: { columns.0.type: "text" } + - match: { columns.1.name: "text_ignore_above.raw" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "text_normalizer" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "text_normalizer.raw" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "non_indexed" } + - match: { columns.4.type: "text" } + - match: { columns.5.name: "non_indexed.raw" } + - match: { columns.5.type: "keyword" } + + - length: { values: 0 } + + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | where text_ignore_above LIKE "*long*" | keep text_ignore_above, text_ignore_above.raw, 
text_normalizer, text_normalizer.raw, non_indexed, non_indexed.raw' + + - match: { columns.0.name: "text_ignore_above" } + - match: { columns.0.type: "text" } + - match: { columns.1.name: "text_ignore_above.raw" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "text_normalizer" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "text_normalizer.raw" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "non_indexed" } + - match: { columns.4.type: "text" } + - match: { columns.5.name: "non_indexed.raw" } + - match: { columns.5.type: "keyword" } + + - length: { values: 1 } + - match: { values.0: [ "this is a long text", null, "CamelCase", "camelcase", "foo", "foo"] } + +--- +"filter with normalizer": + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | where text_normalizer == "CamelCase" | keep text_ignore_above, text_ignore_above.raw, text_normalizer, text_normalizer.raw, non_indexed, non_indexed.raw' + + - match: { columns.0.name: "text_ignore_above" } + - match: { columns.0.type: "text" } + - match: { columns.1.name: "text_ignore_above.raw" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "text_normalizer" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "text_normalizer.raw" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "non_indexed" } + - match: { columns.4.type: "text" } + - match: { columns.5.name: "non_indexed.raw" } + - match: { columns.5.type: "keyword" } + + - length: { values: 1 } + - match: { values.0: [ "this is a long text", null, "CamelCase", "camelcase", "foo", "foo"] } + + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | where text_normalizer == text_normalizer.raw | keep text_ignore_above, text_ignore_above.raw, text_normalizer, text_normalizer.raw, non_indexed, non_indexed.raw' + + - match: { columns.0.name: "text_ignore_above" } + - match: { columns.0.type: "text" } + - match: { columns.1.name: "text_ignore_above.raw" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "text_normalizer" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "text_normalizer.raw" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "non_indexed" } + - match: { columns.4.type: "text" } + - match: { columns.5.name: "non_indexed.raw" } + - match: { columns.5.type: "keyword" } + + - length: { values: 1 } + - match: { values.0: [ "this", "this", "abc", "abc", "bar", "bar" ]} + + +--- +"sort ignore above": + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | sort text_ignore_above asc | keep text_ignore_above, text_ignore_above.raw, text_normalizer, text_normalizer.raw, non_indexed, non_indexed.raw' + + - match: { columns.0.name: "text_ignore_above" } + - match: { columns.0.type: "text" } + - match: { columns.1.name: "text_ignore_above.raw" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "text_normalizer" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "text_normalizer.raw" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "non_indexed" } + - match: { columns.4.type: "text" } + - match: { columns.5.name: "non_indexed.raw" } + - match: { columns.5.type: "keyword" } + + - length: { values: 2 } + - match: { 
values.0: [ "this", "this", "abc", "abc", "bar", "bar" ]} + - match: { values.1: [ "this is a long text", null, "CamelCase", "camelcase", "foo", "foo"] } + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | sort text_ignore_above desc | keep text_ignore_above, text_ignore_above.raw, text_normalizer, text_normalizer.raw, non_indexed, non_indexed.raw' + + - match: { columns.0.name: "text_ignore_above" } + - match: { columns.0.type: "text" } + - match: { columns.1.name: "text_ignore_above.raw" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "text_normalizer" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "text_normalizer.raw" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "non_indexed" } + - match: { columns.4.type: "text" } + - match: { columns.5.name: "non_indexed.raw" } + - match: { columns.5.type: "keyword" } + + - length: { values: 2 } + - match: { values.0: [ "this is a long text", null, "CamelCase", "camelcase", "foo", "foo"] } + - match: { values.1: [ "this", "this", "abc", "abc", "bar", "bar" ]} + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | sort text_ignore_above asc nulls first | keep text_ignore_above, text_ignore_above.raw, text_normalizer, text_normalizer.raw, non_indexed, non_indexed.raw' + + - match: { columns.0.name: "text_ignore_above" } + - match: { columns.0.type: "text" } + - match: { columns.1.name: "text_ignore_above.raw" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "text_normalizer" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "text_normalizer.raw" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "non_indexed" } + - match: { columns.4.type: "text" } + - match: { columns.5.name: "non_indexed.raw" } + - match: { columns.5.type: "keyword" } + + - length: { values: 2 } + - match: { values.0: [ "this", "this", "abc", "abc", "bar", "bar" ]} + - match: { values.1: [ "this is a long text", null, "CamelCase", "camelcase", "foo", "foo"] } + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | sort text_ignore_above asc nulls last | keep text_ignore_above, text_ignore_above.raw, text_normalizer, text_normalizer.raw, non_indexed, non_indexed.raw' + + - match: { columns.0.name: "text_ignore_above" } + - match: { columns.0.type: "text" } + - match: { columns.1.name: "text_ignore_above.raw" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "text_normalizer" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "text_normalizer.raw" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "non_indexed" } + - match: { columns.4.type: "text" } + - match: { columns.5.name: "non_indexed.raw" } + - match: { columns.5.type: "keyword" } + + - length: { values: 2 } + - match: { values.0: [ "this", "this", "abc", "abc", "bar", "bar" ]} + - match: { values.1: [ "this is a long text", null, "CamelCase", "camelcase", "foo", "foo"] } + + + +--- +"sort normalizer": + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | sort text_normalizer asc | keep text_ignore_above, text_ignore_above.raw, text_normalizer, text_normalizer.raw, non_indexed, non_indexed.raw' + + - match: { 
columns.0.name: "text_ignore_above" } + - match: { columns.0.type: "text" } + - match: { columns.1.name: "text_ignore_above.raw" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "text_normalizer" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "text_normalizer.raw" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "non_indexed" } + - match: { columns.4.type: "text" } + - match: { columns.5.name: "non_indexed.raw" } + - match: { columns.5.type: "keyword" } + + - length: { values: 2 } + - match: { values.0: [ "this is a long text", null, "CamelCase", "camelcase", "foo", "foo"] } + - match: { values.1: [ "this", "this", "abc", "abc", "bar", "bar" ]} + + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | sort text_normalizer desc | keep text_ignore_above, text_ignore_above.raw, text_normalizer, text_normalizer.raw, non_indexed, non_indexed.raw' + + - match: { columns.0.name: "text_ignore_above" } + - match: { columns.0.type: "text" } + - match: { columns.1.name: "text_ignore_above.raw" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "text_normalizer" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "text_normalizer.raw" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "non_indexed" } + - match: { columns.4.type: "text" } + - match: { columns.5.name: "non_indexed.raw" } + - match: { columns.5.type: "keyword" } + + - length: { values: 2 } + - match: { values.0: [ "this", "this", "abc", "abc", "bar", "bar" ]} + - match: { values.1: [ "this is a long text", null, "CamelCase", "camelcase", "foo", "foo"] } + + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | sort text_normalizer.raw asc | keep text_ignore_above, text_ignore_above.raw, text_normalizer, text_normalizer.raw, non_indexed, non_indexed.raw' + + - match: { columns.0.name: "text_ignore_above" } + - match: { columns.0.type: "text" } + - match: { columns.1.name: "text_ignore_above.raw" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "text_normalizer" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "text_normalizer.raw" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "non_indexed" } + - match: { columns.4.type: "text" } + - match: { columns.5.name: "non_indexed.raw" } + - match: { columns.5.type: "keyword" } + + - length: { values: 2 } + - match: { values.0: [ "this", "this", "abc", "abc", "bar", "bar" ]} + - match: { values.1: [ "this is a long text", null, "CamelCase", "camelcase", "foo", "foo"] } + + +--- +"non indexed": + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | sort non_indexed asc | keep text_ignore_above, text_ignore_above.raw, text_normalizer, text_normalizer.raw, non_indexed, non_indexed.raw' + + - match: { columns.0.name: "text_ignore_above" } + - match: { columns.0.type: "text" } + - match: { columns.1.name: "text_ignore_above.raw" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "text_normalizer" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "text_normalizer.raw" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "non_indexed" } + - match: { columns.4.type: "text" } + - match: { columns.5.name: 
"non_indexed.raw" } + - match: { columns.5.type: "keyword" } + + - length: { values: 2 } + - match: { values.0: [ "this", "this", "abc", "abc", "bar", "bar" ]} + - match: { values.1: [ "this is a long text", null, "CamelCase", "camelcase", "foo", "foo"] } + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | sort non_indexed desc | keep text_ignore_above, text_ignore_above.raw, text_normalizer, text_normalizer.raw, non_indexed, non_indexed.raw' + + - match: { columns.0.name: "text_ignore_above" } + - match: { columns.0.type: "text" } + - match: { columns.1.name: "text_ignore_above.raw" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "text_normalizer" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "text_normalizer.raw" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "non_indexed" } + - match: { columns.4.type: "text" } + - match: { columns.5.name: "non_indexed.raw" } + - match: { columns.5.type: "keyword" } + + - length: { values: 2 } + - match: { values.0: [ "this is a long text", null, "CamelCase", "camelcase", "foo", "foo"] } + - match: { values.1: [ "this", "this", "abc", "abc", "bar", "bar" ]} + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'from test | where non_indexed == "foo" | keep text_ignore_above, text_ignore_above.raw, text_normalizer, text_normalizer.raw, non_indexed, non_indexed.raw' + + - match: { columns.0.name: "text_ignore_above" } + - match: { columns.0.type: "text" } + - match: { columns.1.name: "text_ignore_above.raw" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "text_normalizer" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "text_normalizer.raw" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "non_indexed" } + - match: { columns.4.type: "text" } + - match: { columns.5.name: "non_indexed.raw" } + - match: { columns.5.type: "keyword" } + + - length: { values: 1 } + - match: { values.0: [ "this is a long text", null, "CamelCase", "camelcase", "foo", "foo"] } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/change_point_agg.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/change_point_agg.yml index 3dc4fb54dbc13..a54d26057ee5b 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/change_point_agg.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/change_point_agg.yml @@ -48,11 +48,11 @@ setup: {"index":{}} {"cost":250,"time":1587501273000,"kind":"changed"} {"index":{}} - {"cost":580,"time":1587501283000,"kind":"changed"} + {"cost":380,"time":1587501283000,"kind":"changed"} {"index":{}} - {"cost":600,"time":1587501293000,"kind":"changed"} + {"cost":450,"time":1587501293000,"kind":"changed"} {"index":{}} - {"cost":600,"time":1587501303000,"kind":"changed"} + {"cost":550,"time":1587501303000,"kind":"changed"} {"index":{}} {"cost":600,"time":1587501313000,"kind":"changed"} {"index":{}} diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search.yml index fe25d8957216c..5e29d3cdf2ae6 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search.yml +++ 
b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/text_expansion_search.yml @@ -222,12 +222,12 @@ setup: query: weighted_tokens: ml.tokens: - tokens: [{"the": 1.0}, {"octopus":1.0}, {"comforter":1.0}] + tokens: [{"the": 1.0}, {"comforter":1.0}, {"smells":1.0}, {"bad": 1.0}] pruning_config: tokens_freq_ratio_threshold: 1 tokens_weight_threshold: 0.4 only_score_pruned_tokens: false - - match: { hits.total.value: 4 } + - match: { hits.total.value: 5 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } --- @@ -243,9 +243,9 @@ setup: query: weighted_tokens: ml.tokens: - tokens: [{"the": 1.0}, {"octopus":1.0}, {"comforter":1.0}] + tokens: [{"the": 1.0}, {"comforter":1.0}, {"smells":1.0}, {"bad": 1.0}] pruning_config: {} - - match: { hits.total.value: 4 } + - match: { hits.total.value: 5 } - match: { hits.hits.0._source.source_text: "the octopus comforter smells" } --- @@ -261,7 +261,7 @@ setup: query: weighted_tokens: ml.tokens: - tokens: [{"the": 1.0}, {"octopus":1.0}, {"comforter":1.0}] + tokens: [{"the": 1.0}, {"comforter":1.0}, {"smells":1.0}, {"bad": 1.0}] pruning_config: tokens_freq_ratio_threshold: 4 tokens_weight_threshold: 0.4 diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml index 10de6e2c22d9e..1df34a64f860a 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml @@ -2,7 +2,7 @@ "Test valid job config": - do: ml.validate: - body: > + body: > { "analysis_config": { "bucket_span": "1h", @@ -21,7 +21,7 @@ - do: catch: /.data_description. failed to parse field .format./ ml.validate: - body: > + body: > { "analysis_config": { "bucket_span": "1h", @@ -38,7 +38,7 @@ "Test valid job config with job ID": - do: ml.validate: - body: > + body: > { "job_id": "validate-job-config-with-job-id", "analysis_config": { @@ -58,7 +58,7 @@ - do: catch: /Invalid job_id; '_' can contain lowercase alphanumeric \(a-z and 0-9\), hyphens or underscores; must start and end with alphanumeric/ ml.validate: - body: > + body: > { "job_id": "_", "analysis_config": { @@ -78,7 +78,7 @@ - do: catch: /illegal_argument_exception/ ml.validate: - body: > + body: > { "model_snapshot_id": "wont-create-with-this-setting", "analysis_config" : { @@ -92,7 +92,7 @@ - do: catch: /The job is configured with fields \[model_snapshot_id\] that are illegal to set at job creation/ ml.validate: - body: > + body: > { "model_snapshot_id": "wont-create-with-this-setting", "analysis_config" : { @@ -109,7 +109,7 @@ - do: catch: /illegal_argument_exception.*Duplicate detectors are not allowed/ ml.validate: - body: > + body: > { "analysis_config": { "bucket_span": "1h", @@ -126,7 +126,7 @@ - do: catch: /illegal_argument_exception.*Duplicate detectors are not allowed/ ml.validate: - body: > + body: > { "analysis_config": { "bucket_span": "1h", diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml new file mode 100644 index 0000000000000..5adbf782f3236 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml @@ -0,0 +1,179 @@ +--- +setup: + - skip: + version: all + reason: AwaitsFix https://github.com/elastic/elasticsearch/issues/104038 + # version: " - 8.12.99" + # reason: "Universal Profiling test 
infrastructure is available in 8.12+" + + - do: + cluster.put_settings: + body: + persistent: + xpack.profiling.templates.enabled: true + + - do: + profiling.status: + wait_for_resources_created: true + + - do: + bulk: + refresh: true + body: + - {"create": {"_index": "profiling-events-all"}} + - {"Stacktrace.count": [1], "profiling.project.id": ["100"], "os.kernel": ["9.9.9-0"], "tags": ["environment:qa", "region:eu-west-1"], "host.ip": ["192.168.1.2"], "@timestamp": ["1700504427"], "container.name": ["instance-0000000010"], "ecs.version": ["1.12.0"], "Stacktrace.id": ["S07KmaoGhvNte78xwwRbZQ"], "agent.version": ["head-be593ef3-1688111067"], "host.name": ["ip-192-168-1-2"], "host.id": ["8457605156473051743"], "process.thread.name": ["497295213074376"]} + - {"create": {"_index": "profiling-stacktraces", "_id": "S07KmaoGhvNte78xwwRbZQ"}} + - {"Stacktrace": {"frame": {"ids": "634wiWh6F21tPpXr0Zz3mgAAAAAAEfFi8NlMClggx8jaziUTJXlmWAAAAAAAAIYIZSkKN3zNxr0HYuO2pqe5hQAAAAAAwcBwZSkKN3zNxr0HYuO2pqe5hQAAAAAA5ECvZSkKN3zNxr0HYuO2pqe5hQAAAAAA4_9_ZSkKN3zNxr0HYuO2pqe5hQAAAAAAj7b5ZSkKN3zNxr0HYuO2pqe5hQAAAAAAgwXKZSkKN3zNxr0HYuO2pqe5hQAAAAAAgu3UAAAAAAAAV4sAAAAAAAAAHezOBBlhpr8qZgY89pr05YIxi0DTL7hyTAAAAAAAAAALzZZ6VCjFYAFVAKtY0XlyPwAAAAAAAAAFySPx-89oJ6TfXYn-uir7mQAAAAAAAABch4dwrMYlRFRjyfsvjXt4tgAAAAAAAAAg3V-8FLy1GH8nVRceMDeaiwAAAAAAAAABnVB2vvQdnm3M5BpEt6xnFAAAAAAAAAAV4j8yS0qsC_6XfTfMdPp5KQAAAAAAAAAQ9oBnE4xnAvOiOv1q-LbApgAAAAAAAAAEwRQstrBYZ0ShmJnYV-ADrQAAAAAAAAFLAFikCbtP_Dm7iUthjnlnEgAAAAAAAAEq56q5trA0bAF1B-Um6L_rqwAAAAAAAAAGgi_774C-EJhuJfyXXhzVgwAAAAAAAABEgvYbo0YBmE65VwrpTWYalQAAAAAAAAB2tMqbgEmfZJ47YRogSA-gKgAAAAAAAADlCQUIxcdtvT35ZznMVnzc_AAAAAAAAACXN4c5sJszjyVzcx3AmWN8pwAAAAAAAADS_GFFImAT2VE6Ar5VgmaN7QAAAAAAAAHywnSBrxGSumHiAQQABJeNtQAAAAAAAAAkPK6VPfk6aJqBe-5Qji8O5gAAAAAAAAAFEIxfgHbDbI5dElFzd3Ha-QAAAAAAAAAZFq10nEfKWtXEt510UwEUUAAAAAAAAAB7V_QMdmt4RxKxn4ZNgdvkJwAAAAAAAAAReNITicG0MvFr9HQHk70FLAAAAAAAAAAI9j0yGbd8eQNwdRhHZ159OQAAAAAAAAA9vzzPIR5tUnMkJ7d_ITdQRgAAAAAAAAAC6YIeLAztuVSewvuGh8XKXgAAAAAAAAAFIQvpHpp20NHD-0mZNf95oAAAAAAAAABp0vAOoRRxsQcS4vDapC3-mwAAAAAAAAANqnvWBP24iZLcQ-Wi76ZDxQAAAAAAAAAI3X9PCd1tVPhzrMiwigfodgAAAAAAAAAAZSkKN3zNxr0HYuO2pqe5hQAAAAAA52Uf8NlMClggx8jaziUTJXlmWAAAAAAAAQEslHp5_WAgpLy2alrUVab6HAAAAAAAwACLlHp5_WAgpLy2alrUVab6HAAAAAAAAEIGlHp5_WAgpLy2alrUVab6HAAAAAAAFFQelHp5_WAgpLy2alrUVab6HAAAAAAAFErelHp5_WAgpLy2alrUVab6HAAAAAAAFBtp", "types": "CAMfBQIDBQQ"}}, "ecs": {"version": "1.12.0"}} + - {"create": {"_index": "profiling-stackframes", "_id": "8NlMClggx8jaziUTJXlmWAAAAAAAAIYI"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["/build/glibc-sMfBJT/glibc-2.31/nptl/pthread_create.c"], "Stackframe.function.name": ["start_thread"], "Stackframe.line.number": [477]} + - { "create": { "_index": "profiling-stackframes", "_id": "AAAAAAAAV4sAAAAAAAAAHezOBBlhpr8q" } } + - { "ecs.version": "1.12.0", "Stackframe.file.name": [ "" ], "Stackframe.function.name": [ "StubRoutines (1)" ], "Stackframe.line.number": [ 0 ], "Stackframe.function.offset": [ 0 ] } + - { "create": { "_index": "profiling-stackframes", "_id": "ZgY89pr05YIxi0DTL7hyTAAAAAAAAAAL" } } + - { "ecs.version": "1.12.0", "Stackframe.file.name": [ "Thread.java" ], "Stackframe.function.name": [ "void java.lang.Thread.run()" ], "Stackframe.line.number": [ 833 ], "Stackframe.function.offset": [ 1 ] } + - {"create": {"_index": "profiling-stackframes", "_id": "zZZ6VCjFYAFVAKtY0XlyPwAAAAAAAAAF"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ThreadPoolExecutor.java"], "Stackframe.function.name": ["void 
java.util.concurrent.ThreadPoolExecutor$Worker.run()"], "Stackframe.line.number": [635], "Stackframe.function.offset": [0]} + - {"create": {"_index": "profiling-stackframes", "_id": "ySPx-89oJ6TfXYn-uir7mQAAAAAAAABc"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ThreadPoolExecutor.java"], "Stackframe.function.name": ["void java.util.concurrent.ThreadPoolExecutor.runWorker(java.util.concurrent.ThreadPoolExecutor$Worker)"], "Stackframe.line.number": [1136], "Stackframe.function.offset": [20]} + - {"create": {"_index": "profiling-stackframes", "_id": "h4dwrMYlRFRjyfsvjXt4tgAAAAAAAAAg"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["PrioritizedEsThreadPoolExecutor.java"], "Stackframe.function.name": ["void org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run()"], "Stackframe.line.number": [225], "Stackframe.function.offset": [6]} + - {"create": {"_index": "profiling-stackframes", "_id": "3V-8FLy1GH8nVRceMDeaiwAAAAAAAAAB"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["PrioritizedEsThreadPoolExecutor.java"], "Stackframe.function.name": ["void org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(java.lang.Runnable)"], "Stackframe.line.number": [262], "Stackframe.function.offset": [0]} + - {"create": {"_index": "profiling-stackframes", "_id": "nVB2vvQdnm3M5BpEt6xnFAAAAAAAAAAV"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ThreadContext.java"], "Stackframe.function.name": ["void org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run()"], "Stackframe.line.number": [718], "Stackframe.function.offset": [2]} + - {"create": {"_index": "profiling-stackframes", "_id": "4j8yS0qsC_6XfTfMdPp5KQAAAAAAAAAQ"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ClusterApplierService.java"], "Stackframe.function.name": ["void org.elasticsearch.cluster.service.ClusterApplierService$UpdateTask.run()"], "Stackframe.line.number": [154], "Stackframe.function.offset": [0]} + - {"create": {"_index": "profiling-stackframes", "_id": "9oBnE4xnAvOiOv1q-LbApgAAAAAAAAAE"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ClusterApplierService.java"], "Stackframe.function.name": ["void org.elasticsearch.cluster.service.ClusterApplierService.access$000(org.elasticsearch.cluster.service.ClusterApplierService, java.lang.String, java.util.function.Function, org.elasticsearch.action.ActionListener)"], "Stackframe.line.number": [56], "Stackframe.function.offset": [0]} + - {"create": {"_index": "profiling-stackframes", "_id": "wRQstrBYZ0ShmJnYV-ADrQAAAAAAAAFL"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ClusterApplierService.java"], "Stackframe.function.name": ["void org.elasticsearch.cluster.service.ClusterApplierService.runTask(java.lang.String, java.util.function.Function, org.elasticsearch.action.ActionListener)"], "Stackframe.line.number": [428], "Stackframe.function.offset": [44]} + - {"create": {"_index": "profiling-stackframes", "_id": "AFikCbtP_Dm7iUthjnlnEgAAAAAAAAEq"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ClusterApplierService.java"], "Stackframe.function.name": ["void org.elasticsearch.cluster.service.ClusterApplierService.applyChanges(org.elasticsearch.cluster.ClusterState, org.elasticsearch.cluster.ClusterState, java.lang.String, org.elasticsearch.cluster.service.ClusterApplierRecordingService$Recorder)"], "Stackframe.line.number": [503], "Stackframe.function.offset": [25]} + - 
{"create": {"_index": "profiling-stackframes", "_id": "56q5trA0bAF1B-Um6L_rqwAAAAAAAAAG"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ClusterApplierService.java"], "Stackframe.function.name": ["void org.elasticsearch.cluster.service.ClusterApplierService.callClusterStateAppliers(org.elasticsearch.cluster.ClusterChangedEvent, org.elasticsearch.cluster.service.ClusterApplierRecordingService$Recorder)"], "Stackframe.line.number": [539], "Stackframe.function.offset": [0]} + - {"create": {"_index": "profiling-stackframes", "_id": "gi_774C-EJhuJfyXXhzVgwAAAAAAAABE"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ClusterApplierService.java"], "Stackframe.function.name": ["void org.elasticsearch.cluster.service.ClusterApplierService.callClusterStateAppliers(org.elasticsearch.cluster.ClusterChangedEvent, org.elasticsearch.cluster.service.ClusterApplierRecordingService$Recorder, java.util.Collection)"], "Stackframe.line.number": [553], "Stackframe.function.offset": [4]} + - {"create": {"_index": "profiling-stackframes", "_id": "gvYbo0YBmE65VwrpTWYalQAAAAAAAAB2"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["IndicesClusterStateService.java"], "Stackframe.function.name": ["void org.elasticsearch.indices.cluster.IndicesClusterStateService.applyClusterState(org.elasticsearch.cluster.ClusterChangedEvent)"], "Stackframe.line.number": [231], "Stackframe.function.offset": [31]} + - {"create": {"_index": "profiling-stackframes", "_id": "tMqbgEmfZJ47YRogSA-gKgAAAAAAAADl"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["IndicesClusterStateService.java"], "Stackframe.function.name": ["void org.elasticsearch.indices.cluster.IndicesClusterStateService.createOrUpdateShards(org.elasticsearch.cluster.ClusterState)"], "Stackframe.line.number": [556], "Stackframe.function.offset": [18]} + - {"create": {"_index": "profiling-stackframes", "_id": "CQUIxcdtvT35ZznMVnzc_AAAAAAAAACX"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["IndicesClusterStateService.java"], "Stackframe.function.name": ["void org.elasticsearch.indices.cluster.IndicesClusterStateService.updateShard(org.elasticsearch.cluster.node.DiscoveryNodes, org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.indices.cluster.IndicesClusterStateService$Shard, org.elasticsearch.cluster.routing.RoutingTable, org.elasticsearch.cluster.ClusterState)"], "Stackframe.line.number": [614], "Stackframe.function.offset": [14]} + - {"create": {"_index": "profiling-stackframes", "_id": "N4c5sJszjyVzcx3AmWN8pwAAAAAAAADS"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["IndexShard.java"], "Stackframe.function.name": ["void org.elasticsearch.index.shard.IndexShard.updateShardState(org.elasticsearch.cluster.routing.ShardRouting, long, java.util.function.BiConsumer, long, java.util.Set, org.elasticsearch.cluster.routing.IndexShardRoutingTable)"], "Stackframe.line.number": [535], "Stackframe.function.offset": [24]} + - {"create": {"_index": "profiling-stackframes", "_id": "_GFFImAT2VE6Ar5VgmaN7QAAAAAAAAHy"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ReplicationTracker.java"], "Stackframe.function.name": ["void org.elasticsearch.index.seqno.ReplicationTracker.updateFromMaster(long, java.util.Set, org.elasticsearch.cluster.routing.IndexShardRoutingTable)"], "Stackframe.line.number": [1198], "Stackframe.function.offset": [47]} + - {"create": {"_index": "profiling-stackframes", "_id": "wnSBrxGSumHiAQQABJeNtQAAAAAAAAAk"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": 
["ReplicationTracker.java"], "Stackframe.function.name": ["void org.elasticsearch.index.seqno.ReplicationTracker.updateReplicationGroupAndNotify()"], "Stackframe.line.number": [994], "Stackframe.function.offset": [3]} + - {"create": {"_index": "profiling-stackframes", "_id": "PK6VPfk6aJqBe-5Qji8O5gAAAAAAAAAF"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["PendingReplicationActions.java"], "Stackframe.function.name": ["void org.elasticsearch.action.support.replication.PendingReplicationActions.accept(java.lang.Object)"], "Stackframe.line.number": [25], "Stackframe.function.offset": [0]} + - {"create": {"_index": "profiling-stackframes", "_id": "EIxfgHbDbI5dElFzd3Ha-QAAAAAAAAAZ"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["PendingReplicationActions.java"], "Stackframe.function.name": ["void org.elasticsearch.action.support.replication.PendingReplicationActions.accept(org.elasticsearch.index.shard.ReplicationGroup)"], "Stackframe.line.number": [71], "Stackframe.function.offset": [3]} + - {"create": {"_index": "profiling-stackframes", "_id": "Fq10nEfKWtXEt510UwEUUAAAAAAAAAB7"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["PendingReplicationActions.java"], "Stackframe.function.name": ["void org.elasticsearch.action.support.replication.PendingReplicationActions.acceptNewTrackedAllocationIds(java.util.Set)"], "Stackframe.line.number": [95], "Stackframe.function.offset": [10]} + - {"create": {"_index": "profiling-stackframes", "_id": "V_QMdmt4RxKxn4ZNgdvkJwAAAAAAAAAR"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["PendingReplicationActions.java"], "Stackframe.function.name": ["void org.elasticsearch.action.support.replication.PendingReplicationActions.cancelActions(java.util.ArrayList, java.lang.String)"], "Stackframe.line.number": [108], "Stackframe.function.offset": [1]} + - {"create": {"_index": "profiling-stackframes", "_id": "eNITicG0MvFr9HQHk70FLAAAAAAAAAAI"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["EsThreadPoolExecutor.java"], "Stackframe.function.name": ["void org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor.execute(java.lang.Runnable)"], "Stackframe.line.number": [95], "Stackframe.function.offset": [2]} + - {"create": {"_index": "profiling-stackframes", "_id": "9j0yGbd8eQNwdRhHZ159OQAAAAAAAAA9"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ThreadPoolExecutor.java"], "Stackframe.function.name": ["void java.util.concurrent.ThreadPoolExecutor.execute(java.lang.Runnable)"], "Stackframe.line.number": [1357], "Stackframe.function.offset": [28]} + - {"create": {"_index": "profiling-stackframes", "_id": "vzzPIR5tUnMkJ7d_ITdQRgAAAAAAAAAC"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["EsExecutors.java"], "Stackframe.function.name": ["boolean org.elasticsearch.common.util.concurrent.EsExecutors$ExecutorScalingQueue.offer(java.lang.Object)"], "Stackframe.line.number": [363], "Stackframe.function.offset": [0]} + - {"create": {"_index": "profiling-stackframes", "_id": "6YIeLAztuVSewvuGh8XKXgAAAAAAAAAF"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["LinkedTransferQueue.java"], "Stackframe.function.name": ["boolean java.util.concurrent.LinkedTransferQueue.tryTransfer(java.lang.Object)"], "Stackframe.line.number": [1241], "Stackframe.function.offset": [0]} + - {"create": {"_index": "profiling-stackframes", "_id": "IQvpHpp20NHD-0mZNf95oAAAAAAAAABp"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["LinkedTransferQueue.java"], "Stackframe.function.name": ["java.lang.Object 
java.util.concurrent.LinkedTransferQueue.xfer(java.lang.Object, boolean, int, long)"], "Stackframe.line.number": [605], "Stackframe.function.offset": [10]} + - { "create": { "_index": "profiling-stackframes", "_id": "0vAOoRRxsQcS4vDapC3-mwAAAAAAAAAN" } } + - { "ecs.version": "1.12.0", "Stackframe.file.name": [ "LinkedTransferQueue.java" ], "Stackframe.function.name": [ "boolean java.util.concurrent.LinkedTransferQueue$Node.tryMatch(java.lang.Object, java.lang.Object)" ], "Stackframe.line.number": [ 448 ], "Stackframe.function.offset": [ 1 ] } + - {"create": {"_index": "profiling-stackframes", "_id": "qnvWBP24iZLcQ-Wi76ZDxQAAAAAAAAAI"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["LockSupport.java"], "Stackframe.function.name": ["void java.util.concurrent.locks.LockSupport.unpark(java.lang.Thread)"], "Stackframe.line.number": [177], "Stackframe.function.offset": [1]} + - {"create": {"_index": "profiling-stackframes", "_id": "3X9PCd1tVPhzrMiwigfodgAAAAAAAAAA"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["Unsafe.java"], "Stackframe.function.name": ["void jdk.internal.misc.Unsafe.unpark(java.lang.Object)"], "Stackframe.line.number": [0], "Stackframe.function.offset": [0]} + - {"create": {"_index": "profiling-stackframes", "_id": "lHp5_WAgpLy2alrUVab6HAAAAAAAwACL"}} + - {"ecs.version": "1.12.0", "Stackframe.function.name": ["entry_SYSCALL_64_after_hwframe"]} + - {"create": {"_index": "profiling-stackframes", "_id": "lHp5_WAgpLy2alrUVab6HAAAAAAAAEIG"}} + - {"ecs.version": "1.12.0", "Stackframe.function.name": ["do_syscall_64"]} + - {"create": {"_index": "profiling-stackframes", "_id": "lHp5_WAgpLy2alrUVab6HAAAAAAAFFQe"}} + - {"ecs.version": "1.12.0", "Stackframe.function.name": ["__x64_sys_futex"]} + - {"create": {"_index": "profiling-stackframes", "_id": "lHp5_WAgpLy2alrUVab6HAAAAAAAFEre"}} + - {"ecs.version": "1.12.0", "Stackframe.function.name": ["do_futex"]} + - {"create": {"_index": "profiling-stackframes", "_id": "lHp5_WAgpLy2alrUVab6HAAAAAAAFBtp"}} + - {"ecs.version": "1.12.0", "Stackframe.function.name": ["futex_wake"]} + - { "create": { "_index": "profiling-executables", "_id": "634wiWh6F21tPpXr0Zz3mg" } } + - { "@timestamp": "1698019200", "Executable": { "build": { "id": "9fdb74e7b217d06c93172a8243f8547f947ee6d1" }, "file": { "name": "libc-2.31.so" } }, "Symbolization": { "next_time": "4851892087" }, "ecs": { "version": "1.12.0" } } + - {"create": {"_index": "profiling-executables", "_id": "8NlMClggx8jaziUTJXlmWA"}} + - {"@timestamp": "1698019200", "Executable": {"build": {"id": "f0983025f0e0f327a6da752ff4ffa675e0be393f"}, "file": {"name": "libpthread-2.31.so"}}, "Symbolization": {"next_time": "4851892090"}, "ecs": {"version": "1.12.0"}} + - {"create": {"_index": "profiling-executables", "_id": "lHp5_WAgpLy2alrUVab6HA"}} + - {"@timestamp": "1698624000", "Executable": {"build": {"id": "c5f89ea1c68710d2a493bb604c343a92c4f8ddeb"}, "file": {"name": "vmlinux"}}, "Symbolization": {"next_time": "4852491791"}, "ecs": {"version": "1.12.0"}} + - {"create": {"_index": "profiling-hosts", "_id": "eLH27YsBj2lLi3tJYlvr"}} + - {"profiling.project.id": 100, "host.id": "8457605156473051743", "@timestamp": 1700504426, "ecs.version": "1.12.0", "profiling.agent.build_timestamp": 1688111067, "profiling.instance.private_ipv4s": ["192.168.1.2"], "ec2.instance_life_cycle": "on-demand", "profiling.agent.config.map_scale_factor": 0, "ec2.instance_type": "i3.2xlarge", "profiling.host.ip": "192.168.1.2", "profiling.agent.config.bpf_log_level": 0, 
"profiling.host.sysctl.net.core.bpf_jit_enable": 1, "profiling.agent.config.file": "/etc/prodfiler/prodfiler.conf", "ec2.local_ipv4": "192.168.1.2", "profiling.agent.config.no_kernel_version_check": false, "profiling.host.machine": "x86_64", "profiling.host.tags": ["cloud_provider:aws", "cloud_environment:qa", "cloud_region:eu-west-1"], "profiling.agent.config.probabilistic_threshold": 100, "profiling.agent.config.disable_tls": false, "profiling.agent.config.tracers": "all", "profiling.agent.start_time": 1700090045589, "profiling.agent.config.max_elements_per_interval": 800, "ec2.placement.region": "eu-west-1", "profiling.agent.config.present_cpu_cores": 8, "profiling.host.kernel_version": "9.9.9-0-aws", "profiling.agent.config.bpf_log_size": 65536, "profiling.agent.config.known_traces_entries": 65536, "profiling.host.sysctl.kernel.unprivileged_bpf_disabled": 1, "profiling.agent.config.verbose": false, "profiling.agent.config.probabilistic_interval": "1m0s", "ec2.placement.availability_zone_id": "euw1-az1", "ec2.security_groups": "", "ec2.local_hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "ec2.placement.availability_zone": "eu-west-1c", "profiling.agent.config.upload_symbols": false, "profiling.host.sysctl.kernel.bpf_stats_enabled": 0, "profiling.host.name": "ip-192-168-1-2", "ec2.mac": "00:11:22:33:44:55", "profiling.host.kernel_proc_version": "Linux version 9.9.9-0-aws", "profiling.agent.config.cache_directory": "/var/cache/optimyze/", "profiling.agent.version": "v8.12.0", "ec2.hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "profiling.agent.config.elastic_mode": false, "ec2.ami_id": "ami-aaaaaaaaaaa", "ec2.instance_id": "i-0b999999999999999" } + +--- +"Test Status": + - do: + profiling.status: {} + + - match: {profiling.enabled: true} + - match: {resource_management.enabled: true} + - match: {resources.created: true} + - match: {resources.pre_8_9_1_data: false} + - match: {resources.has_data: true} + +--- +"Test get stacktraces": + - do: + profiling.stacktraces: + body: > + { + "sample_size": 20000, + "requested_duration": 86400, + "query": { + "bool": { + "filter": [ + { + "range": { + "@timestamp": { + "gte": "2023-11-20", + "lt": "2023-11-21", + "format": "yyyy-MM-dd" + } + } + } + ] + } + } + } + - match: { stack_traces.S07KmaoGhvNte78xwwRbZQ.count: 1} + +--- +"Test flamegraph": + - do: + profiling.flamegraph: + body: > + { + "sample_size": 20000, + "requested_duration": 86400, + "query": { + "bool": { + "filter": [ + { + "range": { + "@timestamp": { + "gte": "2023-11-20", + "lt": "2023-11-21", + "format": "yyyy-MM-dd" + } + } + } + ] + } + } + } + - match: { Size: 47} + +--- +teardown: + - do: + cluster.put_settings: + body: + persistent: + xpack.profiling.templates.enabled: false diff --git a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java index b6fd2e8dd1a53..2caa820c51645 100644 --- a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java +++ b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java @@ -13,8 +13,8 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; -import 
org.elasticsearch.action.ingest.PutPipelineAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; @@ -49,7 +49,8 @@ import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; import org.elasticsearch.xpack.core.ilm.LifecyclePolicyMetadata; import org.elasticsearch.xpack.core.ilm.OperationMode; -import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; +import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; import org.elasticsearch.xpack.core.template.IngestPipelineConfig; import org.junit.After; import org.junit.Before; @@ -176,11 +177,10 @@ public void testThatNonExistingPoliciesAreAddedImmediately() throws Exception { AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, request, listener) -> { - if (action instanceof PutLifecycleAction) { + if (action == ILMActions.PUT) { calledTimes.incrementAndGet(); - assertThat(action, instanceOf(PutLifecycleAction.class)); - assertThat(request, instanceOf(PutLifecycleAction.Request.class)); - final PutLifecycleAction.Request putRequest = (PutLifecycleAction.Request) request; + assertThat(request, instanceOf(PutLifecycleRequest.class)); + final PutLifecycleRequest putRequest = (PutLifecycleRequest) request; assertThat( putRequest.getPolicy().getName(), anyOf( @@ -199,7 +199,7 @@ public void testThatNonExistingPoliciesAreAddedImmediately() throws Exception { } else if (action instanceof PutComponentTemplateAction) { // Ignore this, it's verified in another test return new StackTemplateRegistryTests.TestPutIndexTemplateResponse(true); - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; } else { @@ -226,10 +226,10 @@ public void testPolicyAlreadyExists() { if (action instanceof PutComponentTemplateAction) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { fail("if the policy already exists it should not be re-put"); } else { fail("client called with unexpected request: " + request.toString()); @@ -247,17 +247,17 @@ public void testThatIndependentPipelinesAreAdded() throws Exception { AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, request, listener) -> { - if (action instanceof PutPipelineAction) { + if (action == PutPipelineTransportAction.TYPE) { calledTimes.incrementAndGet(); return AcknowledgedResponse.TRUE; } if (action instanceof PutComponentTemplateAction) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == 
TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; } else { @@ -297,10 +297,10 @@ public void testPolicyAlreadyExistsButDiffers() throws IOException { if (action instanceof PutComponentTemplateAction) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { fail("if the policy already exists it should not be re-put"); } else { fail("client called with unexpected request: " + request.toString()); @@ -386,13 +386,14 @@ public void testMissingNonRequiredTemplates() throws Exception { if (action instanceof PutComponentTemplateAction) { // Ignore such return AcknowledgedResponse.TRUE; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // Ignore such return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { calledTimes.incrementAndGet(); - assertThat(request, instanceOf(PutComposableIndexTemplateAction.Request.class)); - PutComposableIndexTemplateAction.Request putComposableTemplateRequest = (PutComposableIndexTemplateAction.Request) request; + assertThat(request, instanceOf(TransportPutComposableIndexTemplateAction.Request.class)); + TransportPutComposableIndexTemplateAction.Request putComposableTemplateRequest = + (TransportPutComposableIndexTemplateAction.Request) request; assertThat(putComposableTemplateRequest.name(), equalTo("syslog")); ComposableIndexTemplate composableIndexTemplate = putComposableTemplateRequest.indexTemplate(); assertThat(composableIndexTemplate.composedOf(), hasSize(2)); @@ -431,10 +432,10 @@ public void testSameOrHigherVersionTemplateNotUpgraded() { if (action instanceof PutComponentTemplateAction) { fail("template should not have been re-installed"); return null; - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; } else { @@ -586,10 +587,10 @@ private ActionResponse verifyComponentTemplateInstalled( assertThat(putRequest.componentTemplate().version(), equalTo((long) StackTemplateRegistry.REGISTRY_VERSION)); assertNotNull(listener); return new TestPutIndexTemplateResponse(true); - } else if (action instanceof PutLifecycleAction) { + } else if (action == ILMActions.PUT) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; - } else if (action instanceof PutComposableIndexTemplateAction) { + } else if (action == TransportPutComposableIndexTemplateAction.TYPE) { // Ignore this, it's verified in another test return AcknowledgedResponse.TRUE; } else { diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinder.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinder.java index 
2b8f3c678e9d5..e43b2cfdc96d3 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinder.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinder.java @@ -44,9 +44,10 @@ static NdJsonTextStructureFinder makeNdJsonTextStructureFinder( List sampleMessages = Arrays.asList(sample.split("\n")); for (String sampleMessage : sampleMessages) { - XContentParser parser = jsonXContent.createParser(XContentParserConfiguration.EMPTY, sampleMessage); - sampleRecords.add(parser.mapOrdered()); - timeoutChecker.check("NDJSON parsing"); + try (XContentParser parser = jsonXContent.createParser(XContentParserConfiguration.EMPTY, sampleMessage)) { + sampleRecords.add(parser.mapOrdered()); + timeoutChecker.check("NDJSON parsing"); + } } TextStructure.Builder structureBuilder = new TextStructure.Builder(TextStructure.Format.NDJSON).setCharset(charsetName) diff --git a/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle b/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle index ef34db62e5e03..8f129789d46b7 100644 --- a/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle +++ b/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle @@ -2,6 +2,7 @@ import org.elasticsearch.gradle.internal.test.RestIntegTestTask import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE +import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.internal-testclusters' apply plugin: 'elasticsearch.standalone-rest-test' @@ -53,16 +54,29 @@ testClusters.register('mixed-cluster') { tasks.register('remote-cluster', RestIntegTestTask) { mustRunAfter("precommit") systemProperty 'tests.rest.suite', 'remote_cluster' + maybeDisableForFips(it) } tasks.register('mixed-cluster', RestIntegTestTask) { dependsOn 'remote-cluster' useCluster remoteCluster systemProperty 'tests.rest.suite', 'multi_cluster' + maybeDisableForFips(it) } tasks.register("integTest") { dependsOn 'mixed-cluster' + maybeDisableForFips(it) } tasks.named("check").configure { dependsOn("integTest") } + +//TODO: remove with version 8.14. A new FIPS setting was added in 8.13. Since FIPS configures all test clusters and this specific integTest uses +// the previous minor version, that setting is not available when running in FIPS until 8.14. 
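+// For example (hypothetical version number): on a FIPS JVM with project.version set to 8.13.2,
+// the check below sees 8.13.2 < 8.14.0 and disables the task; from 8.14.0 onwards it is a no-op.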
+def maybeDisableForFips(task) { + if (BuildParams.inFipsJvm) { + if(Version.fromString(project.version).before(Version.fromString('8.14.0'))) { + task.enabled = false + } + } +} diff --git a/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/continuous/TransformContinuousIT.java b/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/continuous/TransformContinuousIT.java index 92114f635848b..cd4b2004b02f7 100644 --- a/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/continuous/TransformContinuousIT.java +++ b/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/continuous/TransformContinuousIT.java @@ -136,6 +136,7 @@ public void removePipelines() throws IOException { deletePipeline(ContinuousTestCase.INGEST_PIPELINE); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/97263") public void testContinuousEvents() throws Exception { String sourceIndexName = ContinuousTestCase.CONTINUOUS_EVENTS_SOURCE_INDEX; DecimalFormat numberFormat = new DecimalFormat("000", new DecimalFormatSymbols(Locale.ROOT)); diff --git a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformDestIndexIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformDestIndexIT.java index 49ee0f8bbd9a9..5d46c9933f48a 100644 --- a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformDestIndexIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformDestIndexIT.java @@ -114,8 +114,16 @@ public void testTransformDestIndexAliases() throws Exception { assertAliases(destIndex2, destAliasAll, destAliasLatest); } - public void testTransformDestIndexCreatedDuringUpdate() throws Exception { - String transformId = "test_dest_index_on_update"; + public void testTransformDestIndexCreatedDuringUpdate_NoDeferValidation() throws Exception { + testTransformDestIndexCreatedDuringUpdate(false); + } + + public void testTransformDestIndexCreatedDuringUpdate_DeferValidation() throws Exception { + testTransformDestIndexCreatedDuringUpdate(true); + } + + private void testTransformDestIndexCreatedDuringUpdate(boolean deferValidation) throws Exception { + String transformId = "test_dest_index_on_update" + (deferValidation ? "-defer" : ""); String destIndex = transformId + "-dest"; assertFalse(indexExists(destIndex)); @@ -139,7 +147,7 @@ public void testTransformDestIndexCreatedDuringUpdate() throws Exception { // Note that at this point the destination index could have already been created by the indexing process of the running transform // but the update code should cope with this situation. 
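+        // updateTransform() below sends defer_validation=true as a request parameter when deferValidation
+        // is set; in both modes the destination index is expected to exist once the update returns.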
updateTransform(transformId, """ - { "settings": { "max_page_search_size": 123 } }"""); + { "settings": { "max_page_search_size": 123 } }""", deferValidation); // Verify that the destination index now exists assertTrue(indexExists(destIndex)); diff --git a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java index 6d14c74f8fea9..ce1178e760a6c 100644 --- a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java @@ -399,13 +399,16 @@ protected void createPivotReviewsTransform(String transformId, String transformI createPivotReviewsTransform(transformId, transformIndex, query, pipeline, null, null, authHeader, null, REVIEWS_INDEX_NAME); } - protected void updateTransform(String transformId, String update) throws IOException { + protected void updateTransform(String transformId, String update, boolean deferValidation) throws IOException { final Request updateTransformRequest = createRequestWithSecondaryAuth( "POST", getTransformEndpoint() + transformId + "/_update", null, null ); + if (deferValidation) { + updateTransformRequest.addParameter("defer_validation", String.valueOf(deferValidation)); + } updateTransformRequest.setJsonEntity(update); client().performRequest(updateTransformRequest); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/DefaultTransformExtension.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/DefaultTransformExtension.java index 9cccbade339dc..ea9260f555905 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/DefaultTransformExtension.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/DefaultTransformExtension.java @@ -9,9 +9,12 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; public class DefaultTransformExtension implements TransformExtension { + private static final TimeValue MIN_FREQUENCY = TimeValue.timeValueSeconds(1); + @Override public boolean includeNodeInfo() { return true; @@ -33,4 +36,9 @@ public Settings getTransformDestinationIndexSettings() { .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-1") .build(); } + + @Override + public TimeValue getMinFrequency() { + return MIN_FREQUENCY; + } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java index 61cc0e2c072ad..98c95c5a9803a 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java @@ -243,7 +243,12 @@ public Collection createComponents(PluginServices services) { configManager, auditor ); - TransformScheduler scheduler = new TransformScheduler(clock, services.threadPool(), settings); + TransformScheduler scheduler = new TransformScheduler( + clock, + services.threadPool(), + settings, + getTransformExtension().getMinFrequency() + ); scheduler.start(); 
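+        // The scheduler created above now clamps each transform's frequency to the extension-provided
+        // minimum (one second for DefaultTransformExtension), defaulting when no frequency is configured.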
transformServices.set(new TransformServices(configManager, checkpointService, auditor, scheduler)); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformExtension.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformExtension.java index c919f4dd4c550..4794f3c86f259 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformExtension.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformExtension.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.transform; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; public interface TransformExtension { @@ -20,4 +21,10 @@ public interface TransformExtension { * source settings. */ Settings getTransformDestinationIndexSettings(); + + // TODO(jkuipers): remove this default implementation after the ServerlessTransformPlugin + // in the elasticsearch-serverless project is updated. + default TimeValue getMinFrequency() { + return TimeValue.timeValueSeconds(1); + } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportValidateTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportValidateTransformAction.java index eecff0722ddef..0f9c8e6755bee 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportValidateTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportValidateTransformAction.java @@ -38,6 +38,7 @@ import java.util.Map; +import static java.util.Collections.emptyMap; import static org.elasticsearch.core.Strings.format; public class TransportValidateTransformAction extends HandledTransportAction { @@ -127,7 +128,7 @@ protected void doExecute(Task task, Request request, ActionListener li // <4> Deduce destination index mappings ActionListener validateQueryListener = ActionListener.wrap(validateQueryResponse -> { if (request.isDeferValidation()) { - deduceMappingsListener.onResponse(null); + deduceMappingsListener.onResponse(emptyMap()); } else { function.deduceMappings(client, config.getHeaders(), config.getSource(), deduceMappingsListener); } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/notifications/TransformAuditor.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/notifications/TransformAuditor.java index d06b1a4ed106b..4fa19450a900c 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/notifications/TransformAuditor.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/notifications/TransformAuditor.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.transform.notifications; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; @@ -41,7 +41,7 @@ public TransformAuditor(Client client, String nodeName, ClusterService clusterSe TransformInternalIndexConstants.AUDIT_INDEX, () -> { try { - return new 
PutComposableIndexTemplateAction.Request(TransformInternalIndexConstants.AUDIT_INDEX).indexTemplate( + return new TransportPutComposableIndexTemplateAction.Request(TransformInternalIndexConstants.AUDIT_INDEX).indexTemplate( ComposableIndexTemplate.builder() .template(TransformInternalIndex.getAuditIndexTemplate()) .version((long) TransformConfigVersion.CURRENT.id()) diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java index b3cdea8ee80d8..843dee43706f8 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexNotFoundException; @@ -69,7 +70,6 @@ import org.elasticsearch.xpack.core.transform.transforms.persistence.TransformInternalIndexConstants; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -553,12 +553,7 @@ public void expandTransformIds( Set ids = Sets.newLinkedHashSetWithExpectedSize(searchResponse.getHits().getHits().length); Set configs = Sets.newLinkedHashSetWithExpectedSize(searchResponse.getHits().getHits().length); for (SearchHit hit : searchResponse.getHits().getHits()) { - BytesReference source = hit.getSourceRef(); - try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream) - ) { + try (XContentParser parser = createParser(hit)) { TransformConfig config = TransformConfig.fromXContent(parser, null, true); if (ids.add(config.getId())) { configs.add(config); @@ -592,6 +587,18 @@ public void expandTransformIds( ); } + private XContentParser createParser(BytesReference source) throws IOException { + return XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG.withRegistry(xContentRegistry), + source, + XContentType.JSON + ); + } + + private XContentParser createParser(SearchHit hit) throws IOException { + return createParser(hit.getSourceRef()); + } + @Override public void getAllTransformIds(TimeValue timeout, ActionListener> listener) { expandAllTransformIds( @@ -770,12 +777,7 @@ public void getTransformStoredDoc( return; } SearchHit searchHit = searchResponse.getHits().getHits()[0]; - BytesReference source = searchHit.getSourceRef(); - try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream) - ) { + try (XContentParser parser = createParser(searchHit)) { resultListener.onResponse( Tuple.tuple(TransformStoredDoc.fromXContent(parser), SeqNoPrimaryTermAndIndex.fromSearchHit(searchHit)) ); @@ -825,12 +827,7 @@ public void getTransformStoredDocs( // skip old versions if (hit.getId().equals(previousId) == false) 
{ previousId = hit.getId(); - BytesReference source = hit.getSourceRef(); - try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) - ) { + try (XContentParser parser = createParser(hit)) { stats.add(TransformStoredDoc.fromXContent(parser)); } catch (IOException e) { listener.onFailure(new ElasticsearchParseException("failed to parse transform stats from search hit", e)); @@ -861,11 +858,7 @@ private void parseTransformLenientlyFromSource( String transformId, ActionListener<TransformConfig> transformListener ) { - try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream) - ) { + try (XContentParser parser = createParser(source)) { transformListener.onResponse(TransformConfig.fromXContent(parser, transformId, true)); } catch (Exception e) { logger.error(TransformMessages.getMessage(TransformMessages.FAILED_TO_PARSE_TRANSFORM_CONFIGURATION, transformId), e); @@ -878,11 +871,7 @@ private void parseCheckpointsLenientlyFromSource( String transformId, ActionListener<TransformCheckpoint> transformListener ) { - try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream) - ) { + try (XContentParser parser = createParser(source)) { transformListener.onResponse(TransformCheckpoint.fromXContent(parser, true)); } catch (Exception e) { logger.error(TransformMessages.getMessage(TransformMessages.FAILED_TO_PARSE_TRANSFORM_CHECKPOINTS, transformId), e); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java index 29be02b87cbdf..1b8d14c6cdc2f 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java @@ -519,16 +519,23 @@ private void injectPointInTimeIfNeeded( void doSearch(Tuple<String, SearchRequest> namedSearchRequest, ActionListener<SearchResponse> listener) { String name = namedSearchRequest.v1(); - SearchRequest searchRequest = namedSearchRequest.v2(); + SearchRequest originalRequest = namedSearchRequest.v2(); // We want to treat a request to search 0 indices as a request to do nothing, not a request to search all indices - if (searchRequest.indices().length == 0) { - logger.debug("[{}] Search request [{}] optimized to noop; searchRequest [{}]", getJobId(), name, searchRequest); + if (originalRequest.indices().length == 0) { + logger.debug("[{}] Search request [{}] optimized to noop; searchRequest [{}]", getJobId(), name, originalRequest); listener.onResponse(null); return; } - logger.trace("searchRequest: [{}]", searchRequest); - PointInTimeBuilder pit = searchRequest.pointInTimeBuilder(); + final SearchRequest searchRequest; + PointInTimeBuilder pit = originalRequest.pointInTimeBuilder(); + if (pit != null) { + // Remove the indices from the request; they will be derived from the provided PIT + searchRequest = new SearchRequest(originalRequest).indices(new String[0]).indicesOptions(SearchRequest.DEFAULT_INDICES_OPTIONS); + } else { + searchRequest = originalRequest; + } +
logger.trace("searchRequest: [{}]", searchRequest); ClientHelper.executeWithHeadersAsync( transformConfig.getHeaders(), @@ -555,13 +562,13 @@ void doSearch(Tuple namedSearchRequest, ActionListener namedSearchRequest, ActionListener new TransformScheduledTask( task.getTransformId(), - task.getFrequency(), + getFrequency(task.getFrequency()), task.getLastTriggeredTimeMillis(), failureCount, task.getListener() @@ -245,7 +249,7 @@ public void scheduleNow(String transformId) { transformId, task -> new TransformScheduledTask( task.getTransformId(), - task.getFrequency(), + getFrequency(task.getFrequency()), task.getLastTriggeredTimeMillis(), task.getFailureCount(), currentTimeMillis, // we schedule this task at current clock time so that it is processed ASAP @@ -273,4 +277,11 @@ public void deregisterTransform(String transformId) { List getTransformScheduledTasks() { return scheduledTasks.listScheduledTasks(); } + + private TimeValue getFrequency(TimeValue frequency) { + if (frequency == null) { + frequency = Transform.DEFAULT_TRANSFORM_FREQUENCY; + } + return frequency.compareTo(minFrequency) >= 0 ? frequency : minFrequency; + } } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProviderTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProviderTests.java index bed646b9ddeb2..468a14bc1db12 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProviderTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProviderTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.internal.Client; @@ -179,36 +178,41 @@ private void testSourceHasChanged( TimeValue delay, Tuple expectedRangeQueryBounds ) throws InterruptedException { - doAnswer(withResponse(newSearchResponse(totalHits))).when(client).execute(eq(TransportSearchAction.TYPE), any(), any()); - String transformId = getTestName(); - TransformConfig transformConfig = newTransformConfigWithDateHistogram( - transformId, - transformVersion, - dateHistogramField, - dateHistogramInterval, - delay - ); - TimeBasedCheckpointProvider provider = newCheckpointProvider(transformConfig); + final SearchResponse searchResponse = newSearchResponse(totalHits); + try { + doAnswer(withResponse(searchResponse)).when(client).execute(eq(TransportSearchAction.TYPE), any(), any()); + String transformId = getTestName(); + TransformConfig transformConfig = newTransformConfigWithDateHistogram( + transformId, + transformVersion, + dateHistogramField, + dateHistogramInterval, + delay + ); + TimeBasedCheckpointProvider provider = newCheckpointProvider(transformConfig); - SetOnce hasChangedHolder = new SetOnce<>(); - SetOnce exceptionHolder = new SetOnce<>(); - CountDownLatch latch = new CountDownLatch(1); - provider.sourceHasChanged( - lastCheckpoint, - new LatchedActionListener<>(ActionListener.wrap(hasChangedHolder::set, exceptionHolder::set), latch) - ); - assertThat(latch.await(100, TimeUnit.MILLISECONDS), is(true)); + SetOnce hasChangedHolder = new SetOnce<>(); + SetOnce 
exceptionHolder = new SetOnce<>(); + CountDownLatch latch = new CountDownLatch(1); + provider.sourceHasChanged( + lastCheckpoint, + new LatchedActionListener<>(ActionListener.wrap(hasChangedHolder::set, exceptionHolder::set), latch) + ); + assertThat(latch.await(100, TimeUnit.MILLISECONDS), is(true)); - ArgumentCaptor searchRequestArgumentCaptor = ArgumentCaptor.forClass(SearchRequest.class); - verify(client).execute(eq(TransportSearchAction.TYPE), searchRequestArgumentCaptor.capture(), any()); - SearchRequest searchRequest = searchRequestArgumentCaptor.getValue(); - BoolQueryBuilder boolQuery = (BoolQueryBuilder) searchRequest.source().query(); - RangeQueryBuilder rangeQuery = (RangeQueryBuilder) boolQuery.filter().get(1); - assertThat(rangeQuery.from(), is(equalTo(expectedRangeQueryBounds.v1()))); - assertThat(rangeQuery.to(), is(equalTo(expectedRangeQueryBounds.v2()))); + ArgumentCaptor searchRequestArgumentCaptor = ArgumentCaptor.forClass(SearchRequest.class); + verify(client).execute(eq(TransportSearchAction.TYPE), searchRequestArgumentCaptor.capture(), any()); + SearchRequest searchRequest = searchRequestArgumentCaptor.getValue(); + BoolQueryBuilder boolQuery = (BoolQueryBuilder) searchRequest.source().query(); + RangeQueryBuilder rangeQuery = (RangeQueryBuilder) boolQuery.filter().get(1); + assertThat(rangeQuery.from(), is(equalTo(expectedRangeQueryBounds.v1()))); + assertThat(rangeQuery.to(), is(equalTo(expectedRangeQueryBounds.v2()))); - assertThat(hasChangedHolder.get(), is(equalTo(expectedHasChangedValue))); - assertThat(exceptionHolder.get(), is(nullValue())); + assertThat(hasChangedHolder.get(), is(equalTo(expectedHasChangedValue))); + assertThat(exceptionHolder.get(), is(nullValue())); + } finally { + searchResponse.decRef(); + } } public void testCreateNextCheckpoint_NoDelay() throws InterruptedException { @@ -339,15 +343,13 @@ public SingleGroupSource get() { private static SearchResponse newSearchResponse(long totalHits) { return new SearchResponse( - new SearchResponseSections( - new SearchHits(SearchHits.EMPTY, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), 0), - null, - null, - false, - false, - null, - 0 - ), + SearchHits.empty(new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), 0), + null, + null, + false, + false, + null, + 0, null, 1, 1, diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java index 06de37af346d2..8ee7e902285c9 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.search.SearchContextMissingException; @@ -30,7 +31,6 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchContextId; 
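The test changes above wrap each mocked `SearchResponse` in `try`/`finally` and call `decRef()` once the assertions finish, because the responses are reference-counted and would leak if the creator's reference were never released, including on an assertion failure. A minimal sketch of that discipline with a hypothetical `RefCountedResponse`:

```java
// Hypothetical stand-in for a ref-counted SearchResponse.
final class RefCountedResponse {
    private int refs = 1; // the creator owns one reference

    synchronized void decRef() {
        if (--refs == 0) {
            release();
        }
    }

    private void release() {
        // free buffers, close resources, etc.
    }
}

class Demo {
    void useResponse() {
        RefCountedResponse response = new RefCountedResponse();
        try {
            // ... hand the response to the code under test, run assertions ...
        } finally {
            response.decRef(); // always release, even if an assertion throws
        }
    }
}
```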
import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.suggest.Suggest; @@ -59,7 +59,6 @@ import java.time.Clock; import java.time.Instant; -import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; @@ -135,7 +134,7 @@ public void testPitInjection() throws InterruptedException { mock(IndexBasedTransformConfigManager.class), mock(TransformCheckpointService.class), mock(TransformAuditor.class), - new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY, TimeValue.ZERO) ), mock(CheckpointProvider.class), new AtomicReference<>(IndexerState.STOPPED), @@ -229,7 +228,7 @@ public void testPitInjectionIfPitNotSupported() throws InterruptedException { mock(IndexBasedTransformConfigManager.class), mock(TransformCheckpointService.class), mock(TransformAuditor.class), - new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY, TimeValue.ZERO) ), mock(CheckpointProvider.class), new AtomicReference<>(IndexerState.STOPPED), @@ -306,7 +305,7 @@ public void testDisablePit() throws InterruptedException { mock(IndexBasedTransformConfigManager.class), mock(TransformCheckpointService.class), mock(TransformAuditor.class), - new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY, TimeValue.ZERO) ), mock(CheckpointProvider.class), new AtomicReference<>(IndexerState.STOPPED), @@ -370,7 +369,7 @@ public void testDisablePitWhenThereIsRemoteIndexInSource() throws InterruptedExc mock(IndexBasedTransformConfigManager.class), mock(TransformCheckpointService.class), mock(TransformAuditor.class), - new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY, TimeValue.ZERO) ), mock(CheckpointProvider.class), new AtomicReference<>(IndexerState.STOPPED), @@ -420,8 +419,9 @@ public void testHandlePitIndexNotFound() throws InterruptedException { try (var threadPool = createThreadPool()) { final var client = new PitMockClient(threadPool, true); ClientTransformIndexer indexer = createTestIndexer(new ParentTaskAssigningClient(client, new TaskId("dummy-node:123456"))); - SearchRequest searchRequest = new SearchRequest("deleted-index"); - searchRequest.source().pointInTimeBuilder(new PointInTimeBuilder("the_pit_id")); + SearchRequest searchRequest = new SearchRequest("deleted-index").source( + new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder("the_pit_id_on_deleted_index")) + ); Tuple namedSearchRequest = new Tuple<>("test-handle-pit-index-not-found", searchRequest); this.assertAsync(listener -> indexer.doSearch(namedSearchRequest, listener), response -> { // if the pit got deleted, we know it retried @@ -433,8 +433,9 @@ public void testHandlePitIndexNotFound() throws InterruptedException { try (var threadPool = createThreadPool()) { final var client = new PitMockClient(threadPool, true); ClientTransformIndexer indexer = createTestIndexer(new ParentTaskAssigningClient(client, new TaskId("dummy-node:123456"))); - SearchRequest searchRequest = new SearchRequest("essential-deleted-index"); - searchRequest.source().pointInTimeBuilder(new PointInTimeBuilder("the_pit_id")); + SearchRequest 
searchRequest = new SearchRequest("essential-deleted-index").source( + new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder("the_pit_id_essential-deleted-index")) + ); Tuple namedSearchRequest = new Tuple<>("test-handle-pit-index-not-found", searchRequest); indexer.doSearch(namedSearchRequest, ActionListener.wrap(r -> fail("expected a failure, got response"), e -> { assertTrue(e instanceof IndexNotFoundException); @@ -521,14 +522,16 @@ protected void listener.onResponse((Response) response); return; } else if (request instanceof SearchRequest searchRequest) { - // if pit is used and deleted-index is given throw index not found - if (searchRequest.pointInTimeBuilder() != null && Arrays.binarySearch(searchRequest.indices(), "deleted-index") >= 0) { + if (searchRequest.pointInTimeBuilder() != null + && searchRequest.pointInTimeBuilder().getEncodedId().equals("the_pit_id_on_deleted_index")) { listener.onFailure(new IndexNotFoundException("deleted-index")); return; } - if (Arrays.binarySearch(searchRequest.indices(), "essential-deleted-index") >= 0) { + if ((searchRequest.pointInTimeBuilder() != null + && searchRequest.pointInTimeBuilder().getEncodedId().equals("the_pit_id_essential-deleted-index")) + || (searchRequest.indices().length > 0 && searchRequest.indices()[0].equals("essential-deleted-index"))) { listener.onFailure(new IndexNotFoundException("essential-deleted-index")); return; } @@ -538,33 +541,32 @@ protected void && "the_pit_id+++".equals(searchRequest.pointInTimeBuilder().getEncodedId())) { listener.onFailure(new SearchContextMissingException(new ShardSearchContextId("sc_missing", 42))); } else { - SearchResponse response = new SearchResponse( - new InternalSearchResponse( + ActionListener.respondAndRelease( + listener, + (Response) new SearchResponse( new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), // Simulate completely null aggs null, new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), false, false, - 1 - ), - null, - 1, - 1, - 0, - 0, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY, - // copy the pit from the request - searchRequest.pointInTimeBuilder() != null ? searchRequest.pointInTimeBuilder().getEncodedId() + "+" : null + new SearchProfileResults(Collections.emptyMap()), + 1, + null, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY, + // copy the pit from the request + searchRequest.pointInTimeBuilder() != null ? 
searchRequest.pointInTimeBuilder().getEncodedId() + "+" : null + ) ); - listener.onResponse((Response) response); } return; } - super.doExecute(action, request, listener); } } @@ -599,7 +601,7 @@ private ClientTransformIndexer createTestIndexer(ParentTaskAssigningClient clien mock(IndexBasedTransformConfigManager.class), mock(TransformCheckpointService.class), mock(TransformAuditor.class), - new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY, TimeValue.ZERO) ), mock(CheckpointProvider.class), new AtomicReference<>(IndexerState.STOPPED), diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java index d3be18a193415..5dee74cccee7a 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java @@ -30,7 +30,6 @@ import org.elasticsearch.script.ScriptException; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.test.ESTestCase; @@ -129,7 +128,7 @@ static class MockedTransformIndexer extends ClientTransformIndexer { transformsConfigManager, mock(TransformCheckpointService.class), auditor, - new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY, TimeValue.ZERO) ), checkpointProvider, initialState, @@ -222,18 +221,17 @@ protected void onAbort() { @Override void doGetInitialProgress(SearchRequest request, ActionListener responseListener) { - responseListener.onResponse( + ActionListener.respondAndRelease( + responseListener, new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), 0.0f), - // Simulate completely null aggs - null, - new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), - false, - false, - 1 - ), + SearchHits.EMPTY_WITH_TOTAL_HITS, + // Simulate completely null aggs + null, + new Suggest(Collections.emptyList()), + false, + false, + new SearchProfileResults(Collections.emptyMap()), + 1, "", 1, 1, @@ -375,16 +373,14 @@ public void testDoProcessAggNullCheck() { null ); SearchResponse searchResponse = new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), 0.0f), - // Simulate completely null aggs - null, - new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), - false, - false, - 1 - ), + SearchHits.EMPTY_WITH_TOTAL_HITS, + // Simulate completely null aggs + null, + new Suggest(Collections.emptyList()), + false, + false, + new SearchProfileResults(Collections.emptyMap()), + 1, "", 1, 1, @@ -393,29 +389,33 @@ public void testDoProcessAggNullCheck() { ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY ); - AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); - Function searchFunction = searchRequest -> 
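Several mocks above switch from `listener.onResponse(response)` to `ActionListener.respondAndRelease(listener, response)`, which notifies the listener and drops the producer's reference in one step. A hedged sketch of that idiom, with a hypothetical `Releasable` in place of the real ref-counted response type:

```java
import java.util.function.Consumer;

// Stand-in for a ref-counted object; decRef releases one reference.
interface Releasable {
    void decRef();
}

final class Listeners {
    // Sketch of the respond-and-release shape: notify, then release the
    // producer's reference whether or not the listener threw.
    static <R extends Releasable> void respondAndRelease(Consumer<R> listener, R response) {
        try {
            listener.accept(response);
        } finally {
            response.decRef();
        }
    }
}
```

A listener that needs the response beyond the callback would have to retain its own reference before returning.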
searchResponse; - Function bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); - - TransformAuditor auditor = mock(TransformAuditor.class); - TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, mock(TransformContext.Listener.class)); - - MockedTransformIndexer indexer = createMockIndexer( - config, - state, - searchFunction, - bulkFunction, - null, - threadPool, - ThreadPool.Names.GENERIC, - auditor, - context - ); + try { + AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); + Function searchFunction = searchRequest -> searchResponse; + Function bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); + + TransformAuditor auditor = mock(TransformAuditor.class); + TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, mock(TransformContext.Listener.class)); + + MockedTransformIndexer indexer = createMockIndexer( + config, + state, + searchFunction, + bulkFunction, + null, + threadPool, + ThreadPool.Names.GENERIC, + auditor, + context + ); - IterationResult newPosition = indexer.doProcess(searchResponse); - assertThat(newPosition.getToIndex().collect(Collectors.toList()), is(empty())); - assertThat(newPosition.getPosition(), is(nullValue())); - assertThat(newPosition.isDone(), is(true)); + IterationResult newPosition = indexer.doProcess(searchResponse); + assertThat(newPosition.getToIndex().collect(Collectors.toList()), is(empty())); + assertThat(newPosition.getPosition(), is(nullValue())); + assertThat(newPosition.isDone(), is(true)); + } finally { + searchResponse.decRef(); + } } public void testScriptError() throws Exception { @@ -513,16 +513,14 @@ public void testRetentionPolicyDeleteByQueryThrowsIrrecoverable() throws Excepti ); final SearchResponse searchResponse = new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), - // Simulate completely null aggs - null, - new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), - false, - false, - 1 - ), + new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), + // Simulate completely null aggs + null, + new Suggest(Collections.emptyList()), + false, + false, + new SearchProfileResults(Collections.emptyMap()), + 1, "", 1, 1, @@ -531,58 +529,61 @@ public void testRetentionPolicyDeleteByQueryThrowsIrrecoverable() throws Excepti ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY ); - - AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); - Function searchFunction = searchRequest -> searchResponse; - - Function bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); - - Function deleteByQueryFunction = deleteByQueryRequest -> { - throw new SearchPhaseExecutionException( - "query", - "Partial shards failure", - new ShardSearchFailure[] { - new ShardSearchFailure( - new ElasticsearchParseException("failed to parse date field", new IllegalArgumentException("illegal format")) - ) } + try { + AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); + Function searchFunction = searchRequest -> searchResponse; + + Function bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); + + Function deleteByQueryFunction = deleteByQueryRequest -> { + throw new SearchPhaseExecutionException( + "query", + "Partial shards failure", + new ShardSearchFailure[] { + new 
ShardSearchFailure( + new ElasticsearchParseException("failed to parse date field", new IllegalArgumentException("illegal format")) + ) } + ); + }; + + final AtomicBoolean failIndexerCalled = new AtomicBoolean(false); + final AtomicReference failureMessage = new AtomicReference<>(); + + MockTransformAuditor auditor = MockTransformAuditor.createMockAuditor(); + TransformContext.Listener contextListener = createContextListener(failIndexerCalled, failureMessage); + TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, contextListener); + + MockedTransformIndexer indexer = createMockIndexer( + config, + state, + searchFunction, + bulkFunction, + deleteByQueryFunction, + threadPool, + ThreadPool.Names.GENERIC, + auditor, + context ); - }; - final AtomicBoolean failIndexerCalled = new AtomicBoolean(false); - final AtomicReference failureMessage = new AtomicReference<>(); + final CountDownLatch latch = indexer.newLatch(1); - MockTransformAuditor auditor = MockTransformAuditor.createMockAuditor(); - TransformContext.Listener contextListener = createContextListener(failIndexerCalled, failureMessage); - TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, contextListener); + indexer.start(); + assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); + assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); + assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); - MockedTransformIndexer indexer = createMockIndexer( - config, - state, - searchFunction, - bulkFunction, - deleteByQueryFunction, - threadPool, - ThreadPool.Names.GENERIC, - auditor, - context - ); - - final CountDownLatch latch = indexer.newLatch(1); - - indexer.start(); - assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); - assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); - assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); - - latch.countDown(); - assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STARTED)), 10, TimeUnit.SECONDS); - assertTrue(failIndexerCalled.get()); - assertThat( - failureMessage.get(), - matchesRegex( - "task encountered irrecoverable failure: org.elasticsearch.ElasticsearchParseException: failed to parse date field;.*" - ) - ); + latch.countDown(); + assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STARTED)), 10, TimeUnit.SECONDS); + assertTrue(failIndexerCalled.get()); + assertThat( + failureMessage.get(), + matchesRegex( + "task encountered irrecoverable failure: org.elasticsearch.ElasticsearchParseException: failed to parse date field;.*" + ) + ); + } finally { + searchResponse.decRef(); + } } public void testRetentionPolicyDeleteByQueryThrowsTemporaryProblem() throws Exception { @@ -605,16 +606,14 @@ public void testRetentionPolicyDeleteByQueryThrowsTemporaryProblem() throws Exce ); final SearchResponse searchResponse = new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), - // Simulate completely null aggs - null, - new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), - false, - false, - 1 - ), + new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), + // Simulate completely null aggs + null, + new Suggest(Collections.emptyList()), + false, + false, + new SearchProfileResults(Collections.emptyMap()), + 1, "", 1, 1, @@ -623,61 +622,64 @@ public void 
testRetentionPolicyDeleteByQueryThrowsTemporaryProblem() throws Exce ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY ); - - AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); - Function searchFunction = searchRequest -> searchResponse; - - Function bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); - - Function deleteByQueryFunction = deleteByQueryRequest -> { - throw new SearchPhaseExecutionException( - "query", - "Partial shards failure", - new ShardSearchFailure[] { new ShardSearchFailure(new ElasticsearchTimeoutException("timed out during dbq")) } + try { + AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); + Function searchFunction = searchRequest -> searchResponse; + + Function bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); + + Function deleteByQueryFunction = deleteByQueryRequest -> { + throw new SearchPhaseExecutionException( + "query", + "Partial shards failure", + new ShardSearchFailure[] { new ShardSearchFailure(new ElasticsearchTimeoutException("timed out during dbq")) } + ); + }; + + final AtomicBoolean failIndexerCalled = new AtomicBoolean(false); + final AtomicReference failureMessage = new AtomicReference<>(); + + MockTransformAuditor auditor = MockTransformAuditor.createMockAuditor(); + auditor.addExpectation( + new MockTransformAuditor.SeenAuditExpectation( + "timed out during dbq", + Level.WARNING, + transformId, + "Transform encountered an exception: [org.elasticsearch.ElasticsearchTimeoutException: timed out during dbq];" + + " Will automatically retry [1/10]" + ) + ); + TransformContext.Listener contextListener = createContextListener(failIndexerCalled, failureMessage); + TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, contextListener); + + MockedTransformIndexer indexer = createMockIndexer( + config, + state, + searchFunction, + bulkFunction, + deleteByQueryFunction, + threadPool, + ThreadPool.Names.GENERIC, + auditor, + context ); - }; - - final AtomicBoolean failIndexerCalled = new AtomicBoolean(false); - final AtomicReference failureMessage = new AtomicReference<>(); - - MockTransformAuditor auditor = MockTransformAuditor.createMockAuditor(); - auditor.addExpectation( - new MockTransformAuditor.SeenAuditExpectation( - "timed out during dbq", - Level.WARNING, - transformId, - "Transform encountered an exception: [org.elasticsearch.ElasticsearchTimeoutException: timed out during dbq];" - + " Will automatically retry [1/10]" - ) - ); - TransformContext.Listener contextListener = createContextListener(failIndexerCalled, failureMessage); - TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, contextListener); - - MockedTransformIndexer indexer = createMockIndexer( - config, - state, - searchFunction, - bulkFunction, - deleteByQueryFunction, - threadPool, - ThreadPool.Names.GENERIC, - auditor, - context - ); - final CountDownLatch latch = indexer.newLatch(1); + final CountDownLatch latch = indexer.newLatch(1); - indexer.start(); - assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); - assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); - assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); + indexer.start(); + assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); + assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); + assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); - latch.countDown(); - assertBusy(() -> 
assertThat(indexer.getState(), equalTo(IndexerState.STARTED)), 10, TimeUnit.SECONDS); - assertFalse(failIndexerCalled.get()); - assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); - auditor.assertAllExpectationsMatched(); - assertEquals(1, context.getFailureCount()); + latch.countDown(); + assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STARTED)), 10, TimeUnit.SECONDS); + assertFalse(failIndexerCalled.get()); + assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); + auditor.assertAllExpectationsMatched(); + assertEquals(1, context.getFailureCount()); + } finally { + searchResponse.decRef(); + } } public void testFailureCounterIsResetOnSuccess() throws Exception { @@ -700,16 +702,14 @@ public void testFailureCounterIsResetOnSuccess() throws Exception { ); final SearchResponse searchResponse = new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), - // Simulate completely null aggs - null, - new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), - false, - false, - 1 - ), + new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), + // Simulate completely null aggs + null, + new Suggest(Collections.emptyList()), + false, + false, + new SearchProfileResults(Collections.emptyMap()), + 1, "", 1, 1, @@ -718,72 +718,75 @@ public void testFailureCounterIsResetOnSuccess() throws Exception { ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY ); - - AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); - Function searchFunction = new Function<>() { - final AtomicInteger calls = new AtomicInteger(0); - - @Override - public SearchResponse apply(SearchRequest searchRequest) { - int call = calls.getAndIncrement(); - if (call == 0) { - throw new SearchPhaseExecutionException( - "query", - "Partial shards failure", - new ShardSearchFailure[] { new ShardSearchFailure(new Exception()) } - ); + try { + AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); + Function searchFunction = new Function<>() { + final AtomicInteger calls = new AtomicInteger(0); + + @Override + public SearchResponse apply(SearchRequest searchRequest) { + int call = calls.getAndIncrement(); + if (call == 0) { + throw new SearchPhaseExecutionException( + "query", + "Partial shards failure", + new ShardSearchFailure[] { new ShardSearchFailure(new Exception()) } + ); + } + return searchResponse; } - return searchResponse; - } - }; + }; - Function bulkFunction = request -> new BulkResponse(new BulkItemResponse[0], 1); + Function bulkFunction = request -> new BulkResponse(new BulkItemResponse[0], 1); - final AtomicBoolean failIndexerCalled = new AtomicBoolean(false); - final AtomicReference failureMessage = new AtomicReference<>(); + final AtomicBoolean failIndexerCalled = new AtomicBoolean(false); + final AtomicReference failureMessage = new AtomicReference<>(); - MockTransformAuditor auditor = MockTransformAuditor.createMockAuditor(); - TransformContext.Listener contextListener = createContextListener(failIndexerCalled, failureMessage); - TransformContext context = new TransformContext(TransformTaskState.STARTED, "", 0, contextListener); + MockTransformAuditor auditor = MockTransformAuditor.createMockAuditor(); + TransformContext.Listener contextListener = createContextListener(failIndexerCalled, failureMessage); + TransformContext context = new 
TransformContext(TransformTaskState.STARTED, "", 0, contextListener); - MockedTransformIndexer indexer = createMockIndexer( - config, - state, - searchFunction, - bulkFunction, - null, - threadPool, - ThreadPool.Names.GENERIC, - auditor, - context - ); + MockedTransformIndexer indexer = createMockIndexer( + config, + state, + searchFunction, + bulkFunction, + null, + threadPool, + ThreadPool.Names.GENERIC, + auditor, + context + ); - final CountDownLatch latch = indexer.newLatch(1); + final CountDownLatch latch = indexer.newLatch(1); - indexer.start(); - assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); - assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); - assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); + indexer.start(); + assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); + assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); + assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); - latch.countDown(); - assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STARTED)), 10, TimeUnit.SECONDS); - assertFalse(failIndexerCalled.get()); - assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); - assertEquals(1, context.getFailureCount()); + latch.countDown(); + assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STARTED)), 10, TimeUnit.SECONDS); + assertFalse(failIndexerCalled.get()); + assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); + assertEquals(1, context.getFailureCount()); - final CountDownLatch secondLatch = indexer.newLatch(1); + final CountDownLatch secondLatch = indexer.newLatch(1); - indexer.start(); - assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); - assertBusy(() -> assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis()))); - assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); + indexer.start(); + assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); + assertBusy(() -> assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis()))); + assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); - secondLatch.countDown(); - assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STARTED)), 10, TimeUnit.SECONDS); - assertFalse(failIndexerCalled.get()); - assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); - auditor.assertAllExpectationsMatched(); - assertEquals(0, context.getFailureCount()); + secondLatch.countDown(); + assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STARTED)), 10, TimeUnit.SECONDS); + assertFalse(failIndexerCalled.get()); + assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); + auditor.assertAllExpectationsMatched(); + assertEquals(0, context.getFailureCount()); + } finally { + searchResponse.decRef(); + } } // tests throttling of audits on logs based on repeated exception types diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java index 55ae653c39629..750e535c4330f 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java @@ -12,6 +12,7 @@ import 
org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.shard.ShardId; @@ -217,7 +218,7 @@ public void fail(Throwable exception, String failureMessage, ActionListener(IndexerState.STOPPED), @@ -299,7 +300,7 @@ public void fail(Throwable exception, String failureMessage, ActionListener(IndexerState.STOPPED), @@ -430,7 +431,7 @@ public void fail(Throwable exception, String failureMessage, ActionListener(IndexerState.STOPPED), diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java index 638a66fa3fb0d..9e72a92da5bee 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.index.reindex.DeleteByQueryRequest; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.test.ESTestCase; @@ -79,16 +78,14 @@ public class TransformIndexerStateTests extends ESTestCase { private static final SearchResponse ONE_HIT_SEARCH_RESPONSE = new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), - // Simulate completely null aggs - null, - new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), - false, - false, - 1 - ), + new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), + // Simulate completely null aggs + null, + new Suggest(Collections.emptyList()), + false, + false, + new SearchProfileResults(Collections.emptyMap()), + 1, "", 1, 1, @@ -805,7 +802,7 @@ private MockedTransformIndexer createMockIndexer( transformConfigManager, mock(TransformCheckpointService.class), transformAuditor, - new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY, TimeValue.ZERO) ); MockedTransformIndexer indexer = new MockedTransformIndexer( @@ -839,7 +836,7 @@ private MockedTransformIndexerForStatePersistenceTesting createMockIndexerForSta transformConfigManager, mock(TransformCheckpointService.class), transformAuditor, - new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY, TimeValue.ZERO) ); MockedTransformIndexerForStatePersistenceTesting indexer = new MockedTransformIndexerForStatePersistenceTesting( diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerTests.java index 6406308312f04..372aef3d0eea7 100644 --- 
a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.index.reindex.DeleteByQueryRequest; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.test.ESTestCase; @@ -75,16 +74,14 @@ public class TransformIndexerTests extends ESTestCase { private static final SearchResponse ONE_HIT_SEARCH_RESPONSE = new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), - // Simulate completely null aggs - null, - new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), - false, - false, - 1 - ), + new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), + // Simulate completely null aggs + null, + new Suggest(Collections.emptyList()), + false, + false, + new SearchProfileResults(Collections.emptyMap()), + 1, "", 1, 1, @@ -451,7 +448,7 @@ private MockedTransformIndexer createMockIndexer( transformConfigManager, mock(TransformCheckpointService.class), transformAuditor, - new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY, TimeValue.ZERO) ); MockedTransformIndexer indexer = new MockedTransformIndexer( diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java index b1582970d4e07..69d81c85a62d3 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; @@ -444,7 +445,7 @@ private TransformPersistentTasksExecutor buildTaskExecutor() { transformsConfigManager, transformCheckpointService, mockAuditor, - new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY, TimeValue.ZERO) ); ClusterSettings cSettings = new ClusterSettings(Settings.EMPTY, Collections.singleton(Transform.NUM_FAILURE_RETRIES_SETTING)); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java index cda258c6daa81..12af48faf8e38 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java +++ 
b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java @@ -112,7 +112,7 @@ public void testStopOnFailedTaskWithStoppedIndexer() { transformsConfigManager, transformsCheckpointService, auditor, - new TransformScheduler(clock, threadPool, Settings.EMPTY) + new TransformScheduler(clock, threadPool, Settings.EMPTY, TimeValue.ZERO) ); TransformState transformState = new TransformState( @@ -134,7 +134,7 @@ public void testStopOnFailedTaskWithStoppedIndexer() { TaskId.EMPTY_TASK_ID, createTransformTaskParams(transformConfig.getId()), transformState, - new TransformScheduler(clock, threadPool, Settings.EMPTY), + new TransformScheduler(clock, threadPool, Settings.EMPTY, TimeValue.ZERO), auditor, threadPool, Collections.emptyMap() @@ -212,7 +212,7 @@ public void testStopOnFailedTaskWithoutIndexer() { TaskId.EMPTY_TASK_ID, createTransformTaskParams(transformConfig.getId()), transformState, - new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY), + new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY, TimeValue.ZERO), auditor, threadPool, Collections.emptyMap() @@ -431,7 +431,7 @@ public void testApplyNewAuthState() { TaskId.EMPTY_TASK_ID, createTransformTaskParams(transformConfig.getId()), transformState, - new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY), + new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY, TimeValue.ZERO), auditor, threadPool, Collections.emptyMap() diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/CompositeBucketsChangeCollectorTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/CompositeBucketsChangeCollectorTests.java index 9b8cf9745c558..708cb3d93cbed 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/CompositeBucketsChangeCollectorTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/CompositeBucketsChangeCollectorTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.transform.transforms.pivot; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.TermsQueryBuilder; @@ -112,8 +111,22 @@ public void testTermsFieldCollector() throws IOException { }); Aggregations aggs = new Aggregations(Collections.singletonList(composite)); - SearchResponseSections sections = new SearchResponseSections(null, aggs, null, false, null, null, 1); - SearchResponse response = new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null); + SearchResponse response = new SearchResponse( + null, + aggs, + null, + false, + null, + null, + 1, + null, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + null + ); try { collector.processSearchResponse(response); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/DateHistogramFieldCollectorTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/DateHistogramFieldCollectorTests.java index d43b4bd672a07..dab6d8518d28f 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/DateHistogramFieldCollectorTests.java +++ 
b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/DateHistogramFieldCollectorTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.transform.transforms.pivot; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; @@ -171,16 +170,22 @@ private static QueryBuilder buildFilterQuery(ChangeCollector collector) { } private static SearchResponse buildSearchResponse(SingleValue minTimestamp, SingleValue maxTimestamp) { - SearchResponseSections sections = new SearchResponseSections( + return new SearchResponse( null, new Aggregations(Arrays.asList(minTimestamp, maxTimestamp)), null, false, null, null, - 1 + 1, + null, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + null ); - return new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null); } } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java index 37bee4a4eb999..67f923769ffe3 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java @@ -7,14 +7,12 @@ package org.elasticsearch.xpack.transform.transforms.pivot; -import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.aggregations.AggregationsPlugin; import org.elasticsearch.client.internal.Client; @@ -23,7 +21,6 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.Aggregations; @@ -327,9 +324,7 @@ public void testPreviewForCompositeAggregation() throws Exception { } private static SearchResponse searchResponseFromAggs(Aggregations aggs) { - SearchResponseSections sections = new SearchResponseSections(null, aggs, null, false, null, null, 1); - SearchResponse searchResponse = new SearchResponse(sections, null, 10, 5, 0, 0, new ShardSearchFailure[0], null); - return searchResponse; + return new SearchResponse(null, aggs, null, false, null, null, 1, null, 10, 5, 0, 0, new ShardSearchFailure[0], null); } private class MyMockClient extends NoOpClient { @@ -358,28 +353,25 @@ protected void searchFailures.add(new ShardSearchFailure(new RuntimeException("shard failed"))); } } - - final SearchResponseSections sections = new SearchResponseSections( - new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), 0), - null, - null, - false, - null, - null, - 1 - ); - final SearchResponse response = new SearchResponse( - sections, - null, - 10, - 
searchFailures.size() > 0 ? 0 : 5, - 0, - 0, - searchFailures.toArray(new ShardSearchFailure[searchFailures.size()]), - null + ActionListener.respondAndRelease( + listener, + (Response) new SearchResponse( + SearchHits.EMPTY_WITH_TOTAL_HITS, + null, + null, + false, + null, + null, + 1, + null, + 10, + searchFailures.size() > 0 ? 0 : 5, + 0, + 0, + searchFailures.toArray(new ShardSearchFailure[searchFailures.size()]), + null + ) ); - - listener.onResponse((Response) response); return; } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskQueueTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskQueueTests.java index 4748189745f1b..5030d42f9c17c 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskQueueTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskQueueTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.xpack.transform.Transform; import org.elasticsearch.xpack.transform.transforms.scheduling.TransformScheduler.Event; import org.hamcrest.Matchers; import org.junit.After; @@ -197,7 +198,7 @@ public void testUpdatePriority() { private static TransformScheduledTask createTask(String transformId, long nextScheduledTimeMillis) { return new TransformScheduledTask( transformId, - null, + Transform.DEFAULT_SCHEDULER_FREQUENCY, null, 0, nextScheduledTimeMillis, diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskTests.java index fd8a1de429c14..5d2efdd23a0af 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskTests.java @@ -32,11 +32,6 @@ public void testBasics() { assertThat(task.getListener(), is(equalTo(LISTENER))); } - public void testDefaultFrequency() { - TransformScheduledTask task = new TransformScheduledTask(TRANSFORM_ID, null, LAST_TRIGGERED_TIME_MILLIS, 0, 0, LISTENER); - assertThat(task.getFrequency(), is(equalTo(DEFAULT_FREQUENCY))); - } - public void testNextScheduledTimeMillis() { { TransformScheduledTask task = new TransformScheduledTask(TRANSFORM_ID, FREQUENCY, LAST_TRIGGERED_TIME_MILLIS, 0, 123, LISTENER); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulerTests.java index 7125b4074bc4a..8d3220a5b4de3 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulerTests.java @@ -61,49 +61,59 @@ public void shutdownThreadPool() { } public void testScheduling() { + testScheduling(5, 0); + } + + public void testScheduling_withMinFrequency() { + testScheduling(1, 5); + } + + // Note: 
frequencySeconds and minFrequencySeconds together should lead to an expected frequency of 5 seconds. + private void testScheduling(int frequencySeconds, int minFrequencySeconds) { String transformId = "test-with-fake-clock"; - int frequencySeconds = 5; TimeValue frequency = TimeValue.timeValueSeconds(frequencySeconds); + TimeValue minFrequency = TimeValue.timeValueSeconds(minFrequencySeconds); + TimeValue fiveSeconds = TimeValue.timeValueSeconds(5); TransformTaskParams transformTaskParams = new TransformTaskParams(transformId, TransformConfigVersion.CURRENT, frequency, false); FakeClock clock = new FakeClock(Instant.ofEpochMilli(0)); CopyOnWriteArrayList events = new CopyOnWriteArrayList<>(); TransformScheduler.Listener listener = events::add; - TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS); + TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS, minFrequency); transformScheduler.registerTransform(transformTaskParams, listener); assertThat( transformScheduler.getTransformScheduledTasks(), - contains(new TransformScheduledTask(transformId, frequency, 0L, 0, 5000, listener)) + contains(new TransformScheduledTask(transformId, fiveSeconds, 0L, 0, 5000, listener)) ); assertThat(events, hasSize(1)); - for (int i = 0; i < frequencySeconds; ++i) { + for (int i = 0; i < 5; ++i) { transformScheduler.processScheduledTasks(); assertThat( transformScheduler.getTransformScheduledTasks(), - contains(new TransformScheduledTask(transformId, frequency, 0L, 0, 5000, listener)) + contains(new TransformScheduledTask(transformId, fiveSeconds, 0L, 0, 5000, listener)) ); assertThat(events, hasSize(1)); clock.advanceTimeBy(Duration.ofMillis(1001)); } assertThat(clock.instant(), is(equalTo(Instant.ofEpochMilli(5005)))); - for (int i = 0; i < frequencySeconds; ++i) { + for (int i = 0; i < 5; ++i) { transformScheduler.processScheduledTasks(); assertThat( transformScheduler.getTransformScheduledTasks(), - contains(new TransformScheduledTask(transformId, frequency, 5005L, 0, 10005, listener)) + contains(new TransformScheduledTask(transformId, fiveSeconds, 5005L, 0, 10005, listener)) ); assertThat(events, hasSize(2)); clock.advanceTimeBy(Duration.ofMillis(1001)); } assertThat(clock.instant(), is(equalTo(Instant.ofEpochMilli(10010)))); - for (int i = 0; i < frequencySeconds; ++i) { + for (int i = 0; i < 5; ++i) { transformScheduler.processScheduledTasks(); assertThat( transformScheduler.getTransformScheduledTasks(), - contains(new TransformScheduledTask(transformId, frequency, 10010L, 0, 15010, listener)) + contains(new TransformScheduledTask(transformId, fiveSeconds, 10010L, 0, 15010, listener)) ); assertThat(events, hasSize(3)); clock.advanceTimeBy(Duration.ofMillis(1001)); @@ -128,7 +138,7 @@ public void testSchedulingWithFailures() { CopyOnWriteArrayList events = new CopyOnWriteArrayList<>(); TransformScheduler.Listener listener = events::add; - TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS); + TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS, TimeValue.ZERO); transformScheduler.registerTransform(transformTaskParams, listener); assertThat( transformScheduler.getTransformScheduledTasks(), @@ -180,7 +190,7 @@ public void testScheduleNow() { CopyOnWriteArrayList events = new CopyOnWriteArrayList<>(); TransformScheduler.Listener listener = events::add; - TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS); + 
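As the note above says, both parameter pairs are chosen so the effective frequency comes out at five seconds: `testScheduling(5, 0)` uses the requested frequency directly, while `testScheduling(1, 5)` relies on the minimum-frequency clamp. A quick standalone check of that premise (the clamp is restated locally; the names are illustrative, not the real API):

```java
import java.time.Duration;

class EffectiveFrequencyCheck {
    // Same clamp as the scheduler's getFrequency, minus the null default.
    static Duration effective(Duration requested, Duration min) {
        return requested.compareTo(min) >= 0 ? requested : min;
    }

    public static void main(String[] args) {
        // Mirrors testScheduling(5, 0): requested frequency wins.
        System.out.println(effective(Duration.ofSeconds(5), Duration.ZERO));         // PT5S
        // Mirrors testScheduling(1, 5): the minimum clamps the request up.
        System.out.println(effective(Duration.ofSeconds(1), Duration.ofSeconds(5))); // PT5S
    }
}
```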
TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS, TimeValue.ZERO); transformScheduler.registerTransform(transformTaskParams, listener); assertThat( transformScheduler.getTransformScheduledTasks(), @@ -230,7 +240,7 @@ public void testConcurrentProcessing() throws Exception { CopyOnWriteArrayList events = new CopyOnWriteArrayList<>(); TransformScheduler.Listener listener = events::add; - TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS); + TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS, TimeValue.ZERO); transformScheduler.registerTransform(transformTaskParams, listener); assertThat( transformScheduler.getTransformScheduledTasks(), @@ -267,7 +277,7 @@ public void testConcurrentModifications() { FakeClock clock = new FakeClock(Instant.ofEpochMilli(0)); CopyOnWriteArrayList events = new CopyOnWriteArrayList<>(); - TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS); + TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS, TimeValue.ZERO); TransformScheduler.Listener taskModifyingListener = new TransformScheduler.Listener() { private boolean firstTime = true; @@ -309,7 +319,7 @@ public void testSchedulingWithSystemClock() throws Exception { Clock clock = Clock.systemUTC(); CopyOnWriteArrayList events = new CopyOnWriteArrayList<>(); - TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS); + TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS, TimeValue.ZERO); transformScheduler.start(); transformScheduler.registerTransform(transformTaskParams, events::add); assertThat(events, hasSize(1)); @@ -334,7 +344,7 @@ public void testScheduleNowWithSystemClock() throws Exception { Clock clock = Clock.systemUTC(); CopyOnWriteArrayList events = new CopyOnWriteArrayList<>(); - TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS); + TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS, TimeValue.ZERO); transformScheduler.start(); transformScheduler.registerTransform(transformTaskParams, events::add); assertThat(events, hasSize(1)); @@ -391,7 +401,7 @@ public void testRegisterMultipleTransforms() { CopyOnWriteArrayList events = new CopyOnWriteArrayList<>(); TransformScheduler.Listener listener = events::add; - TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS); + TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS, TimeValue.ZERO); transformScheduler.registerTransform(transformTaskParams1, listener); transformScheduler.registerTransform(transformTaskParams2, listener); transformScheduler.registerTransform(transformTaskParams3, listener); @@ -421,7 +431,7 @@ public void testMultipleTransformsEligibleForProcessingAtOnce() { CopyOnWriteArrayList events = new CopyOnWriteArrayList<>(); TransformScheduler.Listener listener = events::add; - TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS); + TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS, TimeValue.ZERO); transformScheduler.registerTransform(transformTaskParams1, listener); transformScheduler.registerTransform(transformTaskParams2, listener); transformScheduler.registerTransform(transformTaskParams3, listener); diff --git 
a/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/rest/RestVectorTileAction.java b/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/rest/RestVectorTileAction.java index f3332cb50e27b..fe6a0b93ca7cd 100644 --- a/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/rest/RestVectorTileAction.java +++ b/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/rest/RestVectorTileAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.geo.GeoBoundingBox; @@ -147,21 +146,14 @@ public RestResponse buildResponse(SearchResponse searchResponse) throws Exceptio .collect(Collectors.toList()) ); final SearchResponse meta = new SearchResponse( - new SearchResponseSections( - new SearchHits( - SearchHits.EMPTY, - searchResponse.getHits().getTotalHits(), - searchResponse.getHits().getMaxScore() - ), // remove actual hits - aggsWithoutGridAndBounds, - searchResponse.getSuggest(), - searchResponse.isTimedOut(), - searchResponse.isTerminatedEarly(), - searchResponse.getProfileResults() == null - ? null - : new SearchProfileResults(searchResponse.getProfileResults()), - searchResponse.getNumReducePhases() - ), + // remove actual hits + SearchHits.empty(searchResponse.getHits().getTotalHits(), searchResponse.getHits().getMaxScore()), + aggsWithoutGridAndBounds, + searchResponse.getSuggest(), + searchResponse.isTimedOut(), + searchResponse.isTerminatedEarly(), + searchResponse.getProfileResults() == null ? 
null : new SearchProfileResults(searchResponse.getProfileResults()), + searchResponse.getNumReducePhases(), searchResponse.getScrollId(), searchResponse.getTotalShards(), searchResponse.getSuccessfulShards(), diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookTokenIntegrationTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookTokenIntegrationTests.java index 5797541b72d98..7da2c5b718356 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookTokenIntegrationTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookTokenIntegrationTests.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.watcher.actions.webhook; -import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsAction; import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequest; +import org.elasticsearch.action.admin.cluster.node.reload.TransportNodesReloadSecureSettingsAction; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.MockSecureSettings; @@ -100,8 +100,12 @@ public void testWebhook() throws Exception { } // Reload the keystore to load the new settings NodesReloadSecureSettingsRequest reloadReq = new NodesReloadSecureSettingsRequest(); - reloadReq.setSecureStorePassword(new SecureString("".toCharArray())); - client().execute(NodesReloadSecureSettingsAction.INSTANCE, reloadReq).get(); + try { + reloadReq.setSecureStorePassword(new SecureString("".toCharArray())); + client().execute(TransportNodesReloadSecureSettingsAction.TYPE, reloadReq).get(); + } finally { + reloadReq.decRef(); + } webServer.enqueue(new MockResponse().setResponseCode(200).setBody("body")); HttpRequestTemplate.Builder builder = HttpRequestTemplate.builder("localhost", webServer.getPort()) diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java index dbb7b7d93c2e3..f02b3f865adf0 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; import org.elasticsearch.xpack.core.watcher.watch.Payload; @@ -105,17 +104,14 @@ public void testExecuteAccessHits() throws Exception { hit.score(1f); hit.shard(new SearchShardTarget("a", new ShardId("a", "indexUUID", 0), null)); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse( + SearchResponse response = new SearchResponse( new SearchHits(new SearchHit[] { hit }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1f), null, null, - null, false, false, - 1 - ); 
-        SearchResponse response = new SearchResponse(
-            internalSearchResponse,
+            null,
+            1,
             "",
             3,
             3,
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java
index d2b38f4b11ef8..6775dca424bf1 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java
@@ -167,12 +167,15 @@ public Collection<TriggeredWatch> findTriggeredWatches(Collection<Watch> watches
                 }
                 SearchScrollRequest request = new SearchScrollRequest(response.getScrollId());
                 request.scroll(scrollTimeout);
+                response.decRef();
                 response = client.searchScroll(request).actionGet(defaultSearchTimeout);
             }
         } finally {
             if (response != null) {
+                final String scrollId = response.getScrollId();
+                response.decRef();
                 ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
-                clearScrollRequest.addScrollId(response.getScrollId());
+                clearScrollRequest.addScrollId(scrollId);
                 client.clearScroll(clearScrollRequest).actionGet(scrollTimeout);
             }
         }
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/http/ExecutableHttpInput.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/http/ExecutableHttpInput.java
index c04026bbdbbd2..c0581d012dac9 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/http/ExecutableHttpInput.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/http/ExecutableHttpInput.java
@@ -11,7 +11,6 @@
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
 import org.elasticsearch.common.xcontent.XContentHelper;
-import org.elasticsearch.xcontent.NamedXContentRegistry;
 import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.xcontent.XContentType;
 import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext;
@@ -24,7 +23,6 @@
 import org.elasticsearch.xpack.watcher.support.Variables;
 import org.elasticsearch.xpack.watcher.support.XContentFilterKeysUtils;

-import java.io.InputStream;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -90,9 +88,11 @@ HttpInput.Result doExecute(WatchExecutionContext ctx, HttpRequest request) throw
         if (contentType != null) {
             // EMPTY is safe here because we never use namedObject
             try (
-                InputStream stream = response.body().streamInput();
-                XContentParser parser = contentType.xContent()
-                    .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)
+                XContentParser parser = XContentHelper.createParserNotCompressed(
+                    LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG,
+                    response.body(),
+                    contentType
+                )
             ) {
                 if (input.getExtractKeys() != null) {
                     payloadMap.putAll(XContentFilterKeysUtils.filterMapOrdered(input.getExtractKeys(), parser));
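The TriggeredWatchStore hunk above is part of making `SearchResponse` ref-counted: each scroll page is released once it has been consumed, and the scroll id of the last page is read before that page is released. A condensed sketch of that lifecycle, with an assumed `drain` helper wrapping the same `Client` calls the hunk uses:

```java
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.search.SearchHit;

final class ScrollDrainSketch {
    // "response" is an already-fetched first page whose reference the caller owns.
    static void drain(Client client, SearchResponse response, TimeValue timeout) {
        try {
            while (response.getHits().getHits().length != 0) {
                for (SearchHit hit : response.getHits()) {
                    // ... consume the hit ...
                }
                SearchScrollRequest scroll = new SearchScrollRequest(response.getScrollId());
                scroll.scroll(timeout);
                response.decRef(); // release the page we just consumed
                response = client.searchScroll(scroll).actionGet(timeout);
            }
        } finally {
            String scrollId = response.getScrollId(); // read before releasing
            response.decRef();
            ClearScrollRequest clear = new ClearScrollRequest();
            clear.addScrollId(scrollId);
            client.clearScroll(clear).actionGet(timeout);
        }
    }
}
```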
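Several files in this diff (ExecutableHttpInput above, then ReportingAttachmentParser, JiraAccount, JiraIssue, SentEvent, WatcherSearchTemplateService, and WatchParser) converge on the same replacement: `XContentHelper.createParserNotCompressed` stands in for the old `streamInput()` plus `xContent().createParser(...)` pair wherever the body is known not to be compressed. A hedged, self-contained sketch of the new idiom; the helper and the `XCONTENT_PARSER_CONFIG` constant are the ones used in the hunks, while `ParserSketch` and the sample body are mine:

```java
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentType;

import java.io.IOException;
import java.util.Map;

class ParserSketch {
    // Parse a JSON body into a map, assuming the bytes are not compressed.
    static Map<String, Object> parse(BytesReference body) throws IOException {
        try (
            XContentParser parser = XContentHelper.createParserNotCompressed(
                LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG,
                body,
                XContentType.JSON
            )
        ) {
            return parser.map();
        }
    }

    public static void main(String[] args) throws IOException {
        System.out.println(parse(new BytesArray("{\"ok\":true}"))); // {ok=true}
    }
}
```
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParser.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParser.java
index db1a9ed9f8c16..b43d462b71516 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParser.java
+++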
b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParser.java @@ -17,12 +17,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; import org.elasticsearch.xpack.core.watcher.watch.Payload; import org.elasticsearch.xpack.watcher.common.http.BasicAuth; @@ -38,7 +38,6 @@ import org.elasticsearch.xpack.watcher.support.Variables; import java.io.IOException; -import java.io.InputStream; import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.Arrays; @@ -358,11 +357,10 @@ private HttpResponse requestReportGeneration(String watchId, String attachmentId private static String extractIdFromJson(String watchId, String attachmentId, BytesReference body) throws IOException { // EMPTY is safe here becaus we never call namedObject try ( - InputStream stream = body.streamInput(); - XContentParser parser = JsonXContent.jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - stream + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + body, + XContentType.JSON ) ) { KibanaReportingPayload payload = new KibanaReportingPayload(); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraAccount.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraAccount.java index 2ba4506c9a2a4..74fe444f2dfdd 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraAccount.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraAccount.java @@ -13,8 +13,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Booleans; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -28,7 +28,6 @@ import org.elasticsearch.xpack.watcher.common.http.Scheme; import java.io.IOException; -import java.io.InputStream; import java.io.UncheckedIOException; import java.net.URI; import java.net.URISyntaxException; @@ -83,9 +82,11 @@ public JiraAccount(String name, Settings settings, HttpClient httpClient) { settings.getAsSettings(ISSUE_DEFAULTS_SETTING).toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); try ( - InputStream stream = BytesReference.bytes(builder).streamInput(); - XContentParser parser = XContentType.JSON.xContent() - .createParser(new NamedXContentRegistry(Collections.emptyList()), LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = 
XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + BytesReference.bytes(builder), + XContentType.JSON + ) ) { this.issueDefaults = Collections.unmodifiableMap(parser.map()); } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraIssue.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraIssue.java index 1f3a59efcfc6e..696f43dcb574c 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraIssue.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraIssue.java @@ -9,13 +9,13 @@ import org.apache.http.HttpStatus; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.watcher.actions.jira.JiraAction; import org.elasticsearch.xpack.watcher.common.http.HttpRequest; import org.elasticsearch.xpack.watcher.common.http.HttpResponse; @@ -145,11 +145,10 @@ static String resolveFailureReason(HttpResponse response) { final List errors = new ArrayList<>(); // EMPTY is safe here because we never call namedObject try ( - InputStream stream = response.body().streamInput(); - XContentParser parser = JsonXContent.jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - stream + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + response.body(), + XContentType.JSON ) ) { XContentParser.Token token = parser.currentToken(); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEventContext.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEventContext.java index acb76709b647d..b56ad278d0b2a 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEventContext.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEventContext.java @@ -95,103 +95,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder.endObject(); } - public static IncidentEventContext parse(XContentParser parser) throws IOException { - Type type = null; - String href = null; - String text = null; - String src = null; - String alt = null; - - String currentFieldName = null; - XContentParser.Token token; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (Strings.hasLength(currentFieldName)) { - if (XField.TYPE.match(currentFieldName, parser.getDeprecationHandler())) { - try { - type = Type.valueOf(parser.text().toUpperCase(Locale.ROOT)); - } catch (IllegalArgumentException e) { - String msg = "could not parse trigger incident event context. 
unknown context type [{}]"; - throw new ElasticsearchParseException(msg, parser.text()); - } - } else { - if (XField.HREF.match(currentFieldName, parser.getDeprecationHandler())) { - href = parser.text(); - } else if (XField.TEXT.match(currentFieldName, parser.getDeprecationHandler())) { - text = parser.text(); - } else if (XField.SRC.match(currentFieldName, parser.getDeprecationHandler())) { - src = parser.text(); - } else if (XField.ALT.match(currentFieldName, parser.getDeprecationHandler())) { - alt = parser.text(); - } else { - String msg = "could not parse trigger incident event context. unknown field [{}]"; - throw new ElasticsearchParseException(msg, currentFieldName); - } - } - } - } - - return createAndValidateTemplate(type, href, src, alt, text); - } - - private static IncidentEventContext createAndValidateTemplate(Type type, String href, String src, String alt, String text) { - if (type == null) { - throw new ElasticsearchParseException( - "could not parse trigger incident event context. missing required field [{}]", - XField.TYPE.getPreferredName() - ); - } - - switch (type) { - case LINK -> { - if (href == null) { - throw new ElasticsearchParseException( - "could not parse trigger incident event context. missing required field " + "[{}] for [{}] context", - XField.HREF.getPreferredName(), - Type.LINK.name().toLowerCase(Locale.ROOT) - ); - } - if (src != null) { - throw new ElasticsearchParseException( - "could not parse trigger incident event context. unexpected field [{}] for " + "[{}] context", - XField.SRC.getPreferredName(), - Type.LINK.name().toLowerCase(Locale.ROOT) - ); - } - if (alt != null) { - throw new ElasticsearchParseException( - "could not parse trigger incident event context. unexpected field [{}] for " + "[{}] context", - XField.ALT.getPreferredName(), - Type.LINK.name().toLowerCase(Locale.ROOT) - ); - } - return link(href, text); - } - case IMAGE -> { - if (src == null) { - throw new ElasticsearchParseException( - "could not parse trigger incident event context. missing required field " + "[{}] for [{}] context", - XField.SRC.getPreferredName(), - Type.IMAGE.name().toLowerCase(Locale.ROOT) - ); - } - if (text != null) { - throw new ElasticsearchParseException( - "could not parse trigger incident event context. unexpected field [{}] for " + "[{}] context", - XField.TEXT.getPreferredName(), - Type.IMAGE.name().toLowerCase(Locale.ROOT) - ); - } - return image(src, href, alt); - } - default -> throw new ElasticsearchParseException( - "could not parse trigger incident event context. 
unknown context type [{}]", - type - ); - } - } - public static class Template implements ToXContentObject { final Type type; diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/SentEvent.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/SentEvent.java index 206bddf1459cb..566e3650e9ca9 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/SentEvent.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/SentEvent.java @@ -8,13 +8,13 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; import org.elasticsearch.xpack.watcher.actions.pagerduty.PagerDutyAction; import org.elasticsearch.xpack.watcher.common.http.HttpRequest; @@ -119,10 +119,9 @@ private static String resolveFailureReason(HttpResponse response) { // lets first try to parse the error response in the body // based on https://developer.pagerduty.com/documentation/rest/errors try ( - InputStream stream = response.body().streamInput(); - XContentParser parser = JsonXContent.jsonXContent + XContentParser parser = XContentHelper // EMPTY is safe here because we never call namedObject - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) + .createParserNotCompressed(LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, response.body(), XContentType.JSON) ) { parser.nextToken(); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateService.java index 195f99e127e3e..a4c0723455d2d 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateService.java @@ -16,7 +16,6 @@ import org.elasticsearch.script.TemplateScript; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; import org.elasticsearch.xpack.core.watcher.watch.Payload; @@ -24,7 +23,6 @@ import org.elasticsearch.xpack.watcher.support.Variables; import java.io.IOException; -import java.io.InputStream; import java.util.Map; /** @@ -69,9 +67,11 @@ public SearchRequest toSearchRequest(WatcherSearchTemplateRequest request) throw BytesReference source = request.getSearchSource(); if (source != null && source.length() > 0) { try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(source)) - 
.createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG.withRegistry(xContentRegistry), + source, + XContentHelper.xContentType(source) + ) ) { sourceBuilder.parseXContent(parser, true); searchRequest.source(sourceBuilder); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/watch/WatchParser.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/watch/WatchParser.java index 054645c4197a8..225b4c5d57d65 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/watch/WatchParser.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/watch/WatchParser.java @@ -11,9 +11,9 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.watcher.actions.ActionRegistry; @@ -36,7 +36,6 @@ import org.elasticsearch.xpack.watcher.trigger.TriggerService; import java.io.IOException; -import java.io.InputStream; import java.time.Clock; import java.time.ZoneOffset; import java.time.ZonedDateTime; @@ -156,9 +155,8 @@ private Watch parse( } // EMPTY is safe here because we never use namedObject try ( - InputStream stream = source.streamInput(); WatcherXContentParser parser = new WatcherXContentParser( - xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream), + XContentHelper.createParserNotCompressed(LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, source, xContentType), now, withSecrets ? 
cryptoService : null, allowRedactedPasswords diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java index 1dede3f4e135c..c2ed68d8fa1bd 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.search.ClearScrollResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.search.TransportClearScrollAction; @@ -43,6 +42,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -172,29 +172,21 @@ void stopExecutor() {} return null; }).when(client).execute(eq(RefreshAction.INSTANCE), any(RefreshRequest.class), anyActionListener()); - // empty scroll response, no further scrolling needed - SearchResponseSections scrollSearchSections = new SearchResponseSections( - SearchHits.EMPTY_WITH_TOTAL_HITS, - null, - null, - false, - false, - null, - 1 - ); - SearchResponse scrollSearchResponse = new SearchResponse( - scrollSearchSections, - "scrollId", - 1, - 1, - 0, - 10, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ); doAnswer(invocation -> { ActionListener listener = (ActionListener) invocation.getArguments()[2]; - listener.onResponse(scrollSearchResponse); + // empty scroll response, no further scrolling needed + ActionListener.respondAndRelease( + listener, + SearchResponseUtils.emptyWithTotalHits( + "scrollId", + 1, + 1, + 0, + 10, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ) + ); return null; }).when(client).execute(eq(TransportSearchScrollAction.TYPE), any(SearchScrollRequest.class), anyActionListener()); @@ -221,20 +213,27 @@ void stopExecutor() {} when(parser.parse(eq(id), eq(true), any(), eq(XContentType.JSON), anyLong(), anyLong())).thenReturn(watch); } SearchHits searchHits = new SearchHits(hits, new TotalHits(count, TotalHits.Relation.EQUAL_TO), 1.0f); - SearchResponseSections sections = new SearchResponseSections(searchHits, null, null, false, false, null, 1); - SearchResponse searchResponse = new SearchResponse( - sections, - "scrollId", - 1, - 1, - 0, - 10, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ); doAnswer(invocation -> { ActionListener listener = (ActionListener) invocation.getArguments()[2]; - listener.onResponse(searchResponse); + ActionListener.respondAndRelease( + listener, + new SearchResponse( + searchHits, + null, + null, + false, + false, + null, + 1, + "scrollId", + 1, + 1, + 0, + 10, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ) + ); return null; }).when(client).execute(eq(TransportSearchAction.TYPE), any(SearchRequest.class), anyActionListener()); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java index 89ddb2c0011bb..fa0dc89fd5106 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java @@ -21,7 +21,7 @@ import org.elasticsearch.script.ScriptMetadata; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptType; -import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -95,8 +95,7 @@ public void init() throws IOException { public void testExecute() throws Exception { ScriptCondition condition = new ScriptCondition(mockScript("ctx.payload.hits.total.value > 1"), scriptService); - SearchResponse response = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse response = SearchResponseUtils.emptyWithTotalHits( "", 3, 3, @@ -121,8 +120,7 @@ public void testExecuteMergedParams() throws Exception { singletonMap("threshold", 1) ); ScriptCondition executable = new ScriptCondition(script, scriptService); - SearchResponse response = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse response = SearchResponseUtils.emptyWithTotalHits( "", 3, 3, @@ -147,8 +145,7 @@ public void testParserValid() throws Exception { parser.nextToken(); ExecutableCondition executable = ScriptCondition.parse(scriptService, "_watch", parser); - SearchResponse response = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse response = SearchResponseUtils.emptyWithTotalHits( "", 3, 3, @@ -223,8 +220,7 @@ public void testScriptConditionParser_badLang() throws Exception { public void testScriptConditionThrowException() throws Exception { ScriptCondition condition = new ScriptCondition(mockScript("null.foo"), scriptService); - SearchResponse response = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse response = SearchResponseUtils.emptyWithTotalHits( "", 3, 3, @@ -247,8 +243,7 @@ public void testScriptConditionAccessCtx() throws Exception { mockScript("ctx.trigger.scheduled_time.toInstant().toEpochMill() < new Date().time"), scriptService ); - SearchResponse response = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse response = SearchResponseUtils.emptyWithTotalHits( "", 3, 3, diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java index 0f47df9dff12b..60fa2581b4218 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java @@ -46,8 +46,8 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import 
org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.ToXContent; @@ -210,13 +210,16 @@ public void testFindTriggeredWatchesGoodCase() { SearchResponse searchResponse1 = mock(SearchResponse.class); when(searchResponse1.getSuccessfulShards()).thenReturn(1); when(searchResponse1.getTotalShards()).thenReturn(1); - BytesArray source = new BytesArray("{}"); - SearchHit hit = new SearchHit(0, "first_foo"); - hit.version(1L); - hit.shard(new SearchShardTarget("_node_id", new ShardId(index, 0), null)); - hit.sourceRef(source); - SearchHits hits = new SearchHits(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f); - when(searchResponse1.getHits()).thenReturn(hits); + final BytesArray source = new BytesArray("{}"); + { + final SearchHit hit = new SearchHit(0, "first_foo"); + hit.version(1L); + hit.shard(new SearchShardTarget("_node_id", new ShardId(index, 0), null)); + hit.sourceRef(source); + when(searchResponse1.getHits()).thenReturn( + new SearchHits(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f) + ); + } when(searchResponse1.getScrollId()).thenReturn("_scrollId"); doAnswer(invocation -> { @SuppressWarnings("unchecked") @@ -226,40 +229,36 @@ public void testFindTriggeredWatchesGoodCase() { }).when(client).execute(eq(TransportSearchAction.TYPE), any(), any()); // First return a scroll response with a single hit and then with no hits - hit = new SearchHit(0, "second_foo"); - hit.version(1L); - hit.shard(new SearchShardTarget("_node_id", new ShardId(index, 0), null)); - hit.sourceRef(source); - hits = new SearchHits(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f); - SearchResponse searchResponse2 = new SearchResponse( - new InternalSearchResponse(hits, null, null, null, false, null, 1), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ); - SearchResponse searchResponse3 = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, - "_scrollId2", - 1, - 1, - 0, - 1, - null, - null - ); - doAnswer(invocation -> { SearchScrollRequest request = (SearchScrollRequest) invocation.getArguments()[1]; @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[2]; if (request.scrollId().equals("_scrollId")) { - listener.onResponse(searchResponse2); + final var hit2 = new SearchHit(0, "second_foo"); + hit2.version(1L); + hit2.shard(new SearchShardTarget("_node_id", new ShardId(index, 0), null)); + hit2.sourceRef(source); + ActionListener.respondAndRelease( + listener, + new SearchResponse( + new SearchHits(new SearchHit[] { hit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f), + null, + null, + false, + null, + null, + 1, + "_scrollId1", + 1, + 1, + 0, + 1, + null, + null + ) + ); } else if (request.scrollId().equals("_scrollId1")) { - listener.onResponse(searchResponse3); + ActionListener.respondAndRelease(listener, SearchResponseUtils.emptyWithTotalHits("_scrollId2", 1, 1, 0, 1, null, null)); } else { listener.onFailure(new ElasticsearchException("test issue")); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java index e081063f47a7e..6d740bc5c5e4f 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java +++ 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.watcher.support; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.AdminClient; import org.elasticsearch.client.internal.Client; @@ -43,7 +43,7 @@ import org.elasticsearch.xpack.core.ilm.LifecycleType; import org.elasticsearch.xpack.core.ilm.OperationMode; import org.elasticsearch.xpack.core.ilm.TimeseriesLifecycleType; -import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField; import org.elasticsearch.xpack.watcher.Watcher; import org.junit.Before; @@ -123,19 +123,19 @@ public void testThatNonExistingTemplatesAreAddedImmediately() { ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyMap(), nodes); registry.clusterChanged(event); - ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass( - PutComposableIndexTemplateAction.Request.class + ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass( + TransportPutComposableIndexTemplateAction.Request.class ); - verify(client, times(1)).execute(same(PutComposableIndexTemplateAction.INSTANCE), argumentCaptor.capture(), any()); + verify(client, times(1)).execute(same(TransportPutComposableIndexTemplateAction.TYPE), argumentCaptor.capture(), any()); // now delete one template from the cluster state and lets retry Map existingTemplates = new HashMap<>(); existingTemplates.put(WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME, INDEX_TEMPLATE_VERSION); ClusterChangedEvent newEvent = createClusterChangedEvent(existingTemplates, nodes); registry.clusterChanged(newEvent); - argumentCaptor = ArgumentCaptor.forClass(PutComposableIndexTemplateAction.Request.class); - verify(client, times(1)).execute(same(PutComposableIndexTemplateAction.INSTANCE), argumentCaptor.capture(), any()); - PutComposableIndexTemplateAction.Request req = argumentCaptor.getAllValues() + argumentCaptor = ArgumentCaptor.forClass(TransportPutComposableIndexTemplateAction.Request.class); + verify(client, times(1)).execute(same(TransportPutComposableIndexTemplateAction.TYPE), argumentCaptor.capture(), any()); + TransportPutComposableIndexTemplateAction.Request req = argumentCaptor.getAllValues() .stream() .filter(r -> r.name().equals(WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME)) .findFirst() @@ -156,10 +156,10 @@ public void testThatNonExistingTemplatesAreAddedEvenWithILMUsageDisabled() { ); ClusterChangedEvent event = createClusterChangedEvent(Settings.EMPTY, Collections.emptyMap(), Collections.emptyMap(), nodes); registry.clusterChanged(event); - ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass( - PutComposableIndexTemplateAction.Request.class + ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass( + TransportPutComposableIndexTemplateAction.Request.class ); - verify(client, times(1)).execute(same(PutComposableIndexTemplateAction.INSTANCE), argumentCaptor.capture(), any()); + verify(client, 
times(1)).execute(same(TransportPutComposableIndexTemplateAction.TYPE), argumentCaptor.capture(), any()); // now delete one template from the cluster state and lets retry Map existingTemplates = new HashMap<>(); @@ -167,9 +167,9 @@ public void testThatNonExistingTemplatesAreAddedEvenWithILMUsageDisabled() { ClusterChangedEvent newEvent = createClusterChangedEvent(existingTemplates, nodes); registry.clusterChanged(newEvent); ArgumentCaptor captor = ArgumentCaptor.forClass(PutIndexTemplateRequest.class); - verify(client, times(1)).execute(same(PutComposableIndexTemplateAction.INSTANCE), argumentCaptor.capture(), any()); + verify(client, times(1)).execute(same(TransportPutComposableIndexTemplateAction.TYPE), argumentCaptor.capture(), any()); captor.getAllValues().forEach(req -> assertNull(req.settings().get("index.lifecycle.name"))); - verify(client, times(0)).execute(eq(PutLifecycleAction.INSTANCE), any(), any()); + verify(client, times(0)).execute(eq(ILMActions.PUT), any(), any()); } public void testThatNonExistingPoliciesAreAddedImmediately() { @@ -178,7 +178,7 @@ public void testThatNonExistingPoliciesAreAddedImmediately() { ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyMap(), nodes); registry.clusterChanged(event); - verify(client, times(1)).execute(eq(PutLifecycleAction.INSTANCE), any(), any()); + verify(client, times(1)).execute(eq(ILMActions.PUT), any(), any()); } public void testPolicyAlreadyExists() { @@ -192,7 +192,7 @@ public void testPolicyAlreadyExists() { policyMap.put(policy.getName(), policy); ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyMap(), policyMap, nodes); registry.clusterChanged(event); - verify(client, times(0)).execute(eq(PutLifecycleAction.INSTANCE), any(), any()); + verify(client, times(0)).execute(eq(ILMActions.PUT), any(), any()); } public void testNoPolicyButILMDisabled() { @@ -208,7 +208,7 @@ public void testNoPolicyButILMDisabled() { ); ClusterChangedEvent event = createClusterChangedEvent(Settings.EMPTY, Collections.emptyMap(), Collections.emptyMap(), nodes); registry.clusterChanged(event); - verify(client, times(0)).execute(eq(PutLifecycleAction.INSTANCE), any(), any()); + verify(client, times(0)).execute(eq(ILMActions.PUT), any(), any()); } public void testPolicyAlreadyExistsButDiffers() throws IOException { @@ -228,7 +228,7 @@ public void testPolicyAlreadyExistsButDiffers() throws IOException { policyMap.put(policy.getName(), different); ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyMap(), policyMap, nodes); registry.clusterChanged(event); - verify(client, times(0)).execute(eq(PutLifecycleAction.INSTANCE), any(), any()); + verify(client, times(0)).execute(eq(ILMActions.PUT), any(), any()); } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java index d06ee606f31ce..172338d60bbe1 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java @@ -22,8 +22,8 @@ import org.elasticsearch.script.ScriptEngine; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.builder.SearchSourceBuilder; -import 
org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -91,8 +91,7 @@ public void setup() { public void testExecute() throws Exception { ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(SearchRequest.class); PlainActionFuture searchFuture = new PlainActionFuture<>(); - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( "", 1, 1, @@ -132,8 +131,7 @@ public void testExecute() throws Exception { public void testDifferentSearchType() throws Exception { ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(SearchRequest.class); PlainActionFuture searchFuture = new PlainActionFuture<>(); - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( "", 1, 1, @@ -187,8 +185,7 @@ public void testParserValid() throws Exception { public void testThatEmptyRequestBodyWorks() throws Exception { ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(SearchRequest.class); PlainActionFuture searchFuture = new PlainActionFuture<>(); - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( "", 1, 1, diff --git a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java index a17cb7474a681..cdb14c348bbf8 100644 --- a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java +++ b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java @@ -60,6 +60,7 @@ import org.elasticsearch.index.mapper.LuceneDocument; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperTestCase; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.MappingLookup; @@ -1217,7 +1218,7 @@ protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) } @Override - protected Function loadBlockExpected() { + protected Function loadBlockExpected(MapperService mapper, String loaderFieldName) { return v -> ((BytesRef) v).utf8ToString(); } diff --git a/x-pack/plugin/write-load-forecaster/src/internalClusterTest/java/org/elasticsearch/xpack/writeloadforecaster/WriteLoadForecasterIT.java b/x-pack/plugin/write-load-forecaster/src/internalClusterTest/java/org/elasticsearch/xpack/writeloadforecaster/WriteLoadForecasterIT.java index d983747571b34..cb93725b320d1 100644 --- a/x-pack/plugin/write-load-forecaster/src/internalClusterTest/java/org/elasticsearch/xpack/writeloadforecaster/WriteLoadForecasterIT.java +++ b/x-pack/plugin/write-load-forecaster/src/internalClusterTest/java/org/elasticsearch/xpack/writeloadforecaster/WriteLoadForecasterIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.admin.indices.stats.IndexShardStats; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; -import 
org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.datastreams.CreateDataStreamAction;
 import org.elasticsearch.action.index.IndexRequest;
@@ -151,8 +151,8 @@ private void setUpDataStreamWriteDocsAndRollover(String dataStreamName, Settings
         assertAcked(
             client().execute(
-                PutComposableIndexTemplateAction.INSTANCE,
-                new PutComposableIndexTemplateAction.Request("my-template").indexTemplate(
+                TransportPutComposableIndexTemplateAction.TYPE,
+                new TransportPutComposableIndexTemplateAction.Request("my-template").indexTemplate(
                     ComposableIndexTemplate.builder()
                         .indexPatterns(List.of("logs-*"))
                         .template(new Template(indexSettings, null, null))
diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java
index 0bc9101301a54..96acaaa5b41b4 100644
--- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java
+++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/AbstractXpackFullClusterRestartTestCase.java
@@ -36,6 +36,7 @@ public abstract class AbstractXpackFullClusterRestartTestCase extends Parameteri
         .keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key"))
         .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode")
         .feature(FeatureFlag.TIME_SERIES_MODE)
+        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
         .build();

     public AbstractXpackFullClusterRestartTestCase(FullClusterRestartUpgradeStatus upgradeStatus) {
diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java
index 3bff0027f8752..85126d9f1f12e 100644
--- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java
+++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java
@@ -24,6 +24,7 @@
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
 import org.elasticsearch.core.UpdateForV9;
+import org.elasticsearch.index.mapper.FieldNamesFieldMapper;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.rest.action.search.RestSearchAction;
 import org.elasticsearch.test.StreamsUtils;
@@ -52,6 +53,8 @@
 import java.util.stream.Collectors;

 import static org.elasticsearch.core.TimeValue.timeValueSeconds;
+import static org.elasticsearch.test.MapMatcher.assertMap;
+import static org.elasticsearch.test.MapMatcher.matchesMap;
 import static org.elasticsearch.upgrades.FullClusterRestartIT.assertNumHits;
 import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.containsString;
@@ -990,6 +993,65 @@ public void testDataStreams() throws Exception {
         assertNumHits("ds", 1, 1);
     }

+    /**
+     * Tests that a single document in an index with {@code _field_names} disabled survives the restart and stays readable. Super basic smoke test.
+ */ + public void testDisableFieldNameField() throws IOException { + assumeTrue("can only disable field names field before 8.0", Version.fromString(getOldClusterVersion()).before(Version.V_8_0_0)); + String docLocation = "/nofnf/_doc/1"; + String doc = """ + { + "dv": "test", + "no_dv": "test" + }"""; + + if (isRunningAgainstOldCluster()) { + Request createIndex = new Request("PUT", "/nofnf"); + createIndex.setJsonEntity(""" + { + "settings": { + "index": { + "number_of_replicas": 0 + } + }, + "mappings": { + "_field_names": { "enabled": false }, + "properties": { + "dv": { "type": "keyword" }, + "no_dv": { "type": "keyword", "doc_values": false } + } + } + }"""); + createIndex.setOptions( + RequestOptions.DEFAULT.toBuilder() + .setWarningsHandler(warnings -> false == warnings.equals(List.of(FieldNamesFieldMapper.ENABLED_DEPRECATION_MESSAGE))) + ); + client().performRequest(createIndex); + + Request createDoc = new Request("PUT", docLocation); + createDoc.addParameter("refresh", "true"); + createDoc.setJsonEntity(doc); + client().performRequest(createDoc); + } + + Request getRequest = new Request("GET", docLocation); + assertThat(toStr(client().performRequest(getRequest)), containsString(doc)); + + if (isRunningAgainstOldCluster() == false) { + Request esql = new Request("POST", "_query"); + esql.setJsonEntity(""" + { + "query": "FROM nofnf | LIMIT 1" + }"""); + // {"columns":[{"name":"dv","type":"keyword"},{"name":"no_dv","type":"keyword"}],"values":[["test",null]]} + assertMap( + entityAsMap(client().performRequest(esql)), + matchesMap().entry("columns", List.of(Map.of("name", "dv", "type", "keyword"), Map.of("name", "no_dv", "type", "keyword"))) + .entry("values", List.of(List.of("test", "test"))) + ); + } + } + private static void createComposableTemplate(RestClient client, String templateName, String indexPattern) throws IOException { StringEntity templateJSON = new StringEntity(Strings.format(""" { diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java index b2594eaf02ea4..c6ef15bace343 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java @@ -10,6 +10,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import org.apache.http.util.EntityUtils; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -36,6 +37,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103808") public class MLModelDeploymentFullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { // See PyTorchModelIT for how this model was created diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle index cce18a4bd1579..54b455d483b9a 100644 --- a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle +++ b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle @@ 
-8,7 +8,7 @@ apply plugin: 'elasticsearch.rest-resources' restResources { restApi { include '_common', 'bulk', 'field_caps', 'security', 'search', 'clear_scroll', 'scroll', 'async_search', 'cluster', - 'indices', 'open_point_in_time', 'close_point_in_time', 'terms_enum' + 'indices', 'open_point_in_time', 'close_point_in_time', 'terms_enum', 'esql' } } @@ -23,6 +23,8 @@ def fulfillingCluster = testClusters.register('fulfilling-cluster') { module ':modules:data-streams' module ':x-pack:plugin:mapper-constant-keyword' module ':x-pack:plugin:async-search' + module ':x-pack:plugin:ql' + module ':x-pack:plugin:esql' user username: "test_user", password: "x-pack-test-password" } @@ -34,6 +36,8 @@ def queryingCluster = testClusters.register('querying-cluster') { module ':modules:data-streams' module ':x-pack:plugin:mapper-constant-keyword' module ':x-pack:plugin:async-search' + module ':x-pack:plugin:ql' + module ':x-pack:plugin:esql' setting 'cluster.remote.connections_per_cluster', "1" user username: "test_user", password: "x-pack-test-password" diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/fulfilling_cluster/10_basic.yml b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/fulfilling_cluster/10_basic.yml index e91a87b65c013..36002f3cde470 100644 --- a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/fulfilling_cluster/10_basic.yml +++ b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/fulfilling_cluster/10_basic.yml @@ -23,7 +23,7 @@ setup: "indices": [ { "names": ["single_doc_index", "secure_alias", "test_index", "aliased_test_index", "field_caps_index_1", - "field_caps_index_3", "point_in_time_index", "simple-data-stream1", "simple-data-stream2"], + "field_caps_index_3", "point_in_time_index", "simple-data-stream1", "simple-data-stream2", "esql_index"], "privileges": ["read", "read_cross_cluster"] } ] @@ -46,7 +46,7 @@ setup: "indices": [ { "names": ["single_doc_index", "secure_alias", "test_index", "aliased_test_index", "field_caps_index_1", - "field_caps_index_3", "point_in_time_index", "simple-data-stream1", "simple-data-stream2"], + "field_caps_index_3", "point_in_time_index", "simple-data-stream1", "simple-data-stream2", "esql_index"], "privileges": ["read", "read_cross_cluster"] } ] @@ -429,3 +429,31 @@ setup: - '{"foo": "foo"}' - '{"index": {"_index": "terms_enum_index"}}' - '{"foo": "foobar"}' + + - do: + indices.create: + index: esql_index + body: + mappings: + properties: + since: + type: date + format: "yyyy-MM-dd" + cost: + type: long + tag: + type: keyword + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "esql_index"}}' + - '{"since" : "2023-01-01", "cost": 1000, "tag": "computer"}' + - '{"index": {"_index": "esql_index"}}' + - '{ "since" : "2023-01-02", "cost": 1200, "tag": "computer"}' + - '{"index": {"_index": "esql_index"}}' + - '{"since" : "2023-01-03", "cost": 450, "tag": "tablet"}' + - '{"index": {"_index": "esql_index"}}' + - '{"since" : "2023-01-04", "cost": 100, "tag": "headphone"}' + - '{"index": {"_index": "esql_index"}}' + - '{"since" : "2023-01-05", "cost": 20, "tag": "headphone"}' diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/100_resolve_index.yml 
b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/100_resolve_index.yml index 36ea0b65f2aa5..b9dbb0a070af4 100644 --- a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/100_resolve_index.yml +++ b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/100_resolve_index.yml @@ -26,23 +26,25 @@ - match: {indices.4.name: my_remote_cluster:closed_index} - match: {indices.4.aliases.0: aliased_closed_index} - match: {indices.4.attributes.0: closed} - - match: {indices.5.name: my_remote_cluster:field_caps_index_1} - - match: {indices.5.attributes.0: open} - - match: {indices.6.name: my_remote_cluster:field_caps_index_3} + - match: {indices.5.name: my_remote_cluster:esql_index } + - match: {indices.5.attributes.0: open } + - match: {indices.6.name: my_remote_cluster:field_caps_index_1} - match: {indices.6.attributes.0: open} - - match: {indices.7.name: my_remote_cluster:point_in_time_index } - - match: {indices.7.attributes.0: open } - - match: {indices.8.name: my_remote_cluster:secured_via_alias} - - match: {indices.8.attributes.0: open} - - match: {indices.9.name: my_remote_cluster:shared_index} + - match: {indices.7.name: my_remote_cluster:field_caps_index_3} + - match: {indices.7.attributes.0: open} + - match: {indices.8.name: my_remote_cluster:point_in_time_index } + - match: {indices.8.attributes.0: open } + - match: {indices.9.name: my_remote_cluster:secured_via_alias} - match: {indices.9.attributes.0: open} - - match: {indices.10.name: my_remote_cluster:single_doc_index} + - match: {indices.10.name: my_remote_cluster:shared_index} - match: {indices.10.attributes.0: open} - - match: {indices.11.name: my_remote_cluster:terms_enum_index } - - match: {indices.11.attributes.0: open } - - match: {indices.12.name: my_remote_cluster:test_index} - - match: {indices.12.aliases.0: aliased_test_index} - - match: {indices.12.attributes.0: open} + - match: {indices.11.name: my_remote_cluster:single_doc_index} + - match: {indices.11.attributes.0: open} + - match: {indices.12.name: my_remote_cluster:terms_enum_index } + - match: {indices.12.attributes.0: open } + - match: {indices.13.name: my_remote_cluster:test_index} + - match: {indices.13.aliases.0: aliased_test_index} + - match: {indices.13.attributes.0: open} - match: {aliases.0.name: my_remote_cluster:.security} - match: {aliases.0.indices.0: .security-7} - match: {aliases.1.name: my_remote_cluster:aliased_closed_index} diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/10_basic.yml b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/10_basic.yml index 4a5905a11feed..cbbfbe2372f3e 100644 --- a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/10_basic.yml +++ b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/10_basic.yml @@ -52,6 +52,7 @@ teardown: security.delete_role: name: "x_cluster_role" ignore: 404 + --- "Index data and search on the mixed cluster": @@ -236,6 +237,9 @@ teardown: - match: { aggregations.cluster.buckets.0.key: "local_cluster" } - match: { aggregations.cluster.buckets.0.doc_count: 5 } + - do: + indices.delete: + index: local_index 
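A brief note on the 100_resolve_index.yml renumbering above: the resolve-index response lists indices in lexicographic order, so adding `esql_index` to the fulfilling cluster shifts every later entry up by one position, which is all those assertion changes amount to. A throwaway illustration of that ordering assumption (names taken from the test, the class itself is mine):

```java
import java.util.List;
import java.util.stream.Stream;

class ResolveOrderSketch {
    public static void main(String[] args) {
        List<String> resolved = Stream.of(
            "test_index", "esql_index", "field_caps_index_1", "shared_index"
        ).sorted().toList();
        // [esql_index, field_caps_index_1, shared_index, test_index]
        System.out.println(resolved);
    }
}
```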
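The new 80_esql.yml test below runs `STATS total = sum(cost) by tag` across the remote `esql_index` (seeded in the fulfilling cluster's 10_basic.yml above) and the local `esql_local`. A quick standalone check of the expected buckets, summing the seeded documents by tag (plain Java, mirroring the YAML fixtures):

```java
import java.util.Map;
import java.util.TreeMap;

class BucketCheck {
    public static void main(String[] args) {
        Map<String, Long> totals = new TreeMap<>(); // sorted by tag, like SORT tag
        // remote cluster: esql_index
        totals.merge("computer", 1000L + 1200L, Long::sum);
        totals.merge("tablet", 450L, Long::sum);
        totals.merge("headphone", 100L + 20L, Long::sum);
        // local cluster: esql_local
        totals.merge("monitor", 750L + 250L, Long::sum);
        totals.merge("laptop", 2100L, Long::sum);
        totals.merge("tablet", 100L, Long::sum);
        totals.merge("headphone", 50L, Long::sum);
        // {computer=2200, headphone=170, laptop=2100, monitor=1000, tablet=550}
        System.out.println(totals);
    }
}
```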
--- "Add persistent remote cluster based on the preset cluster": - do: diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/80_esql.yml b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/80_esql.yml new file mode 100644 index 0000000000000..1894a26e80f33 --- /dev/null +++ b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/80_esql.yml @@ -0,0 +1,143 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + - do: + security.put_user: + username: "joe" + body: > + { + "password": "s3krit-password", + "roles" : [ "x_cluster_role" ] + } + - do: + security.put_role: + name: "x_cluster_role" + body: > + { + "cluster": [], + "indices": [ + { + "names": ["local_index", "esql_local"], + "privileges": ["read"] + } + ] + } + + - do: + security.put_user: + username: "remote" + body: > + { + "password": "s3krit-password", + "roles" : [ "remote_ccs" ] + } + - do: + security.put_role: + name: "remote_ccs" + body: > + { + } +--- +teardown: + - do: + security.delete_user: + username: "joe" + ignore: 404 + - do: + security.delete_role: + name: "x_cluster_role" + ignore: 404 + +--- +"Index data and search on the mixed cluster": + + - do: + indices.create: + index: esql_local + body: + mappings: + properties: + since: + type: date + format: "yyyy-MM-dd" + cost: + type: long + tag: + type: keyword + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "esql_local"}}' + - '{"since" : "2023-01-01", "cost": 750, "tag": "monitor"}' + - '{"index": {"_index": "esql_local"}}' + - '{ "since" : "2023-01-02", "cost": 2100, "tag": "laptop"}' + - '{"index": {"_index": "esql_local"}}' + - '{"since" : "2023-01-03", "cost": 250, "tag": "monitor"}' + - '{"index": {"_index": "esql_local"}}' + - '{"since" : "2023-01-04", "cost": 100, "tag": "tablet"}' + - '{"index": {"_index": "esql_local"}}' + - '{"since" : "2023-01-05", "cost": 50, "tag": "headphone"}' + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdC1wYXNzd29yZA==" } + esql.query: + body: + query: 'FROM *:esql*,esql_* | STATS total = sum(cost) by tag | SORT tag | LIMIT 10' + + - match: {columns.0.name: "total"} + - match: {columns.0.type: "long"} + - match: {columns.1.name: "tag"} + - match: {columns.1.type: "keyword"} + + - match: {values.0.0: 2200} + - match: {values.0.1: "computer"} + - match: {values.1.0: 170} + - match: {values.1.1: "headphone"} + - match: {values.2.0: 2100 } + - match: {values.2.1: "laptop" } + - match: {values.3.0: 1000 } + - match: {values.3.1: "monitor" } + - match: {values.4.0: 550 } + - match: {values.4.1: "tablet" } + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdC1wYXNzd29yZA==" } + esql.query: + body: + query: 'FROM *:esql*,esql_* [METADATA _index] | sort cost | KEEP _index, tag, cost | LIMIT 10' + filter: + range: + since: + gte: "2023-01-02" + lte: "2023-01-03" + format: "yyyy-MM-dd" + + - match: {columns.0.name: "_index"} + - match: {columns.0.type: "keyword"} + - match: {columns.1.name: "tag"} + - match: {columns.1.type: "keyword"} + - match: {columns.2.name: "cost" } + - match: {columns.2.type: "long" } + + - match: {values.0.0: "esql_local"} + - match: {values.0.1: "monitor"} + - match: {values.0.2: 250 } + - match: {values.1.0: "my_remote_cluster:esql_index" } + - match: {values.1.1: "tablet"} + - match: {values.1.2: 450 } + - match: 
{values.2.0: "my_remote_cluster:esql_index" } + - match: {values.2.1: "computer" } + - match: {values.2.2: 1200 } + - match: {values.3.0: "esql_local"} + - match: {values.3.1: "laptop" } + - match: {values.3.2: 2100 } + + - do: + indices.delete: + index: esql_local diff --git a/x-pack/qa/openldap-tests/build.gradle b/x-pack/qa/openldap-tests/build.gradle index 78a03c556bc11..02b2abad3726f 100644 --- a/x-pack/qa/openldap-tests/build.gradle +++ b/x-pack/qa/openldap-tests/build.gradle @@ -5,11 +5,8 @@ dependencies { testImplementation(testArtifact(project(xpackModule('core')))) testImplementation project(":x-pack:test:idp-fixture") testImplementation "junit:junit:${versions.junit}" - testImplementation "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - testImplementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" } - tasks.named('test') { // test suite uses jks which is not supported in fips mode systemProperty 'tests.security.manager', 'false' diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/ILMHistoryManagedTemplateUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/ILMHistoryManagedTemplateUpgradeIT.java new file mode 100644 index 0000000000000..aa177474b81e8 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/ILMHistoryManagedTemplateUpgradeIT.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.upgrades; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.test.rest.ObjectPath; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.is; + +public class ILMHistoryManagedTemplateUpgradeIT extends AbstractUpgradeTestCase { + + @SuppressWarnings("unchecked") + public void testEnsureHistoryManagedTemplateIsInstalledOnUpgradedVersion() throws Exception { + if (CLUSTER_TYPE.equals(ClusterType.UPGRADED)) { + assertBusy(() -> { + Request request = new Request("GET", "/_index_template/ilm-history-7"); + try { + Response response = client().performRequest(request); + Map<String, Object> responseMap = entityAsMap(response); + assertNotNull(responseMap); + + List<Map<String, Object>> indexTemplates = (List<Map<String, Object>>) responseMap.get("index_templates"); + assertThat(indexTemplates.size(), is(1)); + assertThat(ObjectPath.evaluate(indexTemplates.get(0), "name"), is("ilm-history-7")); + assertThat(ObjectPath.evaluate(indexTemplates.get(0), "index_template.index_patterns"), is(List.of("ilm-history-7*"))); + } catch (ResponseException e) { + // Not found is fine + assertThat( + "Unexpected failure getting templates: " + e.getResponse().getStatusLine(), + e.getResponse().getStatusLine().getStatusCode(), + is(404) + ); + } + }, 30, TimeUnit.SECONDS); + } + } +} diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SLMHistoryManagedTemplateUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SLMHistoryManagedTemplateUpgradeIT.java new file mode 100644 index 0000000000000..fed42c35cf5ce --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SLMHistoryManagedTemplateUpgradeIT.java @@ -0,0 +1,47 @@ +/* +
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.upgrades; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.test.rest.ObjectPath; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.is; + +public class SLMHistoryManagedTemplateUpgradeIT extends AbstractUpgradeTestCase { + + @SuppressWarnings("unchecked") + public void testEnsureHistoryManagedTemplateIsInstalledOnUpgradedVersion() throws Exception { + if (CLUSTER_TYPE.equals(ClusterType.UPGRADED)) { + assertBusy(() -> { + Request request = new Request("GET", "/_index_template/.slm-history-7"); + try { + Response response = client().performRequest(request); + Map<String, Object> responseMap = entityAsMap(response); + assertNotNull(responseMap); + + List<Map<String, Object>> indexTemplates = (List<Map<String, Object>>) responseMap.get("index_templates"); + assertThat(indexTemplates.size(), is(1)); + assertThat(ObjectPath.evaluate(indexTemplates.get(0), "name"), is(".slm-history-7")); + assertThat(ObjectPath.evaluate(indexTemplates.get(0), "index_template.index_patterns"), is(List.of(".slm-history-7*"))); + } catch (ResponseException e) { + // Not found is fine + assertThat( + "Unexpected failure getting templates: " + e.getResponse().getStatusLine(), + e.getResponse().getStatusLine().getStatusCode(), + is(404) + ); + } + }, 30, TimeUnit.SECONDS); + } + } +} diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java index 8b2fe0d1e2af1..dddba9b7b0fba 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java @@ -8,7 +8,6 @@ import org.apache.http.HttpHeaders; import org.apache.http.HttpHost; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -17,6 +16,7 @@ import org.elasticsearch.client.WarningsHandler; import org.elasticsearch.core.Strings; import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.rest.ObjectPath; import org.junit.After; import org.junit.Before; @@ -440,17 +440,22 @@ private List<String> getAllTokenIds() throws IOException { }"""); final Response searchResponse = client().performRequest(searchRequest); assertOK(searchResponse); - final SearchHits searchHits = SearchResponse.fromXContent(responseAsParser(searchResponse)).getHits(); - assertThat( - "Search request used with size parameter that was too small to fetch all tokens.", - searchHits.getTotalHits().value, - lessThanOrEqualTo(searchSize) - ); - final List<String> tokenIds = Arrays.stream(searchHits.getHits()).map(searchHit -> { - assertNotNull(searchHit.getId()); - return searchHit.getId(); - }).toList(); - assertThat(tokenIds, not(empty())); - return tokenIds; + var response = SearchResponseUtils.responseAsSearchResponse(searchResponse); + try { + final SearchHits searchHits = response.getHits(); +
assertThat( + "Search request used with size parameter that was too small to fetch all tokens.", + searchHits.getTotalHits().value, + lessThanOrEqualTo(searchSize) + ); + final List<String> tokenIds = Arrays.stream(searchHits.getHits()).map(searchHit -> { + assertNotNull(searchHit.getId()); + return searchHit.getId(); + }).toList(); + assertThat(tokenIds, not(empty())); + return tokenIds; + } finally { + response.decRef(); + } } } diff --git a/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java b/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java index c8b3b3fc3aed2..5718930f37c82 100644 --- a/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java +++ b/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java @@ -28,6 +28,7 @@ import org.apache.http.protocol.HttpContext; import org.apache.http.protocol.HttpCoreContext; import org.apache.http.util.EntityUtils; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -91,6 +92,7 @@ /** * An integration test for validating SAML authentication against a real Identity Provider (Shibboleth) */ +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103717") @ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) public class SamlAuthenticationIT extends ESRestTestCase { diff --git a/x-pack/qa/third-party/active-directory/build.gradle b/x-pack/qa/third-party/active-directory/build.gradle index f5c4e6d63d37c..5156d20dd1d12 100644 --- a/x-pack/qa/third-party/active-directory/build.gradle +++ b/x-pack/qa/third-party/active-directory/build.gradle @@ -1,12 +1,15 @@ apply plugin: 'elasticsearch.standalone-test' -apply plugin: 'elasticsearch.test.fixtures' +configurations.all { + exclude group: 'org.slf4j', module: 'slf4j-nop' +} dependencies { + testImplementation project(':test:framework') testImplementation project(xpackModule('core')) testImplementation project(xpackModule('security')) - testImplementation(testArtifact(project(xpackModule('security'))))} - -testFixtures.useFixture ":x-pack:test:smb-fixture" + testImplementation(testArtifact(project(xpackModule('security')))) + testImplementation project(":x-pack:test:smb-fixture") +} // add test resources from security, so tests can use example certs tasks.named("processTestResources").configure { @@ -23,6 +26,7 @@ tasks.named("forbiddenPatterns").configure { } tasks.named("test").configure { + systemProperty 'tests.security.manager', 'false' include '**/*IT.class' include '**/*Tests.class' } diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java index 26e0121b92a7d..d2443720de5ce 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java @@ -63,7 +63,7 @@ public void testUserSearchWithActiveDirectory() throws Exception { String
groupSearchBase = "DC=ad,DC=test,DC=elasticsearch,DC=com"; String userSearchBase = "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; Settings settings = Settings.builder() - .put("url", ActiveDirectorySessionFactoryTests.AD_LDAP_URL) + .put("url", smbFixture.getAdLdapUrl()) .put("group_search.base_dn", groupSearchBase) .put("user_search.base_dn", userSearchBase) .put("bind_dn", "ironman@ad.test.elasticsearch.com") diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java index 9ab6b5a309393..ff68d879d8a8f 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.security.authc.ldap; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; import com.unboundid.ldap.sdk.LDAPConnection; import com.unboundid.ldap.sdk.LDAPConnectionPool; import com.unboundid.ldap.sdk.LDAPException; @@ -18,6 +19,8 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.fixtures.smb.SmbTestContainer; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; @@ -25,6 +28,7 @@ import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import org.elasticsearch.xpack.core.ssl.SSLService; import org.junit.Before; +import org.junit.ClassRule; import java.io.IOException; import java.nio.file.FileVisitResult; @@ -39,8 +43,11 @@ import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; +@ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) public abstract class AbstractActiveDirectoryTestCase extends ESTestCase { + @ClassRule + public static final SmbTestContainer smbFixture = new SmbTestContainer(); // follow referrals defaults to false here which differs from the default value of the setting // this is needed to prevent test logs being filled by errors as the default configuration of // the tests run against a vagrant samba4 instance configured as a domain controller with the @@ -48,14 +55,7 @@ public abstract class AbstractActiveDirectoryTestCase extends ESTestCase { // as we cannot control the URL of the referral which may contain a non-resolvable DNS name as // this name would be served by the samba4 instance public static final Boolean FOLLOW_REFERRALS = Booleans.parseBoolean(getFromEnv("TESTS_AD_FOLLOW_REFERRALS", "false")); - public static final String AD_LDAP_URL = getFromEnv("TESTS_AD_LDAP_URL", "ldaps://localhost:" + getFromProperty("636")); - public static final String AD_LDAP_GC_URL = getFromEnv("TESTS_AD_LDAP_GC_URL", "ldaps://localhost:" + getFromProperty("3269")); - public static final String PASSWORD = getFromEnv("TESTS_AD_USER_PASSWORD", "Passw0rd"); - public static final String AD_LDAP_PORT = getFromEnv("TESTS_AD_LDAP_PORT", getFromProperty("389")); - - public static final 
String AD_LDAPS_PORT = getFromEnv("TESTS_AD_LDAPS_PORT", getFromProperty("636")); - public static final String AD_GC_LDAP_PORT = getFromEnv("TESTS_AD_GC_LDAP_PORT", getFromProperty("3268")); - public static final String AD_GC_LDAPS_PORT = getFromEnv("TESTS_AD_GC_LDAPS_PORT", getFromProperty("3269")); + public static final String PASSWORD = "Passw0rd"; public static final String AD_DOMAIN = "ad.test.elasticsearch.com"; protected SSLService sslService; @@ -108,10 +108,6 @@ Settings buildAdSettings( .put(getFullSettingKey(realmId, ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING), adDomainName) .put(getFullSettingKey(realmName, ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_BASEDN_SETTING), userSearchDN) .put(getFullSettingKey(realmName, ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_SCOPE_SETTING), scope) - .put(getFullSettingKey(realmName, ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING), AD_LDAP_PORT) - .put(getFullSettingKey(realmName, ActiveDirectorySessionFactorySettings.AD_LDAPS_PORT_SETTING), AD_LDAPS_PORT) - .put(getFullSettingKey(realmName, ActiveDirectorySessionFactorySettings.AD_GC_LDAP_PORT_SETTING), AD_GC_LDAP_PORT) - .put(getFullSettingKey(realmName, ActiveDirectorySessionFactorySettings.AD_GC_LDAPS_PORT_SETTING), AD_GC_LDAPS_PORT) .put(getFullSettingKey(realmId, SessionFactorySettings.FOLLOW_REFERRALS_SETTING), FOLLOW_REFERRALS) .putList(getFullSettingKey(realmId, SSLConfigurationSettings.CAPATH_SETTING_REALM), certificatePaths); if (randomBoolean()) { @@ -153,11 +149,4 @@ private static String getFromEnv(String envVar, String defaultValue) { final String value = System.getenv(envVar); return value == null ? defaultValue : value; } - - private static String getFromProperty(String port) { - String key = "test.fixtures.smb-fixture.tcp." 
+ port; - final String value = System.getProperty(key); - assertNotNull("Expected the actual value for port " + port + " to be in system property " + key, value); - return value; - } } diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java index 1af08ffd5fafe..3d9e7f3828bc7 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.security.authc.ldap; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import org.apache.logging.log4j.LogManager; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchSecurityException; @@ -21,18 +23,20 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.fixtures.smb.SmbTestContainer; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequestBuilder; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse; import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse; -import org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; +import org.junit.ClassRule; import java.io.IOException; import java.nio.file.Path; @@ -47,14 +51,9 @@ import java.util.stream.Collectors; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; import static org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope.ONE_LEVEL; import static org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope.SUB_TREE; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; -import static org.elasticsearch.xpack.security.authc.ldap.AbstractActiveDirectoryTestCase.AD_GC_LDAPS_PORT; -import static org.elasticsearch.xpack.security.authc.ldap.AbstractActiveDirectoryTestCase.AD_GC_LDAP_PORT; -import static org.elasticsearch.xpack.security.authc.ldap.AbstractActiveDirectoryTestCase.AD_LDAPS_PORT; -import static org.elasticsearch.xpack.security.authc.ldap.AbstractActiveDirectoryTestCase.AD_LDAP_PORT; import static org.elasticsearch.xpack.security.test.SecurityTestUtils.writeFile; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -63,6 +62,7 @@ * This test assumes all subclass tests will be of type SUITE. It picks a random realm configuration for the tests, and * writes a group to role mapping file for each node. 
*/ +@ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) public abstract class AbstractAdLdapRealmTestCase extends SecurityIntegTestCase { public static final String XPACK_SECURITY_AUTHC_REALMS_AD_EXTERNAL = "xpack.security.authc.realms.active_directory.external"; @@ -72,6 +72,9 @@ public abstract class AbstractAdLdapRealmTestCase extends SecurityIntegTestCase public static final String PHILANTHROPISTS_INDEX = "philanthropists"; public static final String SECURITY_INDEX = "security"; + @ClassRule + public static final SmbTestContainer smbFixture = new SmbTestContainer(); + private static final RoleMappingEntry[] AD_ROLE_MAPPING = new RoleMappingEntry[] { new RoleMappingEntry("SHIELD: [ \"CN=SHIELD,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com\" ]", """ { @@ -359,12 +362,8 @@ enum RealmConfig { .put(XPACK_SECURITY_AUTHC_REALMS_AD_EXTERNAL + ".domain_name", ActiveDirectorySessionFactoryTests.AD_DOMAIN) .put(XPACK_SECURITY_AUTHC_REALMS_AD_EXTERNAL + ".group_search.base_dn", "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com") .put(XPACK_SECURITY_AUTHC_REALMS_AD_EXTERNAL + ".group_search.scope", randomBoolean() ? SUB_TREE : ONE_LEVEL) - .put(XPACK_SECURITY_AUTHC_REALMS_AD_EXTERNAL + ".url", ActiveDirectorySessionFactoryTests.AD_LDAP_URL) + .put(XPACK_SECURITY_AUTHC_REALMS_AD_EXTERNAL + ".url", smbFixture.getAdLdapUrl()) .put(XPACK_SECURITY_AUTHC_REALMS_AD_EXTERNAL + ".follow_referrals", ActiveDirectorySessionFactoryTests.FOLLOW_REFERRALS) - .put(getFullSettingKey("external", ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING), AD_LDAP_PORT) - .put(getFullSettingKey("external", ActiveDirectorySessionFactorySettings.AD_LDAPS_PORT_SETTING), AD_LDAPS_PORT) - .put(getFullSettingKey("external", ActiveDirectorySessionFactorySettings.AD_GC_LDAP_PORT_SETTING), AD_GC_LDAP_PORT) - .put(getFullSettingKey("external", ActiveDirectorySessionFactorySettings.AD_GC_LDAPS_PORT_SETTING), AD_GC_LDAPS_PORT) .build(), "active_directory" ), @@ -373,7 +372,7 @@ enum RealmConfig { true, AD_ROLE_MAPPING, Settings.builder() - .put(XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".url", ActiveDirectorySessionFactoryTests.AD_LDAP_URL) + .put(XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".url", smbFixture.getAdLdapUrl()) .put(XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".group_search.base_dn", "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com") .put(XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".group_search.scope", randomBoolean() ? 
SUB_TREE : ONE_LEVEL) .putList( @@ -389,7 +388,7 @@ enum RealmConfig { true, AD_ROLE_MAPPING, Settings.builder() - .put(XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".url", ActiveDirectorySessionFactoryTests.AD_LDAP_URL) + .put(XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".url", smbFixture.getAdLdapUrl()) .putList( XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".user_dn_templates", "cn={0},CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com" diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolverTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolverTests.java index d8f82c6419501..231bf47e3e712 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolverTests.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolverTests.java @@ -6,15 +6,19 @@ */ package org.elasticsearch.xpack.security.authc.ldap; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; import com.unboundid.ldap.sdk.Filter; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.fixtures.smb.SmbTestContainer; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; import org.elasticsearch.xpack.core.security.support.NoOpLogger; import org.junit.Before; +import org.junit.ClassRule; import java.util.List; import java.util.regex.Pattern; @@ -24,12 +28,16 @@ import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; +@ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) public class ActiveDirectoryGroupsResolverTests extends GroupsResolverTestCase { private static final String BRUCE_BANNER_DN = "cn=Bruce Banner,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; private static final RealmConfig.RealmIdentifier REALM_ID = new RealmConfig.RealmIdentifier("active_directory", "ad"); + @ClassRule + public static final SmbTestContainer smbFixture = new SmbTestContainer(); + @Before public void setReferralFollowing() { ldapConnection.getConnectionOptions().setFollowReferrals(AbstractActiveDirectoryTestCase.FOLLOW_REFERRALS); @@ -145,7 +153,7 @@ private void assertValidSidQuery(Filter query, String[] expectedSids) { @Override protected String ldapUrl() { - return ActiveDirectorySessionFactoryTests.AD_LDAP_URL; + return smbFixture.getAdLdapUrl(); } @Override diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java index 120a27c944bd8..28637560d9d53 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java @@ -75,7 +75,11 @@ public boolean enableWarningsCheck() { } public void testAdAuth() throws Exception { - 
RealmConfig config = configureRealm("ad-test", LdapRealmSettings.AD_TYPE, buildAdSettings(AD_LDAP_URL, AD_DOMAIN, false)); + RealmConfig config = configureRealm( + "ad-test", + LdapRealmSettings.AD_TYPE, + buildAdSettings(smbFixture.getAdLdapUrl(), AD_DOMAIN, false) + ); try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { String userName = "ironman"; @@ -115,7 +119,7 @@ private RealmConfig configureRealm(String name, String type, Settings settings) } public void testNetbiosAuth() throws Exception { - final String adUrl = randomFrom(AD_LDAP_URL, AD_LDAP_GC_URL); + final String adUrl = randomFrom(smbFixture.getAdLdapUrl(), smbFixture.getAdLdapGcUrl()); RealmConfig config = configureRealm("ad-test", LdapRealmSettings.AD_TYPE, buildAdSettings(adUrl, AD_DOMAIN, false)); try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { @@ -142,7 +146,11 @@ public void testNetbiosAuth() throws Exception { } public void testAdAuthAvengers() throws Exception { - RealmConfig config = configureRealm("ad-test", LdapRealmSettings.AD_TYPE, buildAdSettings(AD_LDAP_URL, AD_DOMAIN, false)); + RealmConfig config = configureRealm( + "ad-test", + LdapRealmSettings.AD_TYPE, + buildAdSettings(smbFixture.getAdLdapUrl(), AD_DOMAIN, false) + ); try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { String[] users = new String[] { "cap", "hawkeye", "hulk", "ironman", "thor", "blackwidow" }; @@ -158,7 +166,7 @@ public void testAdAuthAvengers() throws Exception { public void testAuthenticate() throws Exception { Settings settings = buildAdSettings( REALM_ID, - AD_LDAP_URL, + smbFixture.getAdLdapUrl(), AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.ONE_LEVEL, @@ -191,7 +199,7 @@ public void testAuthenticate() throws Exception { public void testAuthenticateBaseUserSearch() throws Exception { Settings settings = buildAdSettings( REALM_ID, - AD_LDAP_URL, + smbFixture.getAdLdapUrl(), AD_DOMAIN, "CN=Bruce Banner, CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.BASE, @@ -226,7 +234,7 @@ public void testAuthenticateBaseGroupSearch() throws Exception { .put( buildAdSettings( REALM_ID, - AD_LDAP_URL, + smbFixture.getAdLdapUrl(), AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.ONE_LEVEL, @@ -255,7 +263,7 @@ public void testAuthenticateBaseGroupSearch() throws Exception { public void testAuthenticateWithUserPrincipalName() throws Exception { Settings settings = buildAdSettings( REALM_ID, - AD_LDAP_URL, + smbFixture.getAdLdapUrl(), AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.ONE_LEVEL, @@ -281,7 +289,7 @@ public void testAuthenticateWithUserPrincipalName() throws Exception { public void testAuthenticateWithSAMAccountName() throws Exception { Settings settings = buildAdSettings( REALM_ID, - AD_LDAP_URL, + smbFixture.getAdLdapUrl(), AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.ONE_LEVEL, @@ -310,7 +318,7 @@ public void testCustomUserFilter() throws Exception { .put( buildAdSettings( REALM_ID, - AD_LDAP_URL, + smbFixture.getAdLdapUrl(), AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.SUB_TREE, @@ -349,7 +357,7 @@ public void testStandardLdapConnection() throws Exception { .put( LdapTestCase.buildLdapSettings( realmId, - new String[] { AD_LDAP_URL }, + new String[] { 
smbFixture.getAdLdapUrl() }, new String[] { userTemplate }, groupSearchBase, LdapSearchScope.SUB_TREE, @@ -389,7 +397,7 @@ public void testHandlingLdapReferralErrors() throws Exception { .put( LdapTestCase.buildLdapSettings( realmId, - new String[] { AD_LDAP_URL }, + new String[] { smbFixture.getAdLdapUrl() }, new String[] { userTemplate }, groupSearchBase, LdapSearchScope.SUB_TREE, @@ -423,7 +431,7 @@ public void testStandardLdapWithAttributeGroups() throws Exception { .put( LdapTestCase.buildLdapSettings( realmId, - new String[] { AD_LDAP_URL }, + new String[] { smbFixture.getAdLdapUrl() }, new String[] { userTemplate }, groupSearchBase, LdapSearchScope.SUB_TREE, @@ -456,7 +464,11 @@ public void testStandardLdapWithAttributeGroups() throws Exception { } public void testADLookup() throws Exception { - RealmConfig config = configureRealm("ad-test", LdapRealmSettings.AD_TYPE, buildAdSettings(AD_LDAP_URL, AD_DOMAIN, false, true)); + RealmConfig config = configureRealm( + "ad-test", + LdapRealmSettings.AD_TYPE, + buildAdSettings(smbFixture.getAdLdapUrl(), AD_DOMAIN, false, true) + ); try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { List<String> users = randomSubsetOf( @@ -499,7 +511,7 @@ public void testResolveTokenGroupsSID() throws Exception { .put( buildAdSettings( REALM_ID, - AD_LDAP_URL, + smbFixture.getAdLdapUrl(), AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.SUB_TREE, @@ -536,10 +548,6 @@ private Settings buildAdSettings(String ldapUrl, String adDomainName, boolean ho Settings.Builder builder = Settings.builder() .put(getFullSettingKey(REALM_ID, SessionFactorySettings.URLS_SETTING), ldapUrl) .put(getFullSettingKey(REALM_ID, ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING), adDomainName) - .put(getFullSettingKey(REALM_NAME, ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING), AD_LDAP_PORT) - .put(getFullSettingKey(REALM_NAME, ActiveDirectorySessionFactorySettings.AD_LDAPS_PORT_SETTING), AD_LDAPS_PORT) - .put(getFullSettingKey(REALM_NAME, ActiveDirectorySessionFactorySettings.AD_GC_LDAP_PORT_SETTING), AD_GC_LDAP_PORT) - .put(getFullSettingKey(REALM_NAME, ActiveDirectorySessionFactorySettings.AD_GC_LDAPS_PORT_SETTING), AD_GC_LDAPS_PORT) .put(getFullSettingKey(REALM_ID, SessionFactorySettings.FOLLOW_REFERRALS_SETTING), FOLLOW_REFERRALS); if (randomBoolean()) { builder.put( diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java index 5a8350739ef6b..256d710b3dfe2 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java @@ -6,16 +6,20 @@ */ package org.elasticsearch.xpack.security.authc.ldap; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; import com.unboundid.ldap.sdk.Attribute; import com.unboundid.ldap.sdk.SearchRequest; import com.unboundid.ldap.sdk.SearchScope; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.fixtures.smb.SmbTestContainer; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter;
import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.ldap.UserAttributeGroupsResolverSettings; import org.elasticsearch.xpack.core.security.support.NoOpLogger; import org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils; +import org.junit.ClassRule; import java.util.Collection; import java.util.List; @@ -26,11 +30,15 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasSize; +@ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) public class UserAttributeGroupsResolverTests extends GroupsResolverTestCase { public static final String BRUCE_BANNER_DN = "cn=Bruce Banner,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; private static final RealmConfig.RealmIdentifier REALM_ID = new RealmConfig.RealmIdentifier("ldap", "realm1"); + @ClassRule + public static final SmbTestContainer smbFixture = new SmbTestContainer(); + public void testResolve() throws Exception { // falling back on the 'memberOf' attribute UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(config(REALM_ID, Settings.EMPTY)); @@ -112,7 +120,7 @@ public void testResolveInvalidGroupAttribute() throws Exception { @Override protected String ldapUrl() { - return ActiveDirectorySessionFactoryTests.AD_LDAP_URL; + return smbFixture.getAdLdapUrl(); } @Override diff --git a/x-pack/test/idp-fixture/build.gradle b/x-pack/test/idp-fixture/build.gradle index 407fb520fcae1..691483bcfe5c3 100644 --- a/x-pack/test/idp-fixture/build.gradle +++ b/x-pack/test/idp-fixture/build.gradle @@ -1,27 +1,9 @@ apply plugin: 'elasticsearch.java' apply plugin: 'elasticsearch.cache-test-fixtures' -configurations.all { - transitive = false -} - dependencies { testImplementation project(':test:framework') api project(':test:fixtures:testcontainer-utils') api "junit:junit:${versions.junit}" - api "org.testcontainers:testcontainers:${versions.testcontainer}" - implementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" - implementation "org.slf4j:slf4j-api:${versions.slf4j}" - implementation "com.github.docker-java:docker-java-api:${versions.dockerJava}" - - runtimeOnly "com.github.docker-java:docker-java-transport-zerodep:${versions.dockerJava}" - runtimeOnly "com.github.docker-java:docker-java-transport:${versions.dockerJava}" - runtimeOnly "com.github.docker-java:docker-java-core:${versions.dockerJava}" - runtimeOnly "org.apache.commons:commons-compress:${versions.commonsCompress}" - runtimeOnly "org.rnorth.duct-tape:duct-tape:${versions.ductTape}" - - // ensure we have proper logging during when used in tests - runtimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" - runtimeOnly "org.hamcrest:hamcrest:${versions.hamcrest}" } diff --git a/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/HttpProxyTestContainer.java b/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/HttpProxyTestContainer.java index e517c2a9fe2c3..4f7d3528f85d4 100644 --- a/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/HttpProxyTestContainer.java +++ b/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/HttpProxyTestContainer.java @@ -31,7 +31,6 @@ public HttpProxyTestContainer(Network network) { ); addExposedPort(PORT); withNetwork(network); - } public Integer getProxyPort() { diff --git a/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/IdpTestContainer.java 
b/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/IdpTestContainer.java index ed19dc997fd8e..692cd4b081411 100644 --- a/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/IdpTestContainer.java +++ b/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/IdpTestContainer.java @@ -10,7 +10,9 @@ import org.elasticsearch.test.fixtures.testcontainers.DockerEnvironmentAwareTestContainer; import org.junit.rules.TemporaryFolder; import org.testcontainers.containers.Network; +import org.testcontainers.containers.wait.strategy.Wait; import org.testcontainers.images.builder.ImageFromDockerfile; +import org.testcontainers.images.builder.dockerfile.statement.SingleArgumentStatement; import java.io.IOException; import java.nio.file.Path; @@ -125,19 +127,24 @@ public IdpTestContainer(Network network) { .run("chmod +x /opt/jetty-home/bin/jetty.sh") // Opening 4443 (browser TLS), 8443 (mutual auth TLS) .cmd("run-jetty.sh") + .withStatement( + new SingleArgumentStatement( + "HEALTHCHECK", + "CMD curl -f -s --http0.9 http://localhost:4443 " + "--connect-timeout 10 --max-time 10 --output - > /dev/null" + ) + ) // .expose(4443) .build() - ) .withFileFromClasspath("idp/jetty-custom/ssl.mod", "/idp/jetty-custom/ssl.mod") .withFileFromClasspath("idp/jetty-custom/keystore", "/idp/jetty-custom/keystore") .withFileFromClasspath("idp/shib-jetty-base/", "/idp/shib-jetty-base/") .withFileFromClasspath("idp/shibboleth-idp/", "/idp/shibboleth-idp/") .withFileFromClasspath("idp/bin/", "/idp/bin/") - ); withNetworkAliases("idp"); withNetwork(network); + waitingFor(Wait.forHealthcheck()); addExposedPorts(4443, 8443); } diff --git a/x-pack/test/idp-fixture/src/main/resources/idp/bin/run-jetty.sh b/x-pack/test/idp-fixture/src/main/resources/idp/bin/run-jetty.sh index 24ece94c2715d..0160cc613407d 100644 --- a/x-pack/test/idp-fixture/src/main/resources/idp/bin/run-jetty.sh +++ b/x-pack/test/idp-fixture/src/main/resources/idp/bin/run-jetty.sh @@ -20,7 +20,7 @@ exit_code=$? end_time=$(date +%s) duration=$((end_time - start_time)) -if [ $duration -lt 5 ]; then +if [ $duration -lt 10 ]; then /opt/jetty-home/bin/jetty.sh run exit_code=$? fi diff --git a/x-pack/test/smb-fixture/Dockerfile b/x-pack/test/smb-fixture/Dockerfile deleted file mode 100644 index bcd74758ff496..0000000000000 --- a/x-pack/test/smb-fixture/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM ubuntu:16.04 -RUN apt-get update -qqy && apt-get install -qqy samba ldap-utils -ADD . 
/fixture -RUN chmod +x /fixture/src/main/resources/provision/installsmb.sh -RUN /fixture/src/main/resources/provision/installsmb.sh - -EXPOSE 389 -EXPOSE 636 -EXPOSE 3268 -EXPOSE 3269 - -CMD service samba-ad-dc restart && sleep infinity diff --git a/x-pack/test/smb-fixture/build.gradle b/x-pack/test/smb-fixture/build.gradle index 8740d94f26357..aeb5626ce9508 100644 --- a/x-pack/test/smb-fixture/build.gradle +++ b/x-pack/test/smb-fixture/build.gradle @@ -1 +1,13 @@ -apply plugin: 'elasticsearch.test.fixtures' +apply plugin: 'elasticsearch.java' +apply plugin: 'elasticsearch.cache-test-fixtures' + +dependencies { + api project(':test:fixtures:testcontainer-utils') + api "junit:junit:${versions.junit}" + api "org.testcontainers:testcontainers:${versions.testcontainer}" + api "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + + // ensure we have proper logging when used in tests + runtimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" + runtimeOnly "org.hamcrest:hamcrest:${versions.hamcrest}" +} diff --git a/x-pack/test/smb-fixture/docker-compose.yml b/x-pack/test/smb-fixture/docker-compose.yml deleted file mode 100644 index 51a76fd42b435..0000000000000 --- a/x-pack/test/smb-fixture/docker-compose.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: '3' -services: - smb-fixture: - build: - context: . - dockerfile: Dockerfile - ports: - - "389" - - "636" - - "3268" - - "3269" diff --git a/x-pack/test/smb-fixture/src/main/java/org/elasticsearch/test/fixtures/smb/SmbTestContainer.java b/x-pack/test/smb-fixture/src/main/java/org/elasticsearch/test/fixtures/smb/SmbTestContainer.java new file mode 100644 index 0000000000000..10f589e4e1df3 --- /dev/null +++ b/x-pack/test/smb-fixture/src/main/java/org/elasticsearch/test/fixtures/smb/SmbTestContainer.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.test.fixtures.smb; + +import org.elasticsearch.test.fixtures.testcontainers.DockerEnvironmentAwareTestContainer; +import org.testcontainers.images.builder.ImageFromDockerfile; + +public final class SmbTestContainer extends DockerEnvironmentAwareTestContainer { + + private static final String DOCKER_BASE_IMAGE = "ubuntu:16.04"; + public static final int AD_LDAP_PORT = 636; + public static final int AD_LDAP_GC_PORT = 3269; + + public SmbTestContainer() { + super( + new ImageFromDockerfile("es-smb-fixture").withDockerfileFromBuilder( + builder -> builder.from(DOCKER_BASE_IMAGE) + .run("apt-get update -qqy && apt-get install -qqy samba ldap-utils") + .copy("fixture/provision/installsmb.sh", "/fixture/provision/installsmb.sh") + .copy("fixture/certs/ca.key", "/fixture/certs/ca.key") + .copy("fixture/certs/ca.pem", "/fixture/certs/ca.pem") + .copy("fixture/certs/cert.pem", "/fixture/certs/cert.pem") + .copy("fixture/certs/key.pem", "/fixture/certs/key.pem") + .run("chmod +x /fixture/provision/installsmb.sh") + .run("/fixture/provision/installsmb.sh") + .cmd("service samba-ad-dc restart && sleep infinity") + .build() + ) + .withFileFromClasspath("fixture/provision/installsmb.sh", "/smb/provision/installsmb.sh") + .withFileFromClasspath("fixture/certs/ca.key", "/smb/certs/ca.key") + .withFileFromClasspath("fixture/certs/ca.pem", "/smb/certs/ca.pem") + .withFileFromClasspath("fixture/certs/cert.pem", "/smb/certs/cert.pem") + .withFileFromClasspath("fixture/certs/key.pem", "/smb/certs/key.pem") + ); + // addExposedPort(389); + // addExposedPort(3268); + addExposedPort(AD_LDAP_PORT); + addExposedPort(AD_LDAP_GC_PORT); + } + + public String getAdLdapUrl() { + return "ldaps://localhost:" + getMappedPort(AD_LDAP_PORT); + } + + public String getAdLdapGcUrl() { + return "ldaps://localhost:" + getMappedPort(AD_LDAP_GC_PORT); + } +} diff --git a/x-pack/test/smb-fixture/src/main/resources/certs/README.asciidoc b/x-pack/test/smb-fixture/src/main/resources/smb/certs/README.asciidoc similarity index 100% rename from x-pack/test/smb-fixture/src/main/resources/certs/README.asciidoc rename to x-pack/test/smb-fixture/src/main/resources/smb/certs/README.asciidoc diff --git a/x-pack/test/smb-fixture/src/main/resources/certs/ca.key b/x-pack/test/smb-fixture/src/main/resources/smb/certs/ca.key similarity index 100% rename from x-pack/test/smb-fixture/src/main/resources/certs/ca.key rename to x-pack/test/smb-fixture/src/main/resources/smb/certs/ca.key diff --git a/x-pack/test/smb-fixture/src/main/resources/certs/ca.pem b/x-pack/test/smb-fixture/src/main/resources/smb/certs/ca.pem similarity index 100% rename from x-pack/test/smb-fixture/src/main/resources/certs/ca.pem rename to x-pack/test/smb-fixture/src/main/resources/smb/certs/ca.pem diff --git a/x-pack/test/smb-fixture/src/main/resources/certs/cert.pem b/x-pack/test/smb-fixture/src/main/resources/smb/certs/cert.pem similarity index 100% rename from x-pack/test/smb-fixture/src/main/resources/certs/cert.pem rename to x-pack/test/smb-fixture/src/main/resources/smb/certs/cert.pem diff --git a/x-pack/test/smb-fixture/src/main/resources/certs/key.pem b/x-pack/test/smb-fixture/src/main/resources/smb/certs/key.pem similarity index 100% rename from x-pack/test/smb-fixture/src/main/resources/certs/key.pem rename to x-pack/test/smb-fixture/src/main/resources/smb/certs/key.pem diff --git a/x-pack/test/smb-fixture/src/main/resources/provision/installsmb.sh b/x-pack/test/smb-fixture/src/main/resources/smb/provision/installsmb.sh similarity index 
97% rename from x-pack/test/smb-fixture/src/main/resources/provision/installsmb.sh rename to x-pack/test/smb-fixture/src/main/resources/smb/provision/installsmb.sh index 0bc86e96530bc..463238b9f50c2 100644 --- a/x-pack/test/smb-fixture/src/main/resources/provision/installsmb.sh +++ b/x-pack/test/smb-fixture/src/main/resources/smb/provision/installsmb.sh @@ -8,8 +8,7 @@ set -ex VDIR=/fixture -RESOURCES=$VDIR/src/main/resources -CERTS_DIR=$RESOURCES/certs +CERTS_DIR=$VDIR/certs SSL_DIR=/var/lib/samba/private/tls # install ssl certs