diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots.yml index a50c1d5e44eb1..2194bd986a891 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots.yml @@ -25,8 +25,6 @@ black-list-labels: - '>test-mute' - 'test-full-bwc' - black-list-target-branches: - - feature/desired-balance-allocator axes: - axis: type: slave diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml index 04b91e91817b3..26d17e60959d6 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml @@ -22,8 +22,6 @@ - ^docs/.* black-list-labels: - '>test-mute' - black-list-target-branches: - - feature/desired-balance-allocator builders: - inject: properties-file: '.ci/java-versions.properties' diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml index 525878c292627..be749c200557b 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml @@ -20,7 +20,6 @@ cancel-builds-on-update: true black-list-target-branches: - 6.8 - - feature/desired-balance-allocator excluded-regions: - ^docs/.* black-list-labels: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+example-plugins.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+example-plugins.yml index f3c35868a7d11..f9be84bd5f6c7 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+example-plugins.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+example-plugins.yml @@ -23,8 +23,6 @@ - build-tools/.* - build-tools-internal/.* - plugins/examples/.* - black-list-target-branches: - - feature/desired-balance-allocator builders: - inject: properties-file: '.ci/java-versions.properties' diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml index 10e9f3a36394c..1942bc53ded11 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml @@ -21,7 +21,6 @@ cancel-builds-on-update: true black-list-target-branches: - 6.8 - - feature/desired-balance-allocator excluded-regions: - ^docs/.* black-list-labels: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml index 4dbd7ae32abdc..b98716656ce8e 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml @@ -24,7 +24,6 @@ black-list-target-branches: - 7.17 - 7.16 - - feature/desired-balance-allocator excluded-regions: - ^docs/.* black-list-labels: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml index eac8be547ddd7..97f7b1faee25f 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml @@ -22,7 +22,6 @@ cancel-builds-on-update: true 
black-list-target-branches: - 6.8 - - feature/desired-balance-allocator excluded-regions: - ^docs/.* white-list-labels: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3.yml new file mode 100644 index 0000000000000..e0a3e9cb8fd71 --- /dev/null +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3.yml @@ -0,0 +1,33 @@ +--- +- job: + name: "elastic+elasticsearch+pull-request+part-3" + display-name: "elastic / elasticsearch - pull request part-3" + description: "Testing of Elasticsearch pull requests - part 3" + workspace: "/dev/shm/elastic+elasticsearch+pull-request+part-3" + scm: + - git: + refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" + branches: + - "${ghprbActualCommit}" + triggers: + - github-pull-request: + org-list: + - elastic + allow-whitelist-orgs-as-admins: true + trigger-phrase: '.*run\W+elasticsearch-ci/part-3.*' + github-hooks: true + status-context: elasticsearch-ci/part-3 + cancel-builds-on-update: true + white-list-labels: + - 'test-part-3' + builders: + - inject: + properties-file: '.ci/java-versions.properties' + properties-content: | + JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA + RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA + JAVA8_HOME=$HOME/.java/java8 + JAVA11_HOME=$HOME/.java/java11 + - shell: | + #!/usr/local/bin/runbld --redirect-stderr + $WORKSPACE/.ci/scripts/run-gradle.sh -Dignore.tests.seed checkPart3 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+precommit.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+precommit.yml index c0b900372522c..aadb8464cff55 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+precommit.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+precommit.yml @@ -20,8 +20,6 @@ cancel-builds-on-update: true white-list-labels: - '>test-mute' - black-list-target-branches: - - feature/desired-balance-allocator builders: - inject: properties-file: '.ci/java-versions.properties' diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml index f0868df1dc57a..f99a3c1bdd32c 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml @@ -23,7 +23,6 @@ - 7.16 - 7.15 - 6.8 - - feature/desired-balance-allocator excluded-regions: - ^docs/.* black-list-labels: diff --git a/.ci/templates.t/pull-request-gradle-unix.yml b/.ci/templates.t/pull-request-gradle-unix.yml index 4c3eed1eef526..3257bd9ed2951 100644 --- a/.ci/templates.t/pull-request-gradle-unix.yml +++ b/.ci/templates.t/pull-request-gradle-unix.yml @@ -22,8 +22,6 @@ - ^docs/.* black-list-labels: - '>test-mute' - black-list-target-branches: - - feature/desired-balance-allocator builders: - inject: properties-file: '.ci/java-versions.properties' diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 64a669aa5d563..a9ecf0cf47a8d 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -108,6 +108,40 @@ password: `elastic-password`. - In order to set an Elasticsearch setting, provide a setting with the following prefix: `-Dtests.es.` - In order to pass a JVM setting, e.g. to disable assertions: `-Dtests.jvm.argline="-da"` +==== Customizing the test cluster for ./gradlew run + +You may need to customize the cluster configuration for the ./gradlew run task. +You can simply find the task in the source code and configure it there. 
+(The task is currently defined in build-tools-internal/src/main/groovy/elasticsearch.run.gradle) +However, this requires modifying a source-controlled file, which is subject to accidental commits. +Alternatively, you can use a Gradle init script to inject custom build logic with the -I flag to configure this task locally. + +For example: + +To enable HTTPS for use with ./gradlew run, an extraConfigFile needs to be added to the cluster configuration. +Create a file (for example ~/custom-run.gradle) with the following contents: +------------------------------------- +rootProject { + if(project.name == 'elasticsearch') { + afterEvaluate { + testClusters.matching { it.name == "runTask"}.configureEach { + extraConfigFile 'http.p12', file("/http.p12") + } + } + } +} +------------------------------------- +Now tell Gradle to use this init script: +------------------------------------- +./gradlew run -I ~/custom-run.gradle \ +-Dtests.es.xpack.security.http.ssl.enabled=true \ +-Dtests.es.xpack.security.http.ssl.keystore.path=http.p12 +------------------------------------- + +Now the http.p12 file will be placed in the config directory of the running cluster and will be available for use. +Assuming you have the http.ssl.keystore set up correctly, you can now use HTTPS with ./gradlew run without the risk +of accidentally committing your local configurations. + === Test case filtering. You can run a single test, provided that you specify the Gradle project. See the documentation on diff --git a/build-tools-internal/src/main/resources/changelog-schema.json b/build-tools-internal/src/main/resources/changelog-schema.json index 6c6e01089573f..8dd8b9493e0ca 100644 --- a/build-tools-internal/src/main/resources/changelog-schema.json +++ b/build-tools-internal/src/main/resources/changelog-schema.json @@ -83,6 +83,7 @@ "TLS", "Transform", "TSDB", + "Vector Search", "Watcher" ] }, diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java index cad9b19895c5d..d93d78cd24533 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.gradle.internal.BwcVersions; import org.elasticsearch.gradle.internal.BwcVersions.VersionPair; -import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Project; import org.gradle.testfixtures.ProjectBuilder; @@ -18,7 +17,7 @@ import java.io.File; import java.util.Arrays; -public class AbstractDistributionDownloadPluginTests extends GradleUnitTestCase { +public class AbstractDistributionDownloadPluginTests { protected static Project rootProject; protected static Project archivesProject; protected static Project packagesProject; diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/ConcatFilesTaskTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/ConcatFilesTaskTests.java index 835b96ddaf02e..1e912f23a9923 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/ConcatFilesTaskTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/ConcatFilesTaskTests.java @@ -7,9 +7,9 @@ */ package org.elasticsearch.gradle.internal; -import 
org.elasticsearch.gradle.internal.test.GradleUnitTestCase; import org.gradle.api.Project; import org.gradle.testfixtures.ProjectBuilder; +import org.junit.Test; import java.io.File; import java.io.IOException; @@ -17,8 +17,11 @@ import java.nio.file.Files; import java.util.Arrays; -public class ConcatFilesTaskTests extends GradleUnitTestCase { +import static org.junit.Assert.assertEquals; +public class ConcatFilesTaskTests { + + @Test public void testHeaderAdded() throws IOException { Project project = createProject(); @@ -39,6 +42,7 @@ public void testHeaderAdded() throws IOException { file.delete(); } + @Test public void testConcatenationWithUnique() throws IOException { Project project = createProject(); diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/EmptyDirTaskTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/EmptyDirTaskTests.java index b8f3f6d3dd53b..fadbc8ab75dd8 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/EmptyDirTaskTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/EmptyDirTaskTests.java @@ -7,18 +7,22 @@ */ package org.elasticsearch.gradle.internal; -import com.carrotsearch.randomizedtesting.RandomizedTest; - import org.apache.tools.ant.taskdefs.condition.Os; -import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; import org.gradle.api.Project; import org.gradle.testfixtures.ProjectBuilder; +import org.junit.Test; import java.io.File; import java.io.IOException; -public class EmptyDirTaskTests extends GradleUnitTestCase { +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; + +public class EmptyDirTaskTests { + @Test public void testCreateEmptyDir() throws Exception { Project project = ProjectBuilder.builder().build(); EmptyDirTask emptyDirTask = project.getTasks().create("emptyDirTask", EmptyDirTask.class); @@ -40,8 +44,9 @@ public void testCreateEmptyDir() throws Exception { newEmptyFolder.delete(); } + @Test public void testCreateEmptyDirNoPermissions() throws Exception { - RandomizedTest.assumeFalse("Functionality is Unix specific", Os.isFamily(Os.FAMILY_WINDOWS)); + assumeFalse("Functionality is Unix specific", Os.isFamily(Os.FAMILY_WINDOWS)); Project project = ProjectBuilder.builder().build(); EmptyDirTask emptyDirTask = project.getTasks().create("emptyDirTask", EmptyDirTask.class); diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginTests.java index ef19c385f25db..e51890802ffae 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginTests.java @@ -14,11 +14,13 @@ import org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes; import org.gradle.api.Project; import org.gradle.testfixtures.ProjectBuilder; +import org.junit.Test; import java.io.File; public class InternalDistributionDownloadPluginTests extends AbstractDistributionDownloadPluginTests { + @Test public void testLocalCurrentVersionPackages() { ElasticsearchDistributionType[] types = { InternalElasticsearchDistributionTypes.RPM, 
InternalElasticsearchDistributionTypes.DEB }; for (ElasticsearchDistributionType packageType : types) { @@ -33,6 +35,7 @@ public void testLocalCurrentVersionPackages() { } } + @Test public void testLocalBwcPackages() { ElasticsearchDistributionType[] types = { InternalElasticsearchDistributionTypes.RPM, InternalElasticsearchDistributionTypes.DEB }; for (ElasticsearchDistributionType packageType : types) { diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/JdkDownloadPluginTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/JdkDownloadPluginTests.java index dff5c3b28d2ca..19ff1d72705f2 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/JdkDownloadPluginTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/JdkDownloadPluginTests.java @@ -8,15 +8,17 @@ package org.elasticsearch.gradle.internal; -import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Project; import org.gradle.testfixtures.ProjectBuilder; import org.junit.BeforeClass; +import org.junit.Test; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertThrows; -public class JdkDownloadPluginTests extends GradleUnitTestCase { +public class JdkDownloadPluginTests { private static Project rootProject; @BeforeClass @@ -24,10 +26,12 @@ public static void setupRoot() { rootProject = ProjectBuilder.builder().build(); } + @Test public void testMissingVendor() { assertJdkError(createProject(), "testjdk", null, "11.0.2+33", "linux", "x64", "vendor not specified for jdk [testjdk]"); } + @Test public void testUnknownVendor() { assertJdkError( createProject(), @@ -40,10 +44,12 @@ public void testUnknownVendor() { ); } + @Test public void testMissingVersion() { assertJdkError(createProject(), "testjdk", "openjdk", null, "linux", "x64", "version not specified for jdk [testjdk]"); } + @Test public void testBadVersionFormat() { assertJdkError( createProject(), @@ -56,10 +62,12 @@ public void testBadVersionFormat() { ); } + @Test public void testMissingPlatform() { assertJdkError(createProject(), "testjdk", "openjdk", "11.0.2+33", null, "x64", "platform not specified for jdk [testjdk]"); } + @Test public void testUnknownPlatform() { assertJdkError( createProject(), @@ -72,10 +80,12 @@ public void testUnknownPlatform() { ); } + @Test public void testMissingArchitecture() { assertJdkError(createProject(), "testjdk", "openjdk", "11.0.2+33", "linux", null, "architecture not specified for jdk [testjdk]"); } + @Test public void testUnknownArchitecture() { assertJdkError( createProject(), @@ -97,7 +107,7 @@ private void assertJdkError( final String architecture, final String message ) { - IllegalArgumentException e = expectThrows( + IllegalArgumentException e = assertThrows( IllegalArgumentException.class, () -> createJdk(project, name, vendor, version, platform, architecture) ); diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/checkstyle/SnipptLengthCheckTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/checkstyle/SnipptLengthCheckTests.java index 3258cd67ba1f6..efdf7563d74d2 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/checkstyle/SnipptLengthCheckTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/checkstyle/SnipptLengthCheckTests.java 
@@ -8,7 +8,7 @@ package org.elasticsearch.gradle.internal.checkstyle; -import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; +import org.junit.Test; import java.util.ArrayList; import java.util.Arrays; @@ -17,40 +17,51 @@ import static java.util.Collections.singletonList; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.fail; -public class SnipptLengthCheckTests extends GradleUnitTestCase { +public class SnipptLengthCheckTests { + + @Test public void testNoSnippets() { SnippetLengthCheck.checkFile(failOnError(), 10, "There a no snippets"); } + @Test public void testEmptySnippet() { SnippetLengthCheck.checkFile(failOnError(), 10, "// tag::foo", "// end::foo"); } + @Test public void testSnippetWithSmallText() { SnippetLengthCheck.checkFile(failOnError(), 10, "// tag::foo", "some words", "// end::foo"); } + @Test public void testSnippetWithLeadingSpaces() { SnippetLengthCheck.checkFile(failOnError(), 10, " // tag::foo", " some words", " // end::foo"); } + @Test public void testSnippetWithEmptyLine() { SnippetLengthCheck.checkFile(failOnError(), 10, " // tag::foo", "", " some words", " // end::foo"); } + @Test public void testSnippetBrokenLeadingSpaces() { List<String> collection = new ArrayList<>(); SnippetLengthCheck.checkFile(collect(collection), 10, " // tag::foo", "some words", " // end::foo"); assertThat(collection, equalTo(singletonList("2: snippet line should start with [ ]"))); } + @Test public void testSnippetTooLong() { List<String> collection = new ArrayList<>(); SnippetLengthCheck.checkFile(collect(collection), 10, " // tag::foo", " too long words", " // end::foo"); assertThat(collection, equalTo(singletonList("2: snippet line should be no more than [10] characters but was [14]"))); } + @Test public void testLotsOfErrors() { List<String> collection = new ArrayList<>(); SnippetLengthCheck.checkFile(collect(collection), 10, " // tag::foo", "asdfadf", " too long words", "asdfadf", " // end::foo"); diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/doc/RestTestFromSnippetsTaskTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/doc/RestTestFromSnippetsTaskTests.java index a4b332ff9133e..534134e78d40b 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/doc/RestTestFromSnippetsTaskTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/doc/RestTestFromSnippetsTaskTests.java @@ -7,17 +7,21 @@ */ package org.elasticsearch.gradle.internal.doc; -import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; import org.gradle.api.InvalidUserDataException; import org.junit.Rule; +import org.junit.Test; import org.junit.rules.ExpectedException; import static org.elasticsearch.gradle.internal.doc.RestTestsFromSnippetsTask.replaceBlockQuote; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; -public class RestTestFromSnippetsTaskTests extends GradleUnitTestCase { +public class RestTestFromSnippetsTaskTests { @Rule public ExpectedException expectedEx = ExpectedException.none(); + @Test public void testInvalidBlockQuote() { String input = "\"foo\": \"\"\"bar\""; expectedEx.expect(InvalidUserDataException.class); @@ -25,10 +29,12 @@ public void testInvalidBlockQuote() { replaceBlockQuote(input); } + @Test public void testSimpleBlockQuote() { assertEquals("\"foo\": \"bort baz\"", replaceBlockQuote("\"foo\": \"\"\"bort 
baz\"\"\"")); } + @Test public void testMultipleBlockQuotes() { assertEquals( "\"foo\": \"bort baz\", \"bar\": \"other\"", @@ -36,11 +42,13 @@ public void testMultipleBlockQuotes() { ); } + @Test public void testEscapingInBlockQuote() { assertEquals("\"foo\": \"bort\\\" baz\"", replaceBlockQuote("\"foo\": \"\"\"bort\" baz\"\"\"")); assertEquals("\"foo\": \"bort\\n baz\"", replaceBlockQuote("\"foo\": \"\"\"bort\n baz\"\"\"")); } + @Test public void testIsDocWriteRequest() { assertTrue((boolean) RestTestsFromSnippetsTask.shouldAddShardFailureCheck("doc-index/_search")); assertFalse((boolean) RestTestsFromSnippetsTask.shouldAddShardFailureCheck("_cat")); diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/doc/SnippetsTaskTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/doc/SnippetsTaskTests.java index 50517653384ff..846835e918e0c 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/doc/SnippetsTaskTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/doc/SnippetsTaskTests.java @@ -7,10 +7,15 @@ */ package org.elasticsearch.gradle.internal.doc; -import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; +import org.junit.Test; -public class SnippetsTaskTests extends GradleUnitTestCase { +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +public class SnippetsTaskTests { + + @Test public void testMatchSource() { SnippetsTask.Source source = SnippetsTask.matchSource("[source,console]"); assertTrue(source.getMatches()); diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/docker/DockerSupportServiceTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/docker/DockerSupportServiceTests.java index ff42761361802..ec0c155abbdcc 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/docker/DockerSupportServiceTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/docker/DockerSupportServiceTests.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.gradle.internal.docker; -import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; +import org.junit.Test; import java.util.HashMap; import java.util.List; @@ -16,9 +16,11 @@ import static org.elasticsearch.gradle.internal.docker.DockerSupportService.deriveId; import static org.elasticsearch.gradle.internal.docker.DockerSupportService.parseOsRelease; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; -public class DockerSupportServiceTests extends GradleUnitTestCase { +public class DockerSupportServiceTests { + @Test public void testParseOsReleaseOnOracle() { final List<String> lines = List.of( "NAME=\"Oracle Linux Server\"", @@ -60,6 +62,7 @@ public void testParseOsReleaseOnOracle() { /** * Trailing whitespace should be removed */ + @Test public void testRemoveTrailingWhitespace() { final List<String> lines = List.of("NAME=\"Oracle Linux Server\" "); @@ -73,6 +76,7 @@ public void testRemoveTrailingWhitespace() { /** * Comments should be removed */ + @Test public void testRemoveComments() { final List<String> lines = List.of("# A comment", "NAME=\"Oracle Linux Server\""); @@ -83,6 +87,7 @@ public void testRemoveComments() { assertThat(expected, equalTo(results)); } + @Test public void testDeriveIdOnOracle() { final Map<String, String> osRelease = new HashMap<>(); osRelease.put("ID", "ol"); diff --git 
a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/docker/TransformLog4jConfigFilterTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/docker/TransformLog4jConfigFilterTests.java index cc673cd758ec6..827f8fd6288ba 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/docker/TransformLog4jConfigFilterTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/docker/TransformLog4jConfigFilterTests.java @@ -8,17 +8,19 @@ package org.elasticsearch.gradle.internal.docker; -import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; +import org.junit.Test; import java.util.List; +import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; -public class TransformLog4jConfigFilterTests extends GradleUnitTestCase { +public class TransformLog4jConfigFilterTests { /** * Check that the transformer doesn't explode when given an empty file. */ + @Test public void testTransformEmptyConfig() { runTest(List.of(), List.of()); } @@ -26,6 +28,7 @@ public void testTransformEmptyConfig() { /** * Check that the transformer leaves non-appender lines alone. */ + @Test public void testTransformEchoesNonAppenderLines() { List<String> input = List.of( "status = error", @@ -42,6 +45,7 @@ public void testTransformEchoesNonAppenderLines() { /** * Check that the root logger appenders are filtered to just the "rolling" appender */ + @Test public void testTransformFiltersRootLogger() { List<String> input = List.of( "rootLogger.appenderRef.console.ref = console", @@ -56,6 +60,7 @@ public void testTransformFiltersRootLogger() { /** * Check that any explicit 'console' or 'rolling_old' appenders are removed. */ + @Test public void testTransformRemoveExplicitConsoleAndRollingOldAppenders() { List<String> input = List.of( "appender.console.type = Console", @@ -74,6 +79,7 @@ public void testTransformRemoveExplicitConsoleAndRollingOldAppenders() { /** * Check that rolling file appenders are converted to console appenders. */ + @Test public void testTransformConvertsRollingToConsole() { List<String> input = List.of("appender.rolling.type = RollingFile", "appender.rolling.name = rolling"); @@ -85,6 +91,7 @@ public void testTransformConvertsRollingToConsole() { /** * Check that rolling file appenders have redundant properties removed. */ + @Test public void testTransformRemovedRedundantProperties() { List<String> input = List.of( "appender.rolling.fileName = ${sys:es.logs.base_path}/${sys:es.logs.cluster_name}_server.json", @@ -106,6 +113,7 @@ public void testTransformRemovedRedundantProperties() { /** * Check that rolling file appenders have redundant properties removed. */ + @Test public void testTransformSkipsPropertiesWithLineBreaks() { List<String> input = List.of( "appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}\\", @@ -121,6 +129,7 @@ public void testTransformSkipsPropertiesWithLineBreaks() { /** * Check that as well as skipping old appenders, logger references to them are also skipped. */ + @Test public void testTransformSkipsOldAppenderRefs() { List<String> input = List.of( "logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling_old.ref = index_indexing_slowlog_rolling_old" @@ -132,6 +141,7 @@ public void testTransformSkipsOldAppenderRefs() { /** * Check that multiple blank lines are reduced to a single line. 
*/ + @Test public void testMultipleBlanksReducedToOne() { List<String> input = List.of("status = error", "", "", "rootLogger.level = info"); diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTaskTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTaskTests.java index 38328459866c4..e6b1f5c90b72e 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTaskTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTaskTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.gradle.internal.precommit; import org.apache.groovy.util.Maps; -import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; import org.gradle.api.Action; import org.gradle.api.GradleException; import org.gradle.api.Project; @@ -34,7 +33,7 @@ import static org.hamcrest.CoreMatchers.containsString; -public class DependencyLicensesTaskTests extends GradleUnitTestCase { +public class DependencyLicensesTaskTests { private static final String PERMISSIVE_LICENSE_TEXT = "Eclipse Public License - v 2.0"; private static final String STRICT_LICENSE_TEXT = "GNU LESSER GENERAL PUBLIC LICENSE Version 3"; diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/FilePermissionsTaskTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/FilePermissionsTaskTests.java index 1ad3685a8df52..2e70e76c7b2ca 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/FilePermissionsTaskTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/FilePermissionsTaskTests.java @@ -7,16 +7,14 @@ */ package org.elasticsearch.gradle.internal.precommit; -import com.carrotsearch.randomizedtesting.RandomizedTest; - import org.apache.tools.ant.taskdefs.condition.Os; -import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.GradleException; import org.gradle.api.Project; import org.gradle.api.plugins.JavaPlugin; import org.gradle.testfixtures.ProjectBuilder; import org.junit.Assert; +import org.junit.Test; import java.io.File; import java.nio.charset.Charset; @@ -24,10 +22,15 @@ import java.util.List; import java.util.stream.Collectors; -public class FilePermissionsTaskTests extends GradleUnitTestCase { +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; + +public class FilePermissionsTaskTests { + @Test public void testCheckPermissionsWhenAnExecutableFileExists() throws Exception { - RandomizedTest.assumeFalse("Functionality is Unix specific", Os.isFamily(Os.FAMILY_WINDOWS)); + assumeFalse("Functionality is Unix specific", Os.isFamily(Os.FAMILY_WINDOWS)); Project project = createProject(); @@ -46,8 +49,9 @@ public void testCheckPermissionsWhenAnExecutableFileExists() throws Exception { file.delete(); } + @Test public void testCheckPermissionsWhenNoFileExists() throws Exception { - RandomizedTest.assumeFalse("Functionality is Unix specific", Os.isFamily(Os.FAMILY_WINDOWS)); + assumeFalse("Functionality is Unix specific", Os.isFamily(Os.FAMILY_WINDOWS)); Project project = createProject(); @@ -60,8 +64,9 @@ public void testCheckPermissionsWhenNoFileExists() throws Exception { assertEquals("done", result.get(0)); } + @Test public void 
testCheckPermissionsWhenNoExecutableFileExists() throws Exception { - RandomizedTest.assumeFalse("Functionality is Unix specific", Os.isFamily(Os.FAMILY_WINDOWS)); + assumeFalse("Functionality is Unix specific", Os.isFamily(Os.FAMILY_WINDOWS)); Project project = createProject(); diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/ForbiddenPatternsTaskTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/ForbiddenPatternsTaskTests.java index 0fc234536a4db..919f3c78ffb9a 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/ForbiddenPatternsTaskTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/ForbiddenPatternsTaskTests.java @@ -7,13 +7,13 @@ */ package org.elasticsearch.gradle.internal.precommit; -import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.GradleException; import org.gradle.api.Project; import org.gradle.api.file.FileTree; import org.gradle.api.plugins.JavaPlugin; import org.gradle.testfixtures.ProjectBuilder; +import org.junit.Test; import java.io.File; import java.io.IOException; @@ -26,8 +26,13 @@ import java.util.concurrent.Callable; import java.util.stream.Collectors; -public class ForbiddenPatternsTaskTests extends GradleUnitTestCase { +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +public class ForbiddenPatternsTaskTests { + + @Test public void testCheckInvalidPatternsWhenNoSourceFilesExist() throws Exception { Project project = createProject(); ForbiddenPatternsTask task = createTask(project); @@ -35,6 +40,7 @@ public void testCheckInvalidPatternsWhenNoSourceFilesExist() throws Exception { checkAndAssertTaskSuccessful(task); } + @Test public void testCheckInvalidPatternsWhenSourceFilesExistNoViolation() throws Exception { Project project = createProject(); ForbiddenPatternsTask task = createTask(project); @@ -43,6 +49,7 @@ public void testCheckInvalidPatternsWhenSourceFilesExistNoViolation() throws Exc checkAndAssertTaskSuccessful(task); } + @Test public void testCheckInvalidPatternsWhenSourceFilesExistHavingTab() throws Exception { Project project = createProject(); ForbiddenPatternsTask task = createTask(project); @@ -51,6 +58,7 @@ public void testCheckInvalidPatternsWhenSourceFilesExistHavingTab() throws Excep checkAndAssertTaskThrowsException(task); } + @Test public void testCheckInvalidPatternsWithCustomRule() throws Exception { Map<String, String> rule = new HashMap<>(); rule.put("name", "TODO comments are not allowed"); @@ -64,6 +72,7 @@ public void testCheckInvalidPatternsWithCustomRule() throws Exception { checkAndAssertTaskThrowsException(task); } + @Test public void testCheckInvalidPatternsWhenExcludingFiles() throws Exception { Project project = createProject(); ForbiddenPatternsTask task = createTask(project); diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/UpdateShasTaskTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/UpdateShasTaskTests.java index db44c4122ee9b..174c5d0312486 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/UpdateShasTaskTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/precommit/UpdateShasTaskTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.gradle.internal.precommit; 
import org.apache.commons.io.FileUtils; -import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; import org.gradle.api.GradleException; import org.gradle.api.Project; import org.gradle.api.artifacts.Dependency; @@ -30,8 +29,11 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; -public class UpdateShasTaskTests extends GradleUnitTestCase { +public class UpdateShasTaskTests { public static final String GROOVY_JAR_REGEX = "groovy-\\d\\.\\d+\\.\\d+\\.jar"; @Rule @@ -53,7 +55,6 @@ public void prepare() throws IOException { @Test public void whenDependencyDoesntExistThenShouldDeleteDependencySha() throws IOException, NoSuchAlgorithmException { - File unusedSha = createFileIn(getLicensesDir(project), "test.sha1", ""); task.updateShas(); @@ -84,10 +85,8 @@ public void whenDependencyAndWrongShaExistsThenShouldNotOverwriteShaFile() throw .findFirst() .get(); String groovyShaName = groovyJar.getName() + ".sha1"; - File groovySha = createFileIn(getLicensesDir(project), groovyShaName, "content"); task.updateShas(); - assertThat(FileUtils.readFileToString(groovySha), equalTo("content")); } diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTaskTest.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTaskTest.java index b020b1ee93126..f96ff6179101a 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTaskTest.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTaskTest.java @@ -8,7 +8,6 @@ package org.elasticsearch.gradle.internal.release; -import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; import org.junit.Before; import org.junit.Test; @@ -17,6 +16,7 @@ import java.util.Set; import java.util.stream.Stream; +import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -32,7 +32,7 @@ import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; -public class GenerateReleaseNotesTaskTest extends GradleUnitTestCase { +public class GenerateReleaseNotesTaskTest { private GitWrapper gitWrapper; @Before diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/PruneChangelogsTaskTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/PruneChangelogsTaskTests.java index e49f7dbf6ab56..90f4d6b1d353b 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/PruneChangelogsTaskTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/PruneChangelogsTaskTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.gradle.OS; import org.elasticsearch.gradle.internal.release.PruneChangelogsTask.DeleteHelper; -import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; import org.gradle.api.GradleException; import org.junit.Before; import org.junit.Test; @@ -25,8 +24,10 @@ import static org.elasticsearch.gradle.OS.WINDOWS; import static org.elasticsearch.gradle.internal.release.PruneChangelogsTask.findAndDeleteFiles; import static 
org.elasticsearch.gradle.internal.release.PruneChangelogsTask.findPreviousVersion; +import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; +import static org.junit.Assert.assertThrows; import static org.junit.Assume.assumeFalse; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyString; @@ -36,7 +37,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -public class PruneChangelogsTaskTests extends GradleUnitTestCase { +public class PruneChangelogsTaskTests { private GitWrapper gitWrapper; private DeleteHelper deleteHelper; @@ -152,7 +153,7 @@ public void findAndDeleteFiles_withFilesToDeleteButDeleteFails_throwsException() when(deleteHelper.deleteFiles(any())).thenReturn(Set.of(new File("rootDir/docs/changelog/1234.yml"))); // when: - GradleException e = expectThrows( + GradleException e = assertThrows( GradleException.class, () -> findAndDeleteFiles( gitWrapper, diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/TransformTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/TransformTests.java index 4de55b9ac1a7c..b608abb7c8a9c 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/TransformTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/TransformTests.java @@ -18,7 +18,6 @@ import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; import com.fasterxml.jackson.dataformat.yaml.YAMLParser; -import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; import org.elasticsearch.gradle.internal.test.rest.transform.headers.InjectHeaders; import org.hamcrest.CoreMatchers; import org.hamcrest.Matchers; @@ -38,7 +37,13 @@ import java.util.concurrent.atomic.LongAdder; import java.util.stream.Collectors; -public abstract class TransformTests extends GradleUnitTestCase { +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +public abstract class TransformTests { private static final YAMLFactory YAML_FACTORY = new YAMLFactory(); private static final ObjectMapper MAPPER = new ObjectMapper(YAML_FACTORY); diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/match/AddMatchTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/match/AddMatchTests.java index ce70b03f28dbe..126a6034009d6 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/match/AddMatchTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/match/AddMatchTests.java @@ -22,6 +22,12 @@ import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + public class AddMatchTests extends TransformTests { private static final YAMLFactory YAML_FACTORY = new YAMLFactory(); @@ -34,7 +40,7 @@ public void testAddAllNotSupported() throws Exception { JsonNode addNode = 
MAPPER.convertValue("_doc", JsonNode.class); assertEquals( "adding matches is only supported for named tests", - expectThrows( + assertThrows( NullPointerException.class, () -> transformTests(tests, Collections.singletonList(new AddMatch("_type", addNode, null))) ).getMessage() @@ -89,7 +95,6 @@ private void validateTest(List<ObjectNode> tests, boolean beforeTransformation) if (lastTestHasAddedObject.get() == false && matchObject.get("my_number") != null) { lastTestHasAddedObject.set(true); } - } }); assertTrue(lastTestHasMatchObject.get()); diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/match/RemoveMatchTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/match/RemoveMatchTests.java index 15497053a9b1e..8ae0cb4a83831 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/match/RemoveMatchTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/match/RemoveMatchTests.java @@ -23,6 +23,10 @@ import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + public class RemoveMatchTests extends TransformTests { private static final YAMLFactory YAML_FACTORY = new YAMLFactory(); diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarningsRegexTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarningsRegexTests.java index 70629620ac10e..0132fa5c0c52d 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarningsRegexTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarningsRegexTests.java @@ -19,6 +19,9 @@ import java.util.List; import java.util.Set; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; + public class InjectWarningsRegexTests extends InjectFeatureTests { private static final String WARNINGS_REGEX = "warnings_regex"; @@ -34,7 +37,7 @@ public void testInjectWarningsRequiresTestName() throws Exception { validateSetupDoesNotExist(tests); assertEquals( "inject warnings is only supported for named tests", - expectThrows( + assertThrows( NullPointerException.class, () -> transformTests(tests, Collections.singletonList(new InjectWarnings(new ArrayList<>(addWarnings), null))) ).getMessage() diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarningsTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarningsTests.java index b29e26974fe98..ef3ab0e84678d 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarningsTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/warnings/InjectWarningsTests.java @@ -19,6 +19,9 @@ import java.util.List; import java.util.Set; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; + public class InjectWarningsTests extends InjectFeatureTests { Set<String> addWarnings = Set.of("added warning"); private static final String WARNINGS = "warnings"; @@ 
-33,7 +36,7 @@ public void testInjectWarningsRequiresTestName() throws Exception { validateSetupDoesNotExist(tests); assertEquals( "inject warnings is only supported for named tests", - expectThrows( + assertThrows( NullPointerException.class, () -> transformTests(tests, Collections.singletonList(new InjectWarnings(new ArrayList<>(addWarnings), null))) ).getMessage() diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index 2804eb74bb71c..3e7e1981d104a 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -2,7 +2,7 @@ elasticsearch = 8.4.0 lucene = 9.3.0-snapshot-2d05f5c623e bundled_jdk_vendor = openjdk -bundled_jdk = 18.0.1.1+2@65ae32619e2f40f3a9af3af1851d6e19 +bundled_jdk = 18.0.2+9@f6ad4b4450fd4d298113270ec84f30ee # optional dependencies spatial4j = 0.7 diff --git a/build-tools/build.gradle b/build-tools/build.gradle index d2f23056ced11..093a12bf97fde 100644 --- a/build-tools/build.gradle +++ b/build-tools/build.gradle @@ -123,7 +123,6 @@ dependencies { testFixturesApi gradleApi() testFixturesApi gradleTestKit() testFixturesApi buildLibs.junit - testFixturesApi buildLibs.randomized.runner testFixturesApi buildLibs.wiremock testFixturesApi platform(buildLibs.spock.platform) testFixturesApi(buildLibs.spock.core) { diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java index 184043ba00b17..11ad0a29f5b8d 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java @@ -90,7 +90,9 @@ public List<ResourceLock> getSharedResources() { int nodeCount = clusters.stream().mapToInt(cluster -> cluster.getNodes().size()).sum(); if (nodeCount > 0) { - locks.add(resource.getResourceLock()); + for (int i = 0; i < Math.min(nodeCount, resource.getMaxUsages()); i++) { + locks.add(resource.getResourceLock()); + } } return Collections.unmodifiableList(locks); } diff --git a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/BaseTestCase.java b/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/BaseTestCase.java deleted file mode 100644 index 8aaa74b828b2a..0000000000000 --- a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/BaseTestCase.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.gradle.internal.test; - -import junit.framework.AssertionFailedError; - -import com.carrotsearch.randomizedtesting.JUnit4MethodProvider; -import com.carrotsearch.randomizedtesting.RandomizedRunner; -import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; - -import org.junit.Assert; -import org.junit.runner.RunWith; - -@RunWith(RandomizedRunner.class) -@TestMethodProviders({ JUnit4MethodProvider.class, JUnit3MethodProvider.class }) -@ThreadLeakLingering(linger = 5000) // wait for "Connection worker" to die -public abstract class BaseTestCase extends Assert { - - // add expectThrows from junit 5 - @FunctionalInterface - public interface ThrowingRunnable { - void run() throws Throwable; - } - - public static <T extends Throwable> T expectThrows(Class<T> expectedType, ThrowingRunnable runnable) { - try { - runnable.run(); - } catch (Throwable e) { - if (expectedType.isInstance(e)) { - return expectedType.cast(e); - } - AssertionFailedError assertion = new AssertionFailedError( - "Unexpected exception type, expected " + expectedType.getSimpleName() + " but got " + e - ); - assertion.initCause(e); - throw assertion; - } - throw new AssertionFailedError("Expected exception " + expectedType.getSimpleName() + " but no exception was thrown"); - } -} diff --git a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/GradleIntegrationTestCase.java b/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/GradleIntegrationTestCase.java deleted file mode 100644 index 5f83b678fdd80..0000000000000 --- a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/GradleIntegrationTestCase.java +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.gradle.internal.test; - -import org.apache.commons.io.FileUtils; -import org.gradle.testkit.runner.BuildResult; -import org.gradle.testkit.runner.BuildTask; -import org.gradle.testkit.runner.GradleRunner; -import org.gradle.testkit.runner.TaskOutcome; -import org.junit.Rule; -import org.junit.rules.TemporaryFolder; - -import java.io.File; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.lang.management.ManagementFactory; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.List; -import java.util.Objects; -import java.util.Set; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static org.hamcrest.CoreMatchers.containsString; - -public abstract class GradleIntegrationTestCase extends GradleUnitTestCase { - - @Rule - public TemporaryFolder testkitTmpDir = new TemporaryFolder(); - - public File workingProjectDir = null; - - public abstract String projectName(); - - protected File getProjectDir() { - if (workingProjectDir == null) { - File root = new File("src/testKit/"); - if (root.exists() == false) { - throw new RuntimeException( - "Could not find resources dir for integration tests. 
" - + "Note that these tests can only be ran by Gradle and are not currently supported by the IDE" - ); - } - try { - workingProjectDir = new File(testkitTmpDir.getRoot(), projectName()); - File sourcFolder = new File(root, projectName()); - FileUtils.copyDirectory(sourcFolder, workingProjectDir); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - - } - return workingProjectDir; - } - - protected GradleRunner getGradleRunner() { - File testkit; - try { - testkit = testkitTmpDir.newFolder(); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - return new InternalAwareGradleRunner( - GradleRunner.create() - .withProjectDir(getProjectDir()) - .withPluginClasspath() - .withTestKitDir(testkit) - .forwardOutput() - .withDebug(ManagementFactory.getRuntimeMXBean().getInputArguments().toString().indexOf("-agentlib:jdwp") > 0) - ); - } - - protected File getBuildDir(String name) { - return new File(getProjectDir(), "build"); - } - - protected void assertOutputContains(String output, String... lines) { - for (String line : lines) { - assertOutputContains(output, line); - } - List<Integer> index = Stream.of(lines).map(line -> output.indexOf(line)).collect(Collectors.toList()); - if (index.equals(index.stream().sorted().collect(Collectors.toList())) == false) { - fail( - "Expected the following lines to appear in this order:\n" - + Stream.of(lines).map(line -> " - `" + line + "`").collect(Collectors.joining("\n")) - + "\nTBut the order was different. Output is:\n\n```" - + output - + "\n```\n" - ); - } - } - - protected void assertOutputContains(String output, Set<String> lines) { - for (String line : lines) { - assertOutputContains(output, line); - } - } - - protected void assertOutputContains(String output, String line) { - assertThat("Expected the following line in output:\n\n" + line + "\n\nOutput is:\n" + output, output, containsString(line)); - } - - protected void assertOutputMissing(String output, String line) { - assertFalse("Expected the following line not to be in output:\n\n" + line + "\n\nOutput is:\n" + output, output.contains(line)); - } - - protected void assertOutputMissing(String output, String... lines) { - for (String line : lines) { - assertOutputMissing(line); - } - } - - protected void assertTaskFailed(BuildResult result, String taskName) { - assertTaskOutcome(result, taskName, TaskOutcome.FAILED); - } - - protected void assertTaskSuccessful(BuildResult result, String... taskNames) { - for (String taskName : taskNames) { - assertTaskOutcome(result, taskName, TaskOutcome.SUCCESS); - } - } - - protected void assertTaskSkipped(BuildResult result, String... taskNames) { - for (String taskName : taskNames) { - assertTaskOutcome(result, taskName, TaskOutcome.SKIPPED); - } - } - - protected void assertTaskNoSource(BuildResult result, String... taskNames) { - for (String taskName : taskNames) { - assertTaskOutcome(result, taskName, TaskOutcome.NO_SOURCE); - } - } - - private void assertTaskOutcome(BuildResult result, String taskName, TaskOutcome taskOutcome) { - BuildTask task = result.task(taskName); - if (task == null) { - fail( - "Expected task `" + taskName + "` to be " + taskOutcome + ", but it did not run" + "\n\nOutput is:\n" + result.getOutput() - ); - } - assertEquals( - "Expected task `" - + taskName - + "` to be " - + taskOutcome - + " but it was: " - + task.getOutcome() - + "\n\nOutput is:\n" - + result.getOutput(), - taskOutcome, - task.getOutcome() - ); - } - - protected void assertTaskUpToDate(BuildResult result, String... 
taskNames) { - for (String taskName : taskNames) { - BuildTask task = result.task(taskName); - if (task == null) { - fail("Expected task `" + taskName + "` to be up-to-date, but it did not run"); - } - assertEquals( - "Expected task to be up to date but it was: " + task.getOutcome() + "\n\nOutput is:\n" + result.getOutput(), - TaskOutcome.UP_TO_DATE, - task.getOutcome() - ); - } - } - - protected void assertNoDeprecationWarning(BuildResult result) { - assertOutputMissing(result.getOutput(), "Deprecated Gradle features were used in this build"); - } - - protected void assertBuildFileExists(BuildResult result, String projectName, String path) { - Path absPath = getBuildDir(projectName).toPath().resolve(path); - assertTrue( - result.getOutput() + "\n\nExpected `" + absPath + "` to exists but it did not" + "\n\nOutput is:\n" + result.getOutput(), - Files.exists(absPath) - ); - } - - protected void assertBuildFileDoesNotExists(BuildResult result, String projectName, String path) { - Path absPath = getBuildDir(projectName).toPath().resolve(path); - assertFalse( - result.getOutput() + "\n\nExpected `" + absPath + "` bo to exists but it did" + "\n\nOutput is:\n" + result.getOutput(), - Files.exists(absPath) - ); - } - - protected String getLocalTestDownloadsPath() { - return getLocalTestPath("test.local-test-downloads-path"); - } - - private String getLocalTestPath(String propertyName) { - String property = System.getProperty(propertyName); - Objects.requireNonNull(property, propertyName + " not passed to tests"); - File file = new File(property); - assertTrue("Expected " + property + " to exist, but it did not!", file.exists()); - if (File.separator.equals("\\")) { - // Use / on Windows too, the build script is not happy with \ - return file.getAbsolutePath().replace(File.separator, "/"); - } else { - return file.getAbsolutePath(); - } - } - - public void assertOutputOnlyOnce(String output, String... text) { - for (String each : text) { - int i = output.indexOf(each); - if (i == -1) { - fail("Expected \n```" + each + "```\nto appear at most once, but it didn't at all.\n\nOutout is:\n" + output); - } - if (output.indexOf(each) != output.lastIndexOf(each)) { - fail("Expected `" + each + "` to appear at most once, but it did multiple times.\n\nOutout is:\n" + output); - } - } - } - -} diff --git a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/GradleThreadsFilter.java b/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/GradleThreadsFilter.java deleted file mode 100644 index 24a12b87377a0..0000000000000 --- a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/GradleThreadsFilter.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.gradle.internal.test; - -import com.carrotsearch.randomizedtesting.ThreadFilter; - -/** - * Filter out threads controlled by gradle that may be created during unit tests. - * - * Currently this includes pooled threads for Exec as well as file system event watcher threads. 
- */ -public class GradleThreadsFilter implements ThreadFilter { - - @Override - public boolean reject(Thread t) { - return t.getName().startsWith("Exec process") - || t.getName().startsWith("File watcher consumer") - || t.getName().startsWith("Memory manager"); - - } -} diff --git a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/GradleUnitTestCase.java b/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/GradleUnitTestCase.java deleted file mode 100644 index c2025a13f46d2..0000000000000 --- a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/GradleUnitTestCase.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.gradle.internal.test; - -import com.carrotsearch.randomizedtesting.JUnit4MethodProvider; -import com.carrotsearch.randomizedtesting.RandomizedRunner; -import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; - -import org.junit.runner.RunWith; - -@RunWith(RandomizedRunner.class) -@TestMethodProviders({ JUnit4MethodProvider.class, JUnit3MethodProvider.class }) -@ThreadLeakFilters(defaultFilters = true, filters = { GradleThreadsFilter.class }) -public abstract class GradleUnitTestCase extends BaseTestCase {} diff --git a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/JUnit3MethodProvider.java b/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/JUnit3MethodProvider.java deleted file mode 100644 index b4c9123384dab..0000000000000 --- a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/JUnit3MethodProvider.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.gradle.internal.test; - -import com.carrotsearch.randomizedtesting.ClassModel; -import com.carrotsearch.randomizedtesting.ClassModel.MethodModel; -import com.carrotsearch.randomizedtesting.TestMethodProvider; - -import java.lang.reflect.Method; -import java.lang.reflect.Modifier; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Map; - -/** - * Backwards compatible test* method provider (public, non-static). - * - * Copy of org.apache.lucene.util.LuceneJUnit3MethodProvider to avoid a dependency between the build and the test framework. - */ -public final class JUnit3MethodProvider implements TestMethodProvider { - @Override - public Collection getTestMethods(Class suiteClass, ClassModel classModel) { - Map methods = classModel.getMethods(); - ArrayList result = new ArrayList<>(); - for (MethodModel mm : methods.values()) { - // Skip any methods that have overrides / shadows.
- if (mm.getDown() != null) continue; - - Method m = mm.element; - if (m.getName().startsWith("test") - && Modifier.isPublic(m.getModifiers()) - && Modifier.isStatic(m.getModifiers()) == false - && m.getParameterTypes().length == 0) { - result.add(m); - } - } - return result; - } -} diff --git a/distribution/docker/src/docker/bin/docker-entrypoint.sh b/distribution/docker/src/docker/bin/docker-entrypoint.sh index 8ea9fcb2c0f86..d7b41b81bb7e3 100755 --- a/distribution/docker/src/docker/bin/docker-entrypoint.sh +++ b/distribution/docker/src/docker/bin/docker-entrypoint.sh @@ -81,4 +81,4 @@ fi # Signal forwarding and child reaping is handled by `tini`, which is the # actual entrypoint of the container -exec /usr/share/elasticsearch/bin/elasticsearch $POSITIONAL_PARAMETERS <<<"$KEYSTORE_PASSWORD" +exec /usr/share/elasticsearch/bin/elasticsearch "$@" $POSITIONAL_PARAMETERS <<<"$KEYSTORE_PASSWORD" diff --git a/docs/changelog/86838.yaml b/docs/changelog/86838.yaml new file mode 100644 index 0000000000000..77e1e8b2f8956 --- /dev/null +++ b/docs/changelog/86838.yaml @@ -0,0 +1,5 @@ +pr: 86838 +summary: Adding cardinality support for `random_sampler` agg +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/88221.yaml b/docs/changelog/88221.yaml new file mode 100644 index 0000000000000..279133075c519 --- /dev/null +++ b/docs/changelog/88221.yaml @@ -0,0 +1,6 @@ +pr: 88221 +summary: Propagate alias filters to significance aggs filters +area: Aggregations +type: bug +issues: + - 81585 diff --git a/docs/changelog/88333.yaml b/docs/changelog/88333.yaml new file mode 100644 index 0000000000000..f72dbe19ff1b7 --- /dev/null +++ b/docs/changelog/88333.yaml @@ -0,0 +1,5 @@ +pr: 88333 +summary: "Script: Metadata for update context" +area: Infra/Scripting +type: enhancement +issues: [] diff --git a/docs/changelog/88584.yaml b/docs/changelog/88584.yaml new file mode 100644 index 0000000000000..77eb177633c11 --- /dev/null +++ b/docs/changelog/88584.yaml @@ -0,0 +1,5 @@ +pr: 88584 +summary: Fix docker positional params +area: Packaging +type: bug +issues: [] diff --git a/docs/changelog/88603.yaml b/docs/changelog/88603.yaml new file mode 100644 index 0000000000000..b369852823a2f --- /dev/null +++ b/docs/changelog/88603.yaml @@ -0,0 +1,5 @@ +pr: 88603 +summary: Enable synthetic source support on constant keyword fields +area: Mapping +type: enhancement +issues: [] diff --git a/docs/changelog/88619.yaml b/docs/changelog/88619.yaml new file mode 100644 index 0000000000000..7a31e2748d19a --- /dev/null +++ b/docs/changelog/88619.yaml @@ -0,0 +1,5 @@ +pr: 88619 +summary: Handle update error correctly +area: Transform +type: bug +issues: [] diff --git a/docs/changelog/88626.yaml b/docs/changelog/88626.yaml new file mode 100644 index 0000000000000..406f8ac8199b8 --- /dev/null +++ b/docs/changelog/88626.yaml @@ -0,0 +1,5 @@ +pr: 88626 +summary: Adding the ability to register a `PeerFinderListener` to Coordinator +area: Distributed +type: enhancement +issues: [] diff --git a/docs/changelog/88638.yaml b/docs/changelog/88638.yaml new file mode 100644 index 0000000000000..1765fa300bf11 --- /dev/null +++ b/docs/changelog/88638.yaml @@ -0,0 +1,5 @@ +pr: 88638 +summary: Fix multi-value handling in composite agg +area: Aggregations +type: bug +issues: [] diff --git a/docs/changelog/88641.yaml b/docs/changelog/88641.yaml new file mode 100644 index 0000000000000..1d7a784cd41ce --- /dev/null +++ b/docs/changelog/88641.yaml @@ -0,0 +1,5 @@ +pr: 88641 +summary: Replace health request with a state observer 
+area: Allocation +type: bug +issues: [] diff --git a/docs/changelog/88655.yaml b/docs/changelog/88655.yaml new file mode 100644 index 0000000000000..eb4c35c01c6be --- /dev/null +++ b/docs/changelog/88655.yaml @@ -0,0 +1,5 @@ +pr: 88655 +summary: Make `bucket_correlation` aggregation generally available +area: Machine Learning +type: feature +issues: [] diff --git a/docs/changelog/88675.yaml b/docs/changelog/88675.yaml new file mode 100644 index 0000000000000..137e418269f56 --- /dev/null +++ b/docs/changelog/88675.yaml @@ -0,0 +1,6 @@ +pr: 88675 +summary: Upgrade to OpenJDK 18.0.2+9 +area: Packaging +type: upgrade +issues: + - 88673 diff --git a/docs/reference/aggregations/bucket/random-sampler-aggregation.asciidoc b/docs/reference/aggregations/bucket/random-sampler-aggregation.asciidoc index dfbacbf43c175..efd025ed67ee6 100644 --- a/docs/reference/aggregations/bucket/random-sampler-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/random-sampler-aggregation.asciidoc @@ -94,3 +94,17 @@ higher sampling rates, the relative error is still low. NOTE: This represents the result of aggregations against a typical positively skewed APM data set which also has outliers in the upper tail. The linear dependence of the relative error on the sample size is found to hold widely, but the slope depends on the variation in the quantity being aggregated. As such, the variance in your own data may cause relative error rates to increase or decrease at a different rate. + +[[random-sampler-special-cases]] +==== Random sampling special cases + +All counts returned by the random sampler aggregation are scaled to ease visualizations and calculations. For example, +when randomly sampling a <>, every +`doc_count` value for every bucket is scaled by the inverse of the random_sampler `probability` value. So, if `doc_count` +for a bucket is `10,000` with `probability: 0.1`, the actual number of documents aggregated is `1,000`. + +An exception to this is <>. Unique item +counts are not suitable for automatic scaling. When interpreting the cardinality count, compare it +to the number of sampled docs provided in the top level `doc_count` within the random_sampler aggregation. This gives +you an idea of unique values as a percentage of total values; it may not, however, reflect the exact number of unique +values for the given field. A short arithmetic sketch of both rules follows below. diff --git a/docs/reference/aggregations/pipeline/bucket-correlation-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-correlation-aggregation.asciidoc index 841632124805f..ae6ceb2f16c94 100644 --- a/docs/reference/aggregations/pipeline/bucket-correlation-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-correlation-aggregation.asciidoc @@ -5,8 +5,6 @@ Bucket correlation ++++ -experimental::[] - A sibling pipeline aggregation which executes a correlation function on the configured sibling multi-bucket aggregation. diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc index 702fb10b2f4f8..a7bfcfb57aba1 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc @@ -13,8 +13,7 @@ reduces the volume of data that must be considered while detecting anomalies.
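Returning briefly to the random sampler count scaling promised above: here is the arithmetic as a minimal, self-contained sketch. This is plain Java and purely illustrative; the values and variable names are hypothetical, not taken from any real response.

[source,java]
----
// Illustrative sketch of random_sampler count scaling; all values are made up.
double probability = 0.1;        // the random_sampler `probability` in the request
long reportedDocCount = 10_000;  // the scaled `doc_count` reported for a bucket

// Reported counts are scaled by 1/probability, so the number of documents the
// sampled aggregation actually visited is the reported count times probability.
long actuallyAggregated = Math.round(reportedDocCount * probability); // 1_000

// Cardinality values are not scaled. Compare them to the sampled doc count to
// estimate uniqueness as a fraction of the sample.
long reportedCardinality = 250;  // hypothetical unique count from the response
double uniqueFraction = (double) reportedCardinality / actuallyAggregated; // 0.25
----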
TIP: If you use a terms aggregation and the cardinality of a term is high but still significantly less than your total number of documents, use -{ref}/search-aggregations-bucket-composite-aggregation.html[composite aggregations] -experimental:[Support for composite aggregations inside datafeeds is currently experimental]. +{ref}/search-aggregations-bucket-composite-aggregation.html[composite aggregations]. [discrete] [[aggs-limits-dfeeds]] @@ -78,7 +77,7 @@ PUT _ml/anomaly_detectors/farequote }, "data_description": { "time_field":"time" <1> - }, + }, "datafeed_config":{ "indices": ["farequote"], "aggregations": { @@ -137,8 +136,6 @@ includes all the values of the field instead of the top values per bucket. [[aggs-using-composite]] === Using composite aggregations in {anomaly-jobs} -experimental::[] - For `composite` aggregation support, there must be exactly one `date_histogram` value source. That value source must not be sorted in descending order. Additional `composite` aggregation value sources are allowed, such as `terms`. @@ -147,7 +144,7 @@ NOTE: A {dfeed} that uses composite aggregations may not be as performant as {dfeeds} that use scrolling or date histogram aggregations. Composite aggregations are optimized for queries that are either `match_all` or `range` filters. Other types of -queries may cause the `composite` aggregation to be ineffecient. +queries may cause the `composite` aggregation to be inefficient. Here is an example that uses a `composite` aggregation instead of a `date_histogram`. @@ -429,8 +426,7 @@ different values of a field. IMPORTANT: If you use a terms aggregation, by default it returns buckets for the top ten terms. Thus if the cardinality of the term is greater than 10, not -all terms are analyzed. In this case, consider using `composite` aggregations -experimental:[Support for composite aggregations inside datafeeds is currently experimental]. +all terms are analyzed. In this case, consider using `composite` aggregations. You can change this behavior by setting the `size` parameter. To determine the cardinality of your data, you can run searches such as: diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index 1a9720c05530f..3fb6c11181a19 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -158,7 +158,7 @@ GET my_locations,my_geoshapes/_search -------------------------------------------------- [discrete] -==== Query Options +==== Query options [cols="<,<",options="header",] |======================================================================= @@ -173,13 +173,13 @@ accept geo points with invalid latitude or longitude, set to [[query-dsl-geo-bounding-box-query-accepted-formats]] [discrete] -==== Accepted Formats +==== Accepted formats In much the same way the `geo_point` type can accept different representations of the geo point, the filter can accept it as well: [discrete] -===== Lat Lon As Properties +===== Lat lon as properties [source,console] -------------------------------------------------- @@ -210,7 +210,7 @@ GET my_locations/_search -------------------------------------------------- [discrete] -===== Lat Lon As Array +===== Lat lon as array Format in `[lon, lat]`, note, the order of lon/lat here in order to conform with http://geojson.org/[GeoJSON]. 
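Since the lon/lat ordering called out above is a common source of bugs, here is the difference between the accepted formats as a minimal sketch. Plain Java, purely illustrative; the variable names are hypothetical.

[source,java]
----
// The same point expressed in the formats this section describes.
double lat = 40.73, lon = -74.1;

String asProperties = "{\"lat\": " + lat + ", \"lon\": " + lon + "}"; // named fields
String asArray = "[" + lon + ", " + lat + "]"; // GeoJSON order: [lon, lat]
String asString = lat + "," + lon;             // string form: lat,lon

// Note the reversal: the array form is lon-first, the string form is lat-first.
----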
@@ -238,7 +238,7 @@ GET my_locations/_search -------------------------------------------------- [discrete] -===== Lat Lon As String +===== Lat lon as string Format in `lat,lon`. @@ -265,7 +265,7 @@ GET my_locations/_search -------------------------------------------------- [discrete] -===== Bounding Box as Well-Known Text (WKT) +===== Bounding box as well-known text (WKT) [source,console] -------------------------------------------------- @@ -348,11 +348,9 @@ corner at `39.375,-67.5`. ==== Vertices The vertices of the bounding box can either be set by `top_left` and -`bottom_right` or by `top_right` and `bottom_left` parameters. More -over the names `topLeft`, `bottomRight`, `topRight` and `bottomLeft` -are supported. Instead of setting the values pairwise, one can use -the simple names `top`, `left`, `bottom` and `right` to set the -values separately. +`bottom_right` or by `top_right` and `bottom_left` parameters. Instead of +setting the values pairwise, one can use the simple names `top`, `left`, +`bottom` and `right` to set the values separately. [source,console] -------------------------------------------------- @@ -379,14 +377,14 @@ GET my_locations/_search -------------------------------------------------- [discrete] -==== Multi Location Per Document +==== Multi location per document The filter can work with multiple locations / points per document. Once a single location / point matches the filter, the document will be included in the filter [discrete] -==== Ignore Unmapped +==== Ignore unmapped When set to `true` the `ignore_unmapped` option will ignore an unmapped field and will not match any documents for this query. This can be useful when @@ -395,7 +393,7 @@ querying multiple indexes which might have different mappings. When set to is not mapped. [discrete] -==== Notes on Precision +==== Notes on precision Geopoints have limited precision and are always rounded down during index time. During the query time, upper boundaries of the bounding boxes are rounded down, diff --git a/docs/reference/query-dsl/geo-distance-query.asciidoc b/docs/reference/query-dsl/geo-distance-query.asciidoc index c87d185dd229c..5c1b0a1ecfc3f 100644 --- a/docs/reference/query-dsl/geo-distance-query.asciidoc +++ b/docs/reference/query-dsl/geo-distance-query.asciidoc @@ -146,13 +146,13 @@ GET my_locations,my_geoshapes/_search [discrete] -==== Accepted Formats +==== Accepted formats In much the same way the `geo_point` type can accept different representations of the geo point, the filter can accept it as well: [discrete] -===== Lat Lon As Properties +===== Lat lon as properties [source,console] -------------------------------------------------- @@ -178,7 +178,7 @@ GET /my_locations/_search -------------------------------------------------- [discrete] -===== Lat Lon As Array +===== Lat lon as array Format in `[lon, lat]`, note, the order of lon/lat here in order to conform with http://geojson.org/[GeoJSON]. @@ -205,7 +205,7 @@ GET /my_locations/_search [discrete] -===== Lat Lon As WKT String +===== Lat lon as WKT string Format in https://docs.opengeospatial.org/is/12-063r5/12-063r5.html[Well-Known Text]. @@ -280,14 +280,14 @@ The following are options allowed on the filter: coordinates (default is `STRICT`). [discrete] -==== Multi Location Per Document +==== Multi location per document The `geo_distance` filter can work with multiple locations / points per document. Once a single location / point matches the filter, the document will be included in the filter. 
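To make the multi-location semantics concrete: a document with several points matches as soon as any one point falls inside the radius. The sketch below illustrates the idea using a haversine great-circle distance on a spherical Earth; it is an approximation for illustration only, not Elasticsearch's actual implementation, and every name in it is hypothetical.

[source,java]
----
// Conceptual sketch of geo_distance matching for a multi-valued field.
class GeoDistanceSketch {
    // A document matches when at least one of its points is within the radius.
    static boolean matches(double[][] docPoints, double originLat, double originLon, double radiusMeters) {
        for (double[] p : docPoints) { // each p = {lat, lon}
            if (haversineMeters(p[0], p[1], originLat, originLon) <= radiusMeters) {
                return true;
            }
        }
        return false;
    }

    // Great-circle distance on a sphere of mean Earth radius (6,371 km).
    static double haversineMeters(double lat1, double lon1, double lat2, double lon2) {
        double r = 6_371_000d;
        double dLat = Math.toRadians(lat2 - lat1);
        double dLon = Math.toRadians(lon2 - lon1);
        double a = Math.sin(dLat / 2) * Math.sin(dLat / 2)
            + Math.cos(Math.toRadians(lat1)) * Math.cos(Math.toRadians(lat2)) * Math.sin(dLon / 2) * Math.sin(dLon / 2);
        return 2 * r * Math.atan2(Math.sqrt(a), Math.sqrt(1 - a));
    }
}
----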
[discrete] -==== Ignore Unmapped +==== Ignore unmapped When set to `true` the `ignore_unmapped` option will ignore an unmapped field and will not match any documents for this query. This can be useful when diff --git a/docs/reference/query-dsl/geo-polygon-query.asciidoc b/docs/reference/query-dsl/geo-polygon-query.asciidoc index 723e91e3bc6fb..c2f4f52c0c3f8 100644 --- a/docs/reference/query-dsl/geo-polygon-query.asciidoc +++ b/docs/reference/query-dsl/geo-polygon-query.asciidoc @@ -35,7 +35,7 @@ GET /_search // TEST[warning:Deprecated field [geo_polygon] used, replaced by [[geo_shape] query where polygons are defined in geojson or wkt]] [discrete] -==== Query Options +==== Query options [cols="<,<",options="header",] |======================================================================= @@ -48,10 +48,10 @@ or longitude, or `STRICT` (default is `STRICT`). |======================================================================= [discrete] -==== Allowed Formats +==== Allowed formats [discrete] -===== Lat Long as Array +===== Lat long as array Format as `[lon, lat]` @@ -85,7 +85,7 @@ GET /_search // TEST[warning:Deprecated field [geo_polygon] used, replaced by [[geo_shape] query where polygons are defined in geojson or wkt]] [discrete] -===== Lat Lon as String +===== Lat lon as string Format in `lat,lon`. @@ -151,7 +151,7 @@ The query *requires* the <> type to be set on the relevant field. [discrete] -==== Ignore Unmapped +==== Ignore unmapped When set to `true` the `ignore_unmapped` option will ignore an unmapped field and will not match any documents for this query. This can be useful when diff --git a/docs/reference/query-dsl/geo-shape-query.asciidoc b/docs/reference/query-dsl/geo-shape-query.asciidoc index 0ab731d1355fb..570a104a18480 100644 --- a/docs/reference/query-dsl/geo-shape-query.asciidoc +++ b/docs/reference/query-dsl/geo-shape-query.asciidoc @@ -21,7 +21,7 @@ pre-indexed in another index. Both formats are defined below with examples. -==== Inline Shape Definition +==== Inline shape definition Similar to the `geo_point` type, the `geo_shape` query uses http://geojson.org[GeoJSON] to represent shapes. @@ -169,7 +169,7 @@ GET /example_points/_search // TESTRESPONSE[s/"took" : 17/"took" : $body.took/] -==== Pre-Indexed Shape +==== Pre-indexed shape The query also supports using a shape which has already been indexed in another index. This is particularly useful for when you have a pre-defined list of @@ -229,7 +229,7 @@ GET /example/_search -------------------------------------------------- -==== Spatial Relations +==== Spatial relations The following is a complete list of spatial relation operators available when searching a geo field: @@ -244,7 +244,7 @@ geometry. Line geometries are not supported. geometry. [discrete] -==== Ignore Unmapped +==== Ignore unmapped When set to `true` the `ignore_unmapped` option will ignore an unmapped field and will not match any documents for this query. This can be useful when diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 86b54f753c6bd..237d77d0a0c94 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -192,22 +192,12 @@ See <>. // [START] Security redirects -[role="exclude",id="security-minimal-setup"] -=== Set up minimal security for {es} - -Refer to <>. - [role="exclude",id="get-started-users"] ==== Create users Refer to the <> tool for resetting passwords of built-in users. 
-[role="exclude",id="add-built-in-users"] -==== Configure {kib} to connect to {es} with a password - -Refer to <>. - [role="exclude",id="encrypting-communications-certificates"] === Generate certificates diff --git a/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc index 53990eed22de1..e685badc31d4b 100644 --- a/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc @@ -111,6 +111,11 @@ comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. Defaults to `false`, meaning that this information is omitted. +`include_repository`:: +(Optional, Boolean) +If `true`, returns the repository name for each snapshot in the response. +Defaults to `true`. + `sort`:: (Optional, string) Allows setting a sort order for the result. Defaults to `start_time`, i.e. sorting by snapshot start time stamp. diff --git a/docs/reference/transform/painless-examples.asciidoc b/docs/reference/transform/painless-examples.asciidoc index 8b7e73f529c25..8f8048694bf57 100644 --- a/docs/reference/transform/painless-examples.asciidoc +++ b/docs/reference/transform/painless-examples.asciidoc @@ -573,7 +573,7 @@ POST _transform/_preview all_docs.add(span); } } - all_docs.sort((HashMap o1, HashMap o2)->o1['@timestamp'].toEpochMilli()compareTo(o2['@timestamp']-toEpochMilli())); + all_docs.sort((HashMap o1, HashMap o2)->o1['@timestamp'].toEpochMilli().compareTo(o2['@timestamp'].toEpochMilli())); def size = all_docs.size(); def min_time = all_docs[0]['@timestamp']; def max_time = all_docs[size-1]['@timestamp']; @@ -644,4 +644,4 @@ The API call results in a similar response: } ... 
-------------------------------------------------- -// NOTCONSOLE \ No newline at end of file +// NOTCONSOLE diff --git a/gradle/build.versions.toml b/gradle/build.versions.toml index 10da3176fbbec..ce56380f18e50 100644 --- a/gradle/build.versions.toml +++ b/gradle/build.versions.toml @@ -33,7 +33,6 @@ junit5-vintage = { group = "org.junit.vintage", name="junit-vintage-engine", ver maven-model = "org.apache.maven:maven-model:3.6.2" mockito-core = "org.mockito:mockito-core:1.9.5" nebula-info = "com.netflix.nebula:gradle-info-plugin:11.3.3" -randomized-runner = "com.carrotsearch.randomizedtesting:randomizedtesting-runner:2.7.7" shadow-plugin = "gradle.plugin.com.github.johnrengelman:shadow:7.1.2" spock-core = { group = "org.spockframework", name="spock-core", version.ref="spock" } spock-junit4 = { group = "org.spockframework", name="spock-junit4", version.ref="spock" } diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt index 1aacbb4d8cf40..ee2846331ee0f 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt @@ -18,3 +18,17 @@ class org.elasticsearch.painless.api.Json { String dump(def) String dump(def, boolean) } + +class org.elasticsearch.script.Metadata { + String getIndex() + String getId() + String getRouting() + long getVersion() + String getOp() + void setOp(String) + ZonedDateTime getTimestamp() +} + +class org.elasticsearch.script.UpdateScript { + Metadata metadata() +} diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml index a23a27a2e6578..e00f82af7cc76 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml @@ -123,3 +123,47 @@ - match: { error.root_cause.0.type: "illegal_argument_exception" } - match: { error.type: "illegal_argument_exception" } - match: { error.reason: "Iterable object is self-referencing itself" } + +--- +"Script Update Metadata": + - skip: + version: " - 8.3.99" + reason: "update metadata introduced in 8.4.0" + + - do: + update: + index: test_1 + id: "2" + body: + script: + source: "ctx._source.bar = metadata().id + '-extra'" + lang: "painless" + upsert: {} + scripted_upsert: true + + - do: + get: + index: test_1 + id: "2" + + - match: { _source.bar: 2-extra } + - match: { found: true } + + - do: + update: + index: test_1 + id: "2" + body: + script: + source: "metadata().op = 'delete'" + lang: "painless" + upsert: {} + scripted_upsert: true + + - do: + catch: missing + get: + index: test_1 + id: "2" + + - match: { found: false } diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/25_script_upsert.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/25_script_upsert.yml index 559a54d28a19e..865bed8de24e9 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/25_script_upsert.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/25_script_upsert.yml @@ -93,3 +93,97 @@ id: "4" - match: { 
_source.within_one_minute: true } + +--- +"Script Upsert Metadata": + - skip: + version: " - 8.3.99" + reason: "update metadata introduced in 8.4.0" + + - do: + catch: /routing is unavailable for insert/ + update: + index: test_1 + id: "1" + body: + script: + source: "ctx._source.foo = metadata().routing" + lang: "painless" + upsert: {} + scripted_upsert: true + + - do: + update: + index: test_1 + id: "2" + body: + script: + source: "ctx._source.foo = metadata().index + '_1'; ctx._source.bar = 'nothing'" + lang: "painless" + upsert: {} + scripted_upsert: true + + - do: + get: + index: test_1 + id: "2" + + - match: { _source.foo: test_1_1 } + - match: { _source.bar: nothing } + + - do: + update: + index: test_1 + id: "3" + body: + script: + source: "metadata().op = 'noop'; ctx._source.bar = 'skipped?'" + lang: "painless" + upsert: {} + scripted_upsert: true + + - do: + catch: missing + get: + index: test_1 + id: "3" + + - match: { found: false } + + - do: + update: + index: test_1 + id: "3" + body: + script: + source: "metadata().op = 'create'; ctx._source.bar = 'skipped?'" + lang: "painless" + upsert: {} + scripted_upsert: true + + - do: + get: + index: test_1 + id: "3" + + - match: { found: true } + - match: { _source.bar: "skipped?" } + + # update + - do: + update: + index: test_1 + id: "2" + body: + script: + source: "ctx._source.bar = metadata().op + '-extra'" + lang: "painless" + upsert: {} + scripted_upsert: true + + - do: + get: + index: test_1 + id: "2" + + - match: { _source.bar: index-extra } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java index 3a6a7fcb6c42c..02d5aaef31098 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java @@ -53,7 +53,7 @@ protected T applyScript(Consumer> (params, ctx) -> new UpdateScript(Collections.emptyMap(), ctx) { @Override public void execute() { - scriptBody.accept(getCtx()); + scriptBody.accept(ctx); } } ); diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java index 2c2d4529ceb0d..0a6c03c9b6dd1 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -131,8 +131,8 @@ RequestRetryOptions getRetryOptions(LocationMode locationMode, AzureStorageSetti RetryPolicyType.EXPONENTIAL, azureStorageSettings.getMaxRetries() + 1, 60, - 50L, - 100L, + 5L, + 10L, null ); } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java index 0b4b48f8b87ea..b151ce51fa806 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java @@ -1096,6 +1096,23 @@ public void test170DefaultShellIsBash() { } } + /** + * Ensure that it is possible to apply CLI options when running the image. 
+ */ + public void test171AdditionalCliOptionsAreForwarded() throws Exception { + assumeTrue( + "Does not apply to Cloud images, because they don't use the default entrypoint", + distribution.packaging != Packaging.DOCKER_CLOUD && distribution().packaging != Packaging.DOCKER_CLOUD_ESS + ); + + runContainer(distribution(), builder().runArgs("bin/elasticsearch", "-Ecluster.name=kimchy").envVar("ELASTIC_PASSWORD", PASSWORD)); + waitForElasticsearch(installation, "elastic", PASSWORD); + + final JsonNode node = getJson("/", "elastic", PASSWORD, ServerUtils.getCaCert(installation)); + + assertThat(node.get("cluster_name").textValue(), equalTo("kimchy")); + } + /** * Check that the UBI images has the correct license information in the correct place. */ @@ -1193,7 +1210,7 @@ private List listPlugins() { /** * Check that readiness listener works */ - public void testReadiness001() throws Exception { + public void test500Readiness() throws Exception { assertFalse(readinessProbe(9399)); // Disabling security so we wait for green installation = runContainer( diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java index caae6e2635c0f..feb95b5eb2d93 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java @@ -34,6 +34,7 @@ public class DockerRun { private Integer uid; private Integer gid; private final List extraArgs = new ArrayList<>(); + private final List runArgs = new ArrayList<>(); private String memory = "2g"; // default to 2g memory limit private DockerRun() {} @@ -95,6 +96,11 @@ public DockerRun extraArgs(String... args) { return this; } + public DockerRun runArgs(String... args) { + Collections.addAll(this.runArgs, args); + return this; + } + String build() { final List cmd = new ArrayList<>(); @@ -144,6 +150,8 @@ String build() { // Image name cmd.add(getImageName(distribution)); + cmd.addAll(this.runArgs); + return String.join(" ", cmd); } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json index 927e10f2261bb..23f5f737995d0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json @@ -50,6 +50,38 @@ "type":"boolean", "description":"Whether to include the repository name in the snapshot info. Defaults to true." }, + "sort": { + "type": "enum", + "default": "start_time", + "options": ["start_time", "duration", "name", "repository", "index_count", "shard_count", "failed_shard_count"], + "description": "Allows setting a sort order for the result. Defaults to start_time" + }, + "size": { + "type": "integer", + "description": "Maximum number of snapshots to return. Defaults to 0 which means return all that match without limit." + }, + "order": { + "type": "enum", + "default": "asc", + "options": ["asc", "desc"], + "description": "Sort order" + }, + "from_sort_value": { + "type": "string", + "description": "Value of the current sort column at which to start retrieval." + }, + "after": { + "type": "string", + "description": "Offset identifier to start pagination from as returned by the 'next' field in the response body." + }, + "offset": { + "type": "integer", + "description": "Numeric offset to start pagination based on the snapshots matching the request. 
Defaults to 0" + }, + "slm_policy_filter": { + "type": "string", + "description": "Filter snapshots by a comma-separated list of SLM policy names that snapshots belong to. Accepts wildcards. Use the special pattern '_none' to match snapshots without an SLM policy" + }, "verbose":{ "type":"boolean", "description":"Whether to show verbose snapshot info or only show the basic info found in the repository index blob" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml index 147048c8dce93..777820c0c0ba8 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml @@ -233,3 +233,52 @@ significant_terms: field: foo jlp: {} + +--- +"Test alias background filter": + - skip: + version: " - 8.3.99" + reason: fixed in 8.4 + + - do: + indices.create: + index: test_index + body: + mappings: + properties: + field1: + type: keyword + field2: + type: keyword + + - do: + indices.put_alias: + index: test_index + name: test_alias + body: {"filter": {"bool": {"filter": [{"term": {"field2": "foo"}}]}}} + - do: + index: + index: test_index + id: "1" + body: { "field1" : "1", "field2": "foo" } + + - do: + index: + index: test_index + id: "2" + body: { "field1": "2", "field2": "bar" } + + - do: + index: + index: test_index + id: "3" + body: { "field1": "3", "field2": "foo" } + + - do: + indices.refresh: {} + + - do: + search: + index: test_alias + body: {"aggs": {"sig_terms": {"significant_terms": {"field": "field1"}}}} + - match: { aggregations.sig_terms.bg_count: 2 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/450_random_sampler.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/450_random_sampler.yml index bd1b4b82b5380..4e93e9c83785f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/450_random_sampler.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/450_random_sampler.yml @@ -208,7 +208,7 @@ setup: } - do: - catch: /\[random_sampler\] aggregation \[sampled\] does not support sampling \[cardinality\] aggregation \[unique\]/ + catch: /\[random_sampler\] aggregation \[sampled\] does not support sampling \[sampler\] aggregation \[inner_sampler\]/ search: index: data size: 0 @@ -219,7 +219,7 @@ setup: "random_sampler": { "probability": 0.1 }, - "aggs": { "unique": {"cardinality": {"field": "product"}}} + "aggs": { "inner_sampler": {"sampler": {}}} } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java index 31eb0bdd00f30..d9a21acbd4175 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -15,15 +15,18 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import 
org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -33,7 +36,9 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -41,6 +46,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.TimeValue; @@ -451,6 +457,40 @@ public void testCorruptionOnNetworkLayer() throws InterruptedException { connection.sendRequest(requestId, action, request, options); }); + final var allocationGivenUpFuture = new PlainActionFuture(); + final var maxRetries = MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY.get(Settings.EMPTY); + new ClusterStateObserver( + internalCluster().getCurrentMasterNodeInstance(ClusterService.class), + logger, + new ThreadContext(Settings.EMPTY) + ).waitForNextChange(new ClusterStateObserver.Listener() { + @Override + public void onNewClusterState(ClusterState state) { + allocationGivenUpFuture.onResponse(null); + } + + @Override + public void onClusterServiceClose() { + allocationGivenUpFuture.onFailure(new ElasticsearchException("closed")); + } + + @Override + public void onTimeout(TimeValue timeout) { + allocationGivenUpFuture.onFailure(new ElasticsearchException("timed out")); + } + }, state -> { + final var indexRoutingTable = state.routingTable().index("test"); + for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { + final var replicaShards = indexRoutingTable.shard(shardId).replicaShards(); + if (replicaShards.isEmpty() + || replicaShards.stream() + .anyMatch(sr -> sr.unassigned() == false || sr.unassignedInfo().getNumFailedAllocations() < maxRetries)) { + return false; + } + } + return true; + }, TimeValue.timeValueSeconds(30)); + // can not allocate on unluckyNode client().admin() .indices() @@ -461,10 +501,10 @@ public void testCorruptionOnNetworkLayer() throws InterruptedException { .put("index.routing.allocation.include._name", primariesNode.getName() + "," + unluckyNode.getName()) ) .get(); - ensureYellowAndNoInitializingShards("test"); + allocationGivenUpFuture.actionGet(); assertThatAllShards("test", shard -> { 
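+ // by this point allocation has given up on the replica, so it must remain unassigned while the primary stays on the healthy node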
assertThat(shard.primaryShard().currentNodeId(), equalTo(primariesNode.getId())); - assertThat(shard.replicaShards().get(0).state(), not(equalTo(ShardRoutingState.STARTED))); + assertThat(shard.replicaShards().get(0).state(), equalTo(ShardRoutingState.UNASSIGNED)); }); // can allocate on any other data node diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index e2751e05314ab..e56d1568d20e3 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -24,7 +24,9 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.core.Tuple; +import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; @@ -307,6 +309,9 @@ private void onFailure(int idx, String nodeId, Throwable t) { } private void finishHim() { + if ((task instanceof CancellableTask t) && t.notifyIfCancelled(listener)) { + return; + } TasksResponse finalResponse; try { finalResponse = newResponse(request, responses); @@ -335,7 +340,7 @@ public void messageReceived(final NodeTaskRequest request, final TransportChanne } private class NodeTaskRequest extends TransportRequest { - private TasksRequest tasksRequest; + private final TasksRequest tasksRequest; protected NodeTaskRequest(StreamInput in) throws IOException { super(in); @@ -353,6 +358,11 @@ protected NodeTaskRequest(TasksRequest tasksRequest) { this.tasksRequest = tasksRequest; } + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers); + } + } private class NodeTasksResponse extends TransportResponse { diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 06b850815681e..daaa69f5786a7 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -18,7 +18,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; @@ -32,7 +31,9 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.UpdateCtxMap; import org.elasticsearch.script.UpdateScript; +import org.elasticsearch.script.UpsertCtxMap; import org.elasticsearch.search.lookup.SourceLookup; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; @@ -87,25 +88,16 @@ protected Result prepare(ShardId shardId, UpdateRequest request, final GetResult * Execute a scripted upsert, where there is an existing upsert document and a script to be executed. 
The script is executed and a new * Tuple of operation and updated {@code _source} is returned. */ - Tuple> executeScriptedUpsert(Map upsertDoc, Script script, LongSupplier nowInMillis) { - Map ctx = Maps.newMapWithExpectedSize(3); - // Tell the script that this is a create and not an update - ctx.put(ContextFields.OP, UpdateOpType.CREATE.toString()); - ctx.put(ContextFields.SOURCE, upsertDoc); - ctx.put(ContextFields.NOW, nowInMillis.getAsLong()); - ctx = executeScript(script, ctx); - - UpdateOpType operation = UpdateOpType.lenientFromString((String) ctx.get(ContextFields.OP), logger, script.getIdOrCode()); - @SuppressWarnings("unchecked") - Map newSource = (Map) ctx.get(ContextFields.SOURCE); - + Tuple> executeScriptedUpsert(Script script, UpsertCtxMap ctxMap) { + ctxMap = executeScript(script, ctxMap); + UpdateOpType operation = UpdateOpType.lenientFromString(ctxMap.getMetadata().getOp(), logger, script.getIdOrCode()); if (operation != UpdateOpType.CREATE && operation != UpdateOpType.NONE) { // Only valid options for an upsert script are "create" (the default) or "none", meaning abort upsert logger.warn("Invalid upsert operation [{}] for script [{}], doing nothing...", operation, script.getIdOrCode()); operation = UpdateOpType.NONE; } - return new Tuple<>(operation, newSource); + return new Tuple<>(operation, ctxMap.getSource()); } /** @@ -120,11 +112,14 @@ Result prepareUpsert(ShardId shardId, UpdateRequest request, final GetResult get if (request.scriptedUpsert() && request.script() != null) { // Run the script to perform the create logic IndexRequest upsert = request.upsertRequest(); - Tuple> upsertResult = executeScriptedUpsert( - upsert.sourceAsMap(), - request.script, - nowInMillis + UpsertCtxMap ctxMap = new UpsertCtxMap( + getResult.getIndex(), + getResult.getId(), + UpdateOpType.CREATE.toString(), + nowInMillis.getAsLong(), + upsert.sourceAsMap() ); + Tuple> upsertResult = executeScriptedUpsert(request.script, ctxMap); switch (upsertResult.v1()) { case CREATE -> indexRequest = Requests.indexRequest(request.index()).source(upsertResult.v2()); case NONE -> { @@ -237,24 +232,22 @@ Result prepareUpdateScriptRequest(ShardId shardId, UpdateRequest request, GetRes final String routing = calculateRouting(getResult, currentRequest); final Tuple> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true); final XContentType updateSourceContentType = sourceAndContent.v1(); - final Map sourceAsMap = sourceAndContent.v2(); - - Map ctx = Maps.newMapWithExpectedSize(16); - ctx.put(ContextFields.OP, UpdateOpType.INDEX.toString()); // The default operation is "index" - ctx.put(ContextFields.INDEX, getResult.getIndex()); - ctx.put(ContextFields.TYPE, MapperService.SINGLE_MAPPING_NAME); - ctx.put(ContextFields.ID, getResult.getId()); - ctx.put(ContextFields.VERSION, getResult.getVersion()); - ctx.put(ContextFields.ROUTING, routing); - ctx.put(ContextFields.SOURCE, sourceAsMap); - ctx.put(ContextFields.NOW, nowInMillis.getAsLong()); - - ctx = executeScript(request.script, ctx); - UpdateOpType operation = UpdateOpType.lenientFromString((String) ctx.get(ContextFields.OP), logger, request.script.getIdOrCode()); - - @SuppressWarnings("unchecked") - final Map updatedSourceAsMap = (Map) ctx.get(ContextFields.SOURCE); + UpdateCtxMap ctxMap = executeScript( + request.script, + new UpdateCtxMap( + getResult.getIndex(), + getResult.getId(), + getResult.getVersion(), + routing, + MapperService.SINGLE_MAPPING_NAME, + UpdateOpType.INDEX.toString(), // The default operation is "index" + 
nowInMillis.getAsLong(), + sourceAndContent.v2() + ) + ); + UpdateOpType operation = UpdateOpType.lenientFromString(ctxMap.getMetadata().getOp(), logger, request.script.getIdOrCode()); + final Map updatedSourceAsMap = ctxMap.getSource(); switch (operation) { case INDEX -> { @@ -307,17 +300,17 @@ Result prepareUpdateScriptRequest(ShardId shardId, UpdateRequest request, GetRes } } - private Map executeScript(Script script, Map ctx) { + private T executeScript(Script script, T ctxMap) { try { if (scriptService != null) { UpdateScript.Factory factory = scriptService.compile(script, UpdateScript.CONTEXT); - UpdateScript executableScript = factory.newInstance(script.getParams(), ctx); + UpdateScript executableScript = factory.newInstance(script.getParams(), ctxMap); executableScript.execute(); } } catch (Exception e) { throw new IllegalArgumentException("failed to execute script", e); } - return ctx; + return ctxMap; } /** @@ -429,6 +422,7 @@ public static UpdateOpType lenientFromString(String operation, Logger logger, St return UpdateOpType.INDEX; case "delete": return UpdateOpType.DELETE; + case "noop": case "none": return UpdateOpType.NONE; default: diff --git a/server/src/main/java/org/elasticsearch/cluster/DiskUsage.java b/server/src/main/java/org/elasticsearch/cluster/DiskUsage.java index 3df1588b128db..69e726d16d0c6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/DiskUsage.java +++ b/server/src/main/java/org/elasticsearch/cluster/DiskUsage.java @@ -97,7 +97,7 @@ public double getFreeDiskAsPercentage() { if (totalBytes == 0) { return 100.0; } - return 100.0 * ((double) freeBytes / totalBytes); + return 100.0 * freeBytes / totalBytes; } public double getUsedDiskAsPercentage() { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java index 097c28ff8bd12..9e1772c7d844f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java @@ -43,7 +43,7 @@ import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING; import static org.elasticsearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; -public class ClusterBootstrapService { +public class ClusterBootstrapService implements Coordinator.PeerFinderListener { public static final Setting> INITIAL_MASTER_NODES_SETTING = Setting.listSetting( "cluster.initial_master_nodes", @@ -147,7 +147,8 @@ void logBootstrapState(Metadata metadata) { } } - void onFoundPeersUpdated() { + @Override + public void onFoundPeersUpdated() { final Set nodes = getDiscoveredNodes(); if (bootstrappingPermitted.get() && transportService.getLocalNode().isMasterNode() diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 213f713044243..957b3cf37c03d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -84,6 +84,7 @@ import java.util.Optional; import java.util.Random; import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; import java.util.function.Supplier; @@ -174,6 +175,7 @@ public class Coordinator 
extends AbstractLifecycleComponent implements ClusterSt private JoinHelper.JoinAccumulator joinAccumulator; private Optional currentPublication = Optional.empty(); private final NodeHealthService nodeHealthService; + private final List peerFinderListeners; /** * @param nodeName The name of the node, used to name the {@link java.util.concurrent.ExecutorService} of the {@link SeedHostsResolver}. @@ -295,6 +297,8 @@ public Coordinator( joinHelper::logLastFailedJoinAttempt ); this.nodeHealthService = nodeHealthService; + this.peerFinderListeners = new CopyOnWriteArrayList<>(); + this.peerFinderListeners.add(clusterBootstrapService); } /** @@ -1515,6 +1519,10 @@ boolean hasIdleJoinValidationService() { return joinValidationService.isIdle(); } + public void addPeerFinderListener(PeerFinderListener peerFinderListener) { + this.peerFinderListeners.add(peerFinderListener); + } + public enum Mode { CANDIDATE, LEADER, @@ -1570,8 +1578,7 @@ protected void onFoundPeersUpdated() { } } } - - clusterBootstrapService.onFoundPeersUpdated(); + peerFinderListeners.forEach(PeerFinderListener::onFoundPeersUpdated); } } @@ -1937,4 +1944,8 @@ protected void sendApplyCommit( ); } } + + public interface PeerFinderListener { + void onFoundPeersUpdated(); + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java index 8fe89831c08ca..1ba382c725670 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -14,11 +14,14 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateAckListener; +import org.elasticsearch.cluster.ClusterStateTaskConfig; import org.elasticsearch.cluster.ClusterStateTaskExecutor; +import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; @@ -27,22 +30,19 @@ import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.ShardLimitValidator; -import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; import java.util.Arrays; import java.util.HashSet; -import java.util.List; import java.util.Locale; import java.util.Set; import java.util.function.BiFunction; -import static org.elasticsearch.action.support.ContextPreservingActionListener.wrapPreservingContext; import static org.elasticsearch.index.IndexSettings.same; /** @@ -56,223 +56,248 @@ public class MetadataUpdateSettingsService { private 
final IndexScopedSettings indexScopedSettings; private final IndicesService indicesService; private final ShardLimitValidator shardLimitValidator; - private final ThreadPool threadPool; - private final ClusterStateTaskExecutor executor; + private final ClusterStateTaskExecutor executor; public MetadataUpdateSettingsService( ClusterService clusterService, AllocationService allocationService, IndexScopedSettings indexScopedSettings, IndicesService indicesService, - ShardLimitValidator shardLimitValidator, - ThreadPool threadPool + ShardLimitValidator shardLimitValidator ) { this.clusterService = clusterService; this.allocationService = allocationService; this.indexScopedSettings = indexScopedSettings; this.indicesService = indicesService; this.shardLimitValidator = shardLimitValidator; - this.threadPool = threadPool; - this.executor = new ClusterStateTaskExecutor() { - @Override - @SuppressForbidden(reason = "consuming published cluster state for legacy reasons") - public ClusterState execute(ClusterState currentState, List> taskContexts) { - ClusterState state = currentState; - for (final var taskContext : taskContexts) { - try { - final var task = taskContext.getTask(); - state = task.execute(state); - taskContext.success(new ClusterStateTaskExecutor.LegacyClusterTaskResultActionListener(task, currentState), task); - } catch (Exception e) { - taskContext.onFailure(e); - } - } - if (state != currentState) { - // reroute in case things change that require it (like number of replicas) - state = allocationService.reroute(state, "settings update"); + this.executor = (currentState, taskContexts) -> { + ClusterState state = currentState; + for (final var taskContext : taskContexts) { + try { + final var task = taskContext.getTask(); + state = task.execute(state); + taskContext.success(task); + } catch (Exception e) { + taskContext.onFailure(e); } - return state; } + if (state != currentState) { + // reroute in case things change that require it (like number of replicas) + state = allocationService.reroute(state, "settings update"); + } + return state; }; } - public void updateSettings(final UpdateSettingsClusterStateUpdateRequest request, final ActionListener listener) { - final Settings normalizedSettings = Settings.builder() - .put(request.settings()) - .normalizePrefix(IndexMetadata.INDEX_SETTING_PREFIX) - .build(); - Settings.Builder settingsForClosedIndices = Settings.builder(); - Settings.Builder settingsForOpenIndices = Settings.builder(); - final Set skippedSettings = new HashSet<>(); - - indexScopedSettings.validate( - normalizedSettings.filter(s -> Regex.isSimpleMatchPattern(s) == false), // don't validate wildcards - false, // don't validate values here we check it below never allow to change the number of shards - true - ); // validate internal or private index settings - for (String key : normalizedSettings.keySet()) { - Setting setting = indexScopedSettings.get(key); - boolean isWildcard = setting == null && Regex.isSimpleMatchPattern(key); - assert setting != null // we already validated the normalized settings - || (isWildcard && normalizedSettings.hasValue(key) == false) - : "unknown setting: " + key + " isWildcard: " + isWildcard + " hasValue: " + normalizedSettings.hasValue(key); - settingsForClosedIndices.copy(key, normalizedSettings); - if (isWildcard || setting.isDynamic()) { - settingsForOpenIndices.copy(key, normalizedSettings); - } else { - skippedSettings.add(key); - } + private final class UpdateSettingsTask implements ClusterStateAckListener, ClusterStateTaskListener { 
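+ // A single-request task: execute() below computes the updated cluster state, while the ClusterStateAckListener callbacks complete the listener once all nodes ack, the ack times out, or the update fails.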
+ private final UpdateSettingsClusterStateUpdateRequest request; + private final ActionListener listener; + + private UpdateSettingsTask(UpdateSettingsClusterStateUpdateRequest request, ActionListener listener) { + this.request = request; + this.listener = listener; } - final Settings closedSettings = settingsForClosedIndices.build(); - final Settings openSettings = settingsForOpenIndices.build(); - final boolean preserveExisting = request.isPreserveExisting(); - - // TODO: move this to custom class instead of AckedClusterStateUpdateTask - AckedClusterStateUpdateTask clusterTask = new AckedClusterStateUpdateTask( - Priority.URGENT, - request, - wrapPreservingContext(listener, threadPool.getThreadContext()) - ) { - @Override - public ClusterState execute(ClusterState currentState) { - RoutingTable.Builder routingTableBuilder = null; - Metadata.Builder metadataBuilder = Metadata.builder(currentState.metadata()); - - // allow to change any settings to a closed index, and only allow dynamic settings to be changed - // on an open index - Set openIndices = new HashSet<>(); - Set closedIndices = new HashSet<>(); - final String[] actualIndices = new String[request.indices().length]; - for (int i = 0; i < request.indices().length; i++) { - Index index = request.indices()[i]; - actualIndices[i] = index.getName(); - final IndexMetadata metadata = currentState.metadata().getIndexSafe(index); - if (metadata.getState() == IndexMetadata.State.OPEN) { - openIndices.add(index); - } else { - closedIndices.add(index); - } - } - if (skippedSettings.isEmpty() == false && openIndices.isEmpty() == false) { - throw new IllegalArgumentException( - String.format( - Locale.ROOT, - "Can't update non dynamic settings [%s] for open indices %s", - skippedSettings, - openIndices - ) - ); + @Override + public boolean mustAck(DiscoveryNode discoveryNode) { + return true; + } + + @Override + public void onAllNodesAcked() { + listener.onResponse(AcknowledgedResponse.of(true)); + } + + @Override + public void onAckFailure(Exception e) { + listener.onFailure(e); + } + + @Override + public void onAckTimeout() { + listener.onResponse(AcknowledgedResponse.of(false)); + } + + @Override + public TimeValue ackTimeout() { + return request.ackTimeout(); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { + assert false : "should not be called"; + } + + ClusterState execute(ClusterState currentState) { + final Settings normalizedSettings = Settings.builder() + .put(request.settings()) + .normalizePrefix(IndexMetadata.INDEX_SETTING_PREFIX) + .build(); + Settings.Builder settingsForClosedIndices = Settings.builder(); + Settings.Builder settingsForOpenIndices = Settings.builder(); + final Set skippedSettings = new HashSet<>(); + + indexScopedSettings.validate( + normalizedSettings.filter(s -> Regex.isSimpleMatchPattern(s) == false), // don't validate wildcards + false, // don't validate values here, we check them below; never allow changing the number of shards + true + ); // validate internal or private index settings + for (String key : normalizedSettings.keySet()) { + Setting setting = indexScopedSettings.get(key); + boolean isWildcard = setting == null && Regex.isSimpleMatchPattern(key); + assert setting != null // we already validated the normalized settings + || (isWildcard && normalizedSettings.hasValue(key) == false) + : "unknown setting: " + key + " isWildcard: " + isWildcard + " hasValue: " +
normalizedSettings.hasValue(key); + settingsForClosedIndices.copy(key, normalizedSettings); + if (isWildcard || setting.isDynamic()) { + settingsForOpenIndices.copy(key, normalizedSettings); + } else { + skippedSettings.add(key); } + } + final Settings closedSettings = settingsForClosedIndices.build(); + final Settings openSettings = settingsForOpenIndices.build(); + final boolean preserveExisting = request.isPreserveExisting(); - if (IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.exists(openSettings)) { - final int updatedNumberOfReplicas = IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(openSettings); - if (preserveExisting == false) { - // Verify that this won't take us over the cluster shard limit. - shardLimitValidator.validateShardLimitOnReplicaUpdate(currentState, request.indices(), updatedNumberOfReplicas); - - /* - * We do not update the in-sync allocation IDs as they will be removed upon the first index operation - * which makes these copies stale. - * - * TODO: should we update the in-sync allocation IDs once the data is deleted by the node? - */ - routingTableBuilder = RoutingTable.builder(currentState.routingTable()); - routingTableBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); - metadataBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); - logger.info("updating number_of_replicas to [{}] for indices {}", updatedNumberOfReplicas, actualIndices); - } + RoutingTable.Builder routingTableBuilder = null; + Metadata.Builder metadataBuilder = Metadata.builder(currentState.metadata()); + + // allow to change any settings to a closed index, and only allow dynamic settings to be changed + // on an open index + Set openIndices = new HashSet<>(); + Set closedIndices = new HashSet<>(); + final String[] actualIndices = new String[request.indices().length]; + for (int i = 0; i < request.indices().length; i++) { + Index index = request.indices()[i]; + actualIndices[i] = index.getName(); + final IndexMetadata metadata = currentState.metadata().getIndexSafe(index); + if (metadata.getState() == IndexMetadata.State.OPEN) { + openIndices.add(index); + } else { + closedIndices.add(index); } + } - updateIndexSettings( - openIndices, - metadataBuilder, - (index, indexSettings) -> indexScopedSettings.updateDynamicSettings( - openSettings, - indexSettings, - Settings.builder(), - index.getName() - ), - preserveExisting, - indexScopedSettings + if (skippedSettings.isEmpty() == false && openIndices.isEmpty() == false) { + throw new IllegalArgumentException( + String.format(Locale.ROOT, "Can't update non dynamic settings [%s] for open indices %s", skippedSettings, openIndices) ); + } - updateIndexSettings( - closedIndices, - metadataBuilder, - (index, indexSettings) -> indexScopedSettings.updateSettings( - closedSettings, - indexSettings, - Settings.builder(), - index.getName() - ), - preserveExisting, - indexScopedSettings - ); + if (IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.exists(openSettings)) { + final int updatedNumberOfReplicas = IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(openSettings); + if (preserveExisting == false) { + // Verify that this won't take us over the cluster shard limit. 
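Before the validator call that follows, it is worth spelling out the arithmetic: each replica multiplies an index's shard count, so the projected total has to be checked against the cluster-wide cap before the update is applied. A standalone sketch of that check, assuming an illustrative limit model rather than the real ShardLimitValidator API:

```java
public class ReplicaShardLimitSketch {
    static void validateReplicaUpdate(
        int indices, int primariesPerIndex, int currentReplicas, int updatedReplicas,
        int shardsUsedElsewhere, int maxShardsInCluster
    ) {
        int current = indices * primariesPerIndex * (1 + currentReplicas);
        int projected = indices * primariesPerIndex * (1 + updatedReplicas);
        int delta = projected - current;
        if (delta > 0 && shardsUsedElsewhere + current + delta > maxShardsInCluster) {
            throw new IllegalArgumentException(
                "this update would add " + delta + " shards, but the cluster only allows " + maxShardsInCluster
            );
        }
    }

    public static void main(String[] args) {
        // 10 indices x 5 primaries: going from 1 to 2 replicas adds 50 shards, well under a 1000-shard cap
        validateReplicaUpdate(10, 5, 1, 2, 0, 1000);
        System.out.println("replica bump accepted");
        // going to 9 replicas adds 400 shards on top of 700 already allocated: rejected
        try {
            validateReplicaUpdate(10, 5, 1, 9, 600, 1000);
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
```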
+ shardLimitValidator.validateShardLimitOnReplicaUpdate(currentState, request.indices(), updatedNumberOfReplicas); - if (IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.exists(normalizedSettings) - || IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.exists(normalizedSettings)) { - for (String index : actualIndices) { - final Settings settings = metadataBuilder.get(index).getSettings(); - MetadataCreateIndexService.validateTranslogRetentionSettings(settings); - MetadataCreateIndexService.validateStoreTypeSetting(settings); - } - } - boolean changed = false; - // increment settings versions - for (final String index : actualIndices) { - if (same(currentState.metadata().index(index).getSettings(), metadataBuilder.get(index).getSettings()) == false) { - changed = true; - final IndexMetadata.Builder builder = IndexMetadata.builder(metadataBuilder.get(index)); - builder.settingsVersion(1 + builder.settingsVersion()); - metadataBuilder.put(builder); - } + /* + * We do not update the in-sync allocation IDs as they will be removed upon the first index operation + * which makes these copies stale. + * + * TODO: should we update the in-sync allocation IDs once the data is deleted by the node? + */ + routingTableBuilder = RoutingTable.builder(currentState.routingTable()); + routingTableBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); + metadataBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); + logger.info("updating number_of_replicas to [{}] for indices {}", updatedNumberOfReplicas, actualIndices); } + } - final ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); - boolean changedBlocks = false; - for (IndexMetadata.APIBlock block : IndexMetadata.APIBlock.values()) { - changedBlocks |= maybeUpdateClusterBlock(actualIndices, blocks, block.block, block.setting, openSettings); - } - changed |= changedBlocks; + updateIndexSettings( + openIndices, + metadataBuilder, + (index, indexSettings) -> indexScopedSettings.updateDynamicSettings( + openSettings, + indexSettings, + Settings.builder(), + index.getName() + ), + preserveExisting, + indexScopedSettings + ); + + updateIndexSettings( + closedIndices, + metadataBuilder, + (index, indexSettings) -> indexScopedSettings.updateSettings( + closedSettings, + indexSettings, + Settings.builder(), + index.getName() + ), + preserveExisting, + indexScopedSettings + ); - if (changed == false) { - return currentState; + if (IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.exists(normalizedSettings) + || IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.exists(normalizedSettings)) { + for (String index : actualIndices) { + final Settings settings = metadataBuilder.get(index).getSettings(); + MetadataCreateIndexService.validateTranslogRetentionSettings(settings); + MetadataCreateIndexService.validateStoreTypeSetting(settings); } + } + boolean changed = false; + // increment settings versions + for (final String index : actualIndices) { + if (same(currentState.metadata().index(index).getSettings(), metadataBuilder.get(index).getSettings()) == false) { + changed = true; + final IndexMetadata.Builder builder = IndexMetadata.builder(metadataBuilder.get(index)); + builder.settingsVersion(1 + builder.settingsVersion()); + metadataBuilder.put(builder); + } + } - ClusterState updatedState = ClusterState.builder(currentState) - .metadata(metadataBuilder) - .routingTable(routingTableBuilder == null ? currentState.routingTable() : routingTableBuilder.build()) - .blocks(changedBlocks ? 
blocks.build() : currentState.blocks()) - .build(); + final ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); + boolean changedBlocks = false; + for (IndexMetadata.APIBlock block : IndexMetadata.APIBlock.values()) { + changedBlocks |= maybeUpdateClusterBlock(actualIndices, blocks, block.block, block.setting, openSettings); + } + changed |= changedBlocks; - try { - for (Index index : openIndices) { - final IndexMetadata currentMetadata = currentState.metadata().getIndexSafe(index); - final IndexMetadata updatedMetadata = updatedState.metadata().getIndexSafe(index); - indicesService.verifyIndexMetadata(currentMetadata, updatedMetadata); - } - for (Index index : closedIndices) { - final IndexMetadata currentMetadata = currentState.metadata().getIndexSafe(index); - final IndexMetadata updatedMetadata = updatedState.metadata().getIndexSafe(index); - // Verifies that the current index settings can be updated with the updated dynamic settings. - indicesService.verifyIndexMetadata(currentMetadata, updatedMetadata); - // Now check that we can create the index with the updated settings (dynamic and non-dynamic). - // This step is mandatory since we allow to update non-dynamic settings on closed indices. - indicesService.verifyIndexMetadata(updatedMetadata, updatedMetadata); - } - } catch (IOException ex) { - throw ExceptionsHelper.convertToElastic(ex); - } + if (changed == false) { + return currentState; + } + + ClusterState updatedState = ClusterState.builder(currentState) + .metadata(metadataBuilder) + .routingTable(routingTableBuilder == null ? currentState.routingTable() : routingTableBuilder.build()) + .blocks(changedBlocks ? blocks.build() : currentState.blocks()) + .build(); - return updatedState; + try { + for (Index index : openIndices) { + final IndexMetadata currentMetadata = currentState.metadata().getIndexSafe(index); + final IndexMetadata updatedMetadata = updatedState.metadata().getIndexSafe(index); + indicesService.verifyIndexMetadata(currentMetadata, updatedMetadata); + } + for (Index index : closedIndices) { + final IndexMetadata currentMetadata = currentState.metadata().getIndexSafe(index); + final IndexMetadata updatedMetadata = updatedState.metadata().getIndexSafe(index); + // Verifies that the current index settings can be updated with the updated dynamic settings. + indicesService.verifyIndexMetadata(currentMetadata, updatedMetadata); + // Now check that we can create the index with the updated settings (dynamic and non-dynamic). + // This step is mandatory since we allow to update non-dynamic settings on closed indices. 
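The closed-index branch that the next statement finishes is subtle enough to restate: because closed indices accept non-dynamic settings, verifying that the current metadata can transition to the updated metadata is not sufficient on its own; the updated metadata must also stand alone, as if the index were being created from scratch with the merged settings. A toy version of that two-step verification, with verify() standing in for indicesService.verifyIndexMetadata and plain maps standing in for IndexMetadata:

```java
import java.util.Map;
import java.util.function.BiConsumer;

public class ClosedIndexVerifySketch {
    static void verifyClosedIndexUpdate(
        Map<String, String> current,
        Map<String, String> updated,
        BiConsumer<Map<String, String>, Map<String, String>> verify
    ) {
        verify.accept(current, updated);  // dynamic settings can be applied to the existing metadata
        verify.accept(updated, updated);  // full settings (incl. non-dynamic) must form a creatable index
    }

    public static void main(String[] args) {
        BiConsumer<Map<String, String>, Map<String, String>> verify = (from, to) -> {
            if ("0".equals(to.get("index.number_of_shards"))) {
                throw new IllegalArgumentException("index must have at least one shard");
            }
        };
        verifyClosedIndexUpdate(
            Map.of("index.number_of_shards", "1"),
            Map.of("index.number_of_shards", "1", "index.codec", "best_compression"),
            verify
        );
        System.out.println("closed-index update verified");
    }
}
```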
+ indicesService.verifyIndexMetadata(updatedMetadata, updatedMetadata); + } + } catch (IOException ex) { + throw ExceptionsHelper.convertToElastic(ex); } - }; + return updatedState; + } + } + + public void updateSettings(final UpdateSettingsClusterStateUpdateRequest request, final ActionListener listener) { clusterService.submitStateUpdateTask( "update-settings " + Arrays.toString(request.indices()), - clusterTask, - clusterTask, + new UpdateSettingsTask(request, listener), + ClusterStateTaskConfig.build(Priority.URGENT, request.masterNodeTimeout()), this.executor ); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 69aae52a20ca4..c2fb3a34780a8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -705,7 +705,7 @@ record DiskUsageWithRelocations(DiskUsage diskUsage, long relocatingShardSize) { if (getTotalBytes() == 0L) { return 100.0; } - return 100.0 * ((double) getFreeBytes() / getTotalBytes()); + return 100.0 * getFreeBytes() / getTotalBytes(); } double getUsedDiskAsPercentage() { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index 71369d00195da..03caedeacb334 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.XContentFieldFilter; import org.elasticsearch.core.Nullable; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.xcontent.XContentType; @@ -104,10 +103,7 @@ public Builder() { @Override protected Parameter[] getParameters() { - if (IndexSettings.isTimeSeriesModeEnabled()) { - return new Parameter[] { enabled, mode, includes, excludes }; - } - return new Parameter[] { enabled, includes, excludes }; + return new Parameter[] { enabled, mode, includes, excludes }; } private boolean isDefault() { diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestCtxMap.java b/server/src/main/java/org/elasticsearch/ingest/IngestCtxMap.java index b648051669567..12f196c6c13e9 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestCtxMap.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestCtxMap.java @@ -73,4 +73,14 @@ public static ZonedDateTime getTimestamp(Map ingestMetadata) { return null; } + @Override + public Map getSource() { + return source; + } + + @Override + protected Map wrapSource(Map source) { + // Not wrapped in Ingest + return source; + } } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 0a1a79dcb014c..4908806f0a020 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -685,8 +685,7 @@ protected Node( clusterModule.getAllocationService(), settingsModule.getIndexScopedSettings(), indicesService, - shardLimitValidator, - threadPool + shardLimitValidator ); Collection pluginComponents = 
pluginsService.flatMap( diff --git a/server/src/main/java/org/elasticsearch/script/CtxMap.java b/server/src/main/java/org/elasticsearch/script/CtxMap.java index d66514127043a..f7fa90cf7523f 100644 --- a/server/src/main/java/org/elasticsearch/script/CtxMap.java +++ b/server/src/main/java/org/elasticsearch/script/CtxMap.java @@ -8,6 +8,7 @@ package org.elasticsearch.script; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.set.Sets; import java.util.AbstractCollection; @@ -28,6 +29,7 @@ * validation via {@link Metadata}. */ public class CtxMap extends AbstractMap { + protected static final String SOURCE = "_source"; protected final Map source; protected final Metadata metadata; @@ -38,7 +40,7 @@ public class CtxMap extends AbstractMap { * @param metadata the metadata map */ protected CtxMap(Map source, Metadata metadata) { - this.source = source != null ? source : new HashMap<>(); + this.source = wrapSource(source != null ? source : new HashMap<>()); this.metadata = metadata; Set badKeys = Sets.intersection(this.metadata.keySet(), this.source.keySet()); if (badKeys.size() > 0) { @@ -50,11 +52,20 @@ protected CtxMap(Map source, Metadata metadata) { } } + protected Map wrapSource(Map source) { + Map wrapper = Maps.newHashMapWithExpectedSize(1); + wrapper.put(SOURCE, source); + return wrapper; + } + /** * get the source map, if externally modified then the guarantees of this class are not enforced */ + @SuppressWarnings("unchecked") public Map getSource() { - return source; + Object rawSource = source.get(SOURCE); + assert rawSource instanceof Map : " wrapped source of unexpected type"; + return (Map) rawSource; } /** diff --git a/server/src/main/java/org/elasticsearch/script/Metadata.java b/server/src/main/java/org/elasticsearch/script/Metadata.java index f84e6d5502b61..88a93c2d6ea92 100644 --- a/server/src/main/java/org/elasticsearch/script/Metadata.java +++ b/server/src/main/java/org/elasticsearch/script/Metadata.java @@ -8,6 +8,8 @@ package org.elasticsearch.script; +import java.time.Instant; +import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.util.Collections; import java.util.HashMap; @@ -41,7 +43,9 @@ public class Metadata { protected static final String ROUTING = "_routing"; protected static final String VERSION_TYPE = "_version_type"; protected static final String VERSION = "_version"; - protected static final String TYPE = "_type"; // type is deprecated so it's supported in the map but not available as a getter + protected static final String TYPE = "_type"; // type is deprecated, so it's supported in the map but not available as a getter + protected static final String TIMESTAMP = "_now"; + protected static final String OP = "op"; protected static final String IF_SEQ_NO = "_if_seq_no"; protected static final String IF_PRIMARY_TERM = "_if_primary_term"; protected static final String DYNAMIC_TEMPLATES = "_dynamic_templates"; @@ -119,7 +123,15 @@ public void setVersion(long version) { } public ZonedDateTime getTimestamp() { - throw new UnsupportedOperationException("unimplemented"); + return ZonedDateTime.ofInstant(Instant.ofEpochMilli(getNumber(TIMESTAMP).longValue()), ZoneOffset.UTC); + } + + public String getOp() { + return getString(OP); + } + + public void setOp(String op) { + put(OP, op); } // These are not available to scripts diff --git a/server/src/main/java/org/elasticsearch/script/UpdateCtxMap.java b/server/src/main/java/org/elasticsearch/script/UpdateCtxMap.java new file mode 100644 index 0000000000000..738eca6a427f4 --- 
/dev/null +++ b/server/src/main/java/org/elasticsearch/script/UpdateCtxMap.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.script; + +import java.util.Map; + +/** + * Source and metadata for update (as opposed to insert via upsert) in the Update context. + */ +public class UpdateCtxMap extends CtxMap { + + public UpdateCtxMap( + String index, + String id, + long version, + String routing, + String type, + String op, + long timestamp, + Map source + ) { + super(source, new UpdateMetadata(index, id, version, routing, type, op, timestamp)); + } + + protected UpdateCtxMap(Map source, Metadata metadata) { + super(source, metadata); + } +} diff --git a/server/src/main/java/org/elasticsearch/script/UpdateMetadata.java b/server/src/main/java/org/elasticsearch/script/UpdateMetadata.java new file mode 100644 index 0000000000000..678c8a3f7faca --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/UpdateMetadata.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.script; + +import org.elasticsearch.common.util.Maps; + +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * The update context has read-only metadata: + * _index, _id, _version, _routing, _type (always '_doc'), _now (timestamp in millis) + * and read-write op that may be one of 'noop' or 'none' (legacy), 'index', 'delete' or null + */ +public class UpdateMetadata extends Metadata { + // AbstractAsyncBulkByScrollAction.OpType uses 'noop' rather than 'none', so unify on 'noop' but allow 'none' in + // the ctx map + + protected static final String LEGACY_NOOP_STRING = "none"; + + protected static final FieldProperty SET_ONCE_STRING = new FieldProperty<>(String.class, true, false, null); + + protected static final FieldProperty SET_ONCE_LONG = new FieldProperty<>( + Number.class, + false, + false, + FieldProperty.LONGABLE_NUMBER + ); + + static final Map> PROPERTIES = Map.of( + INDEX, + SET_ONCE_STRING, + ID, + SET_ONCE_STRING, + VERSION, + SET_ONCE_LONG, + ROUTING, + SET_ONCE_STRING, + TYPE, + SET_ONCE_STRING, + OP, + new FieldProperty<>(String.class, true, true, null), + TIMESTAMP, + SET_ONCE_LONG + ); + + protected final Set validOps; + + public UpdateMetadata(String index, String id, long version, String routing, String type, String op, long timestamp) { + this(metadataMap(index, id, version, routing, type, op, timestamp), Set.of("noop", "index", "delete"), PROPERTIES); + } + + protected UpdateMetadata(Map metadata, Set validOps, Map> properties) { + super(metadata, properties); + this.validOps = validOps; + } + + protected static Map metadataMap( + String index, + String id, + long version, + String routing, + String type, + String op, + long timestamp + ) { + Map metadata = Maps.newHashMapWithExpectedSize(PROPERTIES.size()); + metadata.put(INDEX, index); + metadata.put(ID, 
id); + metadata.put(VERSION, version); + metadata.put(ROUTING, routing); + metadata.put(TYPE, type); + metadata.put(OP, op); + metadata.put(TIMESTAMP, timestamp); + return metadata; + } + + @Override + public String getOp() { + String op = super.getOp(); + if (LEGACY_NOOP_STRING.equals(op) || op == null || validOps.contains(op) == false) { + // UpdateHelper.UpdateOpType.lenientFromString treats all invalid ops as "noop" + return "noop"; + } + return op; + } + + @Override + public void setOp(String op) { + // Due to existing leniency, we cannot rely on the map validator, so we must do validation here. + if (LEGACY_NOOP_STRING.equals(op)) { + throw new IllegalArgumentException("'" + LEGACY_NOOP_STRING + "' is not allowed, use 'noop' instead"); + } else if (op == null || validOps.contains(op) == false) { + throw new IllegalArgumentException( + "op must be one of [" + validOps.stream().sorted().collect(Collectors.joining(", ")) + "], not [" + op + "]" + ); + } + super.setOp(op); + } +} diff --git a/server/src/main/java/org/elasticsearch/script/UpdateScript.java b/server/src/main/java/org/elasticsearch/script/UpdateScript.java index 578e2fa7f29b1..19be8f0742fdb 100644 --- a/server/src/main/java/org/elasticsearch/script/UpdateScript.java +++ b/server/src/main/java/org/elasticsearch/script/UpdateScript.java @@ -12,7 +12,7 @@ import java.util.Map; /** - * An update script. + * A script used in the update API */ public abstract class UpdateScript { @@ -24,12 +24,11 @@ public abstract class UpdateScript { /** The generic runtime parameters for the script. */ private final Map params; - /** The update context for the script. */ - private final Map ctx; + private final UpdateCtxMap ctxMap; - public UpdateScript(Map params, Map ctx) { + public UpdateScript(Map params, UpdateCtxMap ctxMap) { this.params = params; - this.ctx = ctx; + this.ctxMap = ctxMap; } /** Return the parameters for this script. */ @@ -39,12 +38,17 @@ public Map getParams() { /** Return the update context for this script. */ public Map getCtx() { - return ctx; + return ctxMap; + } + + /** Return the update metadata for this script */ + public Metadata metadata() { + return ctxMap.getMetadata(); } public abstract void execute(); public interface Factory { - UpdateScript newInstance(Map params, Map ctx); + UpdateScript newInstance(Map params, UpdateCtxMap ctxMap); } } diff --git a/server/src/main/java/org/elasticsearch/script/UpsertCtxMap.java b/server/src/main/java/org/elasticsearch/script/UpsertCtxMap.java new file mode 100644 index 0000000000000..5a871502bf065 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/UpsertCtxMap.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.script; + +import java.util.Map; + +/** + * Metadata for insert via upsert in the Update context + */ +public class UpsertCtxMap extends UpdateCtxMap { + public UpsertCtxMap(String index, String id, String op, long timestamp, Map source) { + super(source, new UpsertMetadata(index, id, op, timestamp)); + } +} diff --git a/server/src/main/java/org/elasticsearch/script/UpsertMetadata.java b/server/src/main/java/org/elasticsearch/script/UpsertMetadata.java new file mode 100644 index 0000000000000..b82b89466b5be --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/UpsertMetadata.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.script; + +import org.elasticsearch.common.util.Maps; + +import java.util.Map; +import java.util.Set; + +class UpsertMetadata extends UpdateMetadata { + static final Map> PROPERTIES = Map.of( + INDEX, + SET_ONCE_STRING, + ID, + SET_ONCE_STRING, + OP, + new FieldProperty<>(String.class, true, true, null), + TIMESTAMP, + SET_ONCE_LONG + ); + + UpsertMetadata(String index, String id, String op, long timestamp) { + super(metadataMap(index, id, op, timestamp), Set.of("noop", "create"), PROPERTIES); + } + + protected static Map metadataMap(String index, String id, String op, long timestamp) { + Map metadata = Maps.newHashMapWithExpectedSize(PROPERTIES.size()); + metadata.put(INDEX, index); + metadata.put(ID, id); + metadata.put(OP, op); + metadata.put(TIMESTAMP, timestamp); + return metadata; + } + + @Override + public String getRouting() { + throw new UnsupportedOperationException("routing is unavailable for insert"); + } + + @Override + public long getVersion() { + throw new UnsupportedOperationException("version is unavailable for insert"); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java index e1b15f0db93ea..5c86bea9fc316 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java @@ -161,10 +161,14 @@ LeafBucketCollector getLeafCollector(LeafReaderContext context, LeafBucketCollec public void collect(int doc, long bucket) throws IOException { if (dvs.advanceExact(doc)) { int num = dvs.docValueCount(); + double previous = Double.MAX_VALUE; for (int i = 0; i < num; i++) { currentValue = dvs.nextValue(); missingCurrentValue = false; - next.collect(doc, bucket); + if (i == 0 || previous != currentValue) { + next.collect(doc, bucket); + previous = currentValue; + } } } else if (missingBucket) { missingCurrentValue = true; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java index f33ba1cab62f1..7b2bad458e405 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java @@ -177,10 +177,14 @@ LeafBucketCollector getLeafCollector(LeafReaderContext context, LeafBucketCollec public void collect(int doc, long bucket) throws IOException { if (dvs.advanceExact(doc)) { int num = dvs.docValueCount(); + long previous = Long.MAX_VALUE; for (int i = 0; i < num; i++) { currentValue = dvs.nextValue(); missingCurrentValue = false; - next.collect(doc, bucket); + if (i == 0 || previous != currentValue) { + next.collect(doc, bucket); + previous = currentValue; + } } } else if (missingBucket) { missingCurrentValue = true; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java index 63d5ba5949cc5..a71d26061752e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java @@ -15,6 +15,7 @@ import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; @@ -74,9 +75,22 @@ interface BackgroundFrequencyForLong extends Releasable { // If there is no provided background filter, but we are within a sampling context, our background docs need to take the sampling // context into account. // If there is a filter, that filter needs to take the sampling into account (if we are within a sampling context) - this.backgroundFilter = backgroundFilter == null + Query backgroundQuery = backgroundFilter == null ? samplingContext.buildSamplingQueryIfNecessary(context).orElse(null) : samplingContext.buildQueryWithSampler(backgroundFilter, context); + // Refilter to account for alias filters, if there are any. 
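The refiltering that follows leans on an identity-comparison idiom: filterQuery returns its argument unchanged when the shard has no alias filter, so a plain reference check distinguishes "nothing to add" (keep null, meaning the whole index is the background set) from "alias filter applied". A toy sketch of the idiom, with strings standing in for Lucene Query objects and a UnaryOperator standing in for the search context:

```java
import java.util.function.UnaryOperator;

public class RefilterSketch {
    static String refilter(String backgroundQuery, UnaryOperator<String> filterQuery) {
        if (backgroundQuery == null) {
            String matchAll = "match_all";
            String filtered = filterQuery.apply(matchAll);
            // keep null (meaning "whole index") unless the context actually added a filter
            return filtered == matchAll ? null : filtered;
        }
        return filterQuery.apply(backgroundQuery);
    }

    public static void main(String[] args) {
        UnaryOperator<String> noAliasFilter = q -> q;                      // returns the same instance
        UnaryOperator<String> aliasFilter = q -> "alias_filter AND " + q;  // wraps the query
        System.out.println(refilter(null, noAliasFilter)); // null
        System.out.println(refilter(null, aliasFilter));   // alias_filter AND match_all
        System.out.println(refilter("bg", aliasFilter));   // alias_filter AND bg
    }
}
```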
+ if (backgroundQuery == null) { + Query matchAllDocsQuery = new MatchAllDocsQuery(); + Query contextFiltered = context.filterQuery(matchAllDocsQuery); + if (contextFiltered != matchAllDocsQuery) { + this.backgroundFilter = contextFiltered; + } else { + this.backgroundFilter = null; + } + } else { + Query contextFiltered = context.filterQuery(backgroundQuery); + this.backgroundFilter = contextFiltered; + } /* * We need to use a superset size that includes deleted docs or we * could end up blowing up with bad statistics that cause us to blow diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java index 589394ff62846..715dd670d08fd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java @@ -101,7 +101,7 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) { private IncludeExclude includeExclude = null; private String executionHint = null; - private QueryBuilder filterBuilder = null; + private QueryBuilder backgroundFilter = null; private TermsAggregator.BucketCountThresholds bucketCountThresholds = new BucketCountThresholds(DEFAULT_BUCKET_COUNT_THRESHOLDS); private SignificanceHeuristic significanceHeuristic = DEFAULT_SIGNIFICANCE_HEURISTIC; @@ -116,7 +116,7 @@ public SignificantTermsAggregationBuilder(StreamInput in) throws IOException { super(in); bucketCountThresholds = new BucketCountThresholds(in); executionHint = in.readOptionalString(); - filterBuilder = in.readOptionalNamedWriteable(QueryBuilder.class); + backgroundFilter = in.readOptionalNamedWriteable(QueryBuilder.class); includeExclude = in.readOptionalWriteable(IncludeExclude::new); significanceHeuristic = in.readNamedWriteable(SignificanceHeuristic.class); } @@ -129,7 +129,7 @@ protected SignificantTermsAggregationBuilder( super(clone, factoriesBuilder, metadata); this.bucketCountThresholds = new BucketCountThresholds(clone.bucketCountThresholds); this.executionHint = clone.executionHint; - this.filterBuilder = clone.filterBuilder; + this.backgroundFilter = clone.backgroundFilter; this.includeExclude = clone.includeExclude; this.significanceHeuristic = clone.significanceHeuristic; } @@ -151,9 +151,9 @@ protected SignificantTermsAggregationBuilder shallowCopy(AggregatorFactories.Bui @Override protected AggregationBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { - if (filterBuilder != null) { - QueryBuilder rewrittenFilter = filterBuilder.rewrite(queryRewriteContext); - if (rewrittenFilter != filterBuilder) { + if (backgroundFilter != null) { + QueryBuilder rewrittenFilter = backgroundFilter.rewrite(queryRewriteContext); + if (rewrittenFilter != backgroundFilter) { SignificantTermsAggregationBuilder rewritten = shallowCopy(factoriesBuilder, metadata); rewritten.backgroundFilter(rewrittenFilter); return rewritten; @@ -166,7 +166,7 @@ protected AggregationBuilder doRewrite(QueryRewriteContext queryRewriteContext) protected void innerWriteTo(StreamOutput out) throws IOException { bucketCountThresholds.writeTo(out); out.writeOptionalString(executionHint); - out.writeOptionalNamedWriteable(filterBuilder); + out.writeOptionalNamedWriteable(backgroundFilter); out.writeOptionalWriteable(includeExclude); 
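One more note on the filterBuilder to backgroundFilter rename running through this hunk: it is wire-safe because the stream protocol is positional; readers recover fields by the order of writes, never by Java field name. A self-contained illustration of that contract, with DataOutputStream standing in for StreamOutput and plain strings for the optional writeables:

```java
import java.io.*;

public class WireOrderSketch {
    static void write(DataOutputStream out, String executionHint, String backgroundFilter) throws IOException {
        out.writeBoolean(executionHint != null);        // optional string, as writeOptionalString does
        if (executionHint != null) out.writeUTF(executionHint);
        out.writeBoolean(backgroundFilter != null);     // optional writeable
        if (backgroundFilter != null) out.writeUTF(backgroundFilter);
    }

    static String[] read(DataInputStream in) throws IOException {
        String hint = in.readBoolean() ? in.readUTF() : null;   // must mirror the write order exactly
        String filter = in.readBoolean() ? in.readUTF() : null;
        return new String[] { hint, filter };
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        write(new DataOutputStream(bytes), null, "terms_filter");
        String[] roundTripped = read(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(roundTripped[0] + " / " + roundTripped[1]);  // null / terms_filter
    }
}
```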
out.writeNamedWriteable(significanceHeuristic); } @@ -265,12 +265,12 @@ public SignificantTermsAggregationBuilder backgroundFilter(QueryBuilder backgrou if (backgroundFilter == null) { throw new IllegalArgumentException("[backgroundFilter] must not be null: [" + name + "]"); } - this.filterBuilder = backgroundFilter; + this.backgroundFilter = backgroundFilter; return this; } public QueryBuilder backgroundFilter() { - return filterBuilder; + return backgroundFilter; } /** @@ -320,7 +320,7 @@ protected ValuesSourceAggregatorFactory innerBuild( config, includeExclude, executionHint, - filterBuilder, + backgroundFilter, bucketCountThresholds, executionHeuristic, context, @@ -337,8 +337,8 @@ protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) if (executionHint != null) { builder.field(TermsAggregationBuilder.EXECUTION_HINT_FIELD_NAME.getPreferredName(), executionHint); } - if (filterBuilder != null) { - builder.field(BACKGROUND_FILTER.getPreferredName(), filterBuilder); + if (backgroundFilter != null) { + builder.field(BACKGROUND_FILTER.getPreferredName(), backgroundFilter); } if (includeExclude != null) { includeExclude.toXContent(builder, params); @@ -349,7 +349,14 @@ protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) @Override public int hashCode() { - return Objects.hash(super.hashCode(), bucketCountThresholds, executionHint, filterBuilder, includeExclude, significanceHeuristic); + return Objects.hash( + super.hashCode(), + bucketCountThresholds, + executionHint, + backgroundFilter, + includeExclude, + significanceHeuristic + ); } @Override @@ -360,7 +367,7 @@ public boolean equals(Object obj) { SignificantTermsAggregationBuilder other = (SignificantTermsAggregationBuilder) obj; return Objects.equals(bucketCountThresholds, other.bucketCountThresholds) && Objects.equals(executionHint, other.executionHint) - && Objects.equals(filterBuilder, other.filterBuilder) + && Objects.equals(backgroundFilter, other.backgroundFilter) && Objects.equals(includeExclude, other.includeExclude) && Objects.equals(significanceHeuristic, other.significanceHeuristic); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java index db3ea3a0f62e7..57a036a3a7600 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java @@ -109,6 +109,11 @@ protected void innerWriteTo(StreamOutput out) throws IOException { } } + @Override + public boolean supportsSampling() { + return true; + } + @Override protected boolean serializeTargetValueType(Version version) { return true; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalCardinality.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalCardinality.java index 50cd873dd8947..82e332c88c767 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalCardinality.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalCardinality.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.InternalAggregation; +import 
org.elasticsearch.search.aggregations.support.SamplingContext; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -116,6 +117,15 @@ public boolean equals(Object obj) { return counts.equals(0, other.counts, 0); } + /** + * The counts created in cardinality do not lend themselves to be automatically scaled. + * Consequently, when finalizing the sampling, nothing is changed and the same object is returned + */ + @Override + public InternalAggregation finalizeSampling(SamplingContext samplingContext) { + return this; + } + AbstractHyperLogLogPlusPlus getState() { return counts; } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 1f614eaf42ed6..7d1aa070df616 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -313,22 +313,25 @@ protected void doStop() { throw new UncheckedIOException(e); } finally { // The underlying transport has stopped which closed all the connections to remote nodes and hence completed all their handlers, - // but there may still be pending handlers for node-local requests since this connection is not closed. We complete them here: + // but there may still be pending handlers for node-local requests since this connection is not closed, and we may also + // (briefly) track handlers for requests which are sent concurrently with stopping even though the underlying connection is + // now closed. We complete all these outstanding handlers here: for (final Transport.ResponseContext holderToNotify : responseHandlers.prune(h -> true)) { try { final TransportResponseHandler handler = holderToNotify.handler(); final var targetNode = holderToNotify.connection().getNode(); - // Assertion only holds for TcpTransport only because other transports (used in tests) may not implement the proper - // close-connection behaviour. TODO fix this. - assert transport instanceof TcpTransport == false || targetNode.equals(localNode) + assert transport instanceof TcpTransport == false + /* other transports (used in tests) may not implement the proper close-connection behaviour. TODO fix this. */ + || targetNode.equals(localNode) + /* local node connection cannot be closed so may still have pending handlers */ + || holderToNotify.connection().isClosed() + /* connections to remote nodes must be closed by this point but could still have pending handlers */ : "expected only responses for local " + localNode + " but found handler for [" + holderToNotify.action() - + "] on [" - + (holderToNotify.connection().isClosed() ? 
"closed" : "open") - + "] connection to " + + "] on open connection to " + targetNode; final var exception = new SendRequestTransportException( diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index abe7b4c889ca3..87ffd5ea5be92 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -34,7 +34,9 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -66,6 +68,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; public class TransportTasksActionTests extends TaskManagerTestCase { @@ -100,7 +103,7 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, } public static class NodesRequest extends BaseNodesRequest { - private String requestName; + private final String requestName; NodesRequest(StreamInput in) throws IOException { super(in); @@ -178,11 +181,16 @@ static class TestTasksRequest extends BaseTasksRequest { } TestTasksRequest() {} + + @Override + public CancellableTask createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, "testTasksRequest", parentTaskId, headers); + } } static class TestTasksResponse extends BaseTasksResponse { - private List tasks; + private final List tasks; TestTasksResponse( List tasks, @@ -303,7 +311,7 @@ public void testRunningTasksCount() throws Exception { CountDownLatch checkLatch = new CountDownLatch(1); CountDownLatch responseLatch = new CountDownLatch(1); final AtomicReference responseReference = new AtomicReference<>(); - Task mainTask = startBlockingTestNodesAction(checkLatch, new ActionListener() { + Task mainTask = startBlockingTestNodesAction(checkLatch, new ActionListener<>() { @Override public void onResponse(NodesResponse listTasksResponse) { responseReference.set(listTasksResponse); @@ -620,7 +628,81 @@ private String getAllTaskDescriptions() { return taskDescriptions.toString(); } - public void testTaskLevelActionFailures() throws ExecutionException, InterruptedException, IOException { + public void testActionParentCancellationPropagates() throws ExecutionException, InterruptedException { + setupTestNodes(Settings.EMPTY); + connectNodes(testNodes); + CountDownLatch checkLatch = new CountDownLatch(1); + CountDownLatch taskLatch = new CountDownLatch(1); + ActionFuture future = startBlockingTestNodesAction(checkLatch); + int numNodes = 2; + + CountDownLatch taskExecutesLatch = new CountDownLatch(numNodes); + TestTasksAction[] tasksActions = new TestTasksAction[numNodes]; + for (int j = 0; j < numNodes; j++) { + final int nodeId = j; + tasksActions[j] = new TestTasksAction( + "internal:testTasksAction", + testNodes[nodeId].clusterService, 
+ testNodes[nodeId].transportService + ) { + @Override + protected void taskOperation( + Task actionTask, + TestTasksRequest request, + Task task, + ActionListener listener + ) { + try { + taskExecutesLatch.countDown(); + logger.info("Task handled on node {} {}", nodeId, actionTask); + taskLatch.await(); + assertThat(actionTask, instanceOf(CancellableTask.class)); + logger.info("Task is now proceeding with cancellation check {}", nodeId); + if (actionTask instanceof CancellableTask cancellableTask) { + assertBusy(() -> assertTrue(cancellableTask.isCancelled())); + } + listener.onResponse(new TestTaskResponse("CANCELLED")); + } catch (Exception e) { + listener.onFailure(e); + fail(e.getMessage()); + } + } + }; + } + + TestTasksRequest testTasksRequest = new TestTasksRequest(); + testTasksRequest.setActions("internal:testAction[n]"); // pick all test actions + testTasksRequest.setNodes(testNodes[0].getNodeId(), testNodes[1].getNodeId()); // only first two nodes + PlainActionFuture taskFuture = newFuture(); + CancellableTask task = (CancellableTask) testNodes[0].transportService.getTaskManager() + .registerAndExecute( + "direct", + tasksActions[0], + testTasksRequest, + testNodes[0].transportService.getLocalNodeConnection(), + taskFuture + ); + logger.info("Executing test task request and awaiting their execution"); + taskExecutesLatch.await(); + logger.info("All test tasks are now executing"); + + PlainActionFuture cancellationFuture = newFuture(); + logger.info("Cancelling tasks"); + + testNodes[0].transportService.getTaskManager().cancelTaskAndDescendants(task, "test case", false, cancellationFuture); + logger.info("Awaiting task cancellation"); + cancellationFuture.actionGet(); + logger.info("Parent task is now cancelled counting down task latch"); + taskLatch.countDown(); + expectThrows(TaskCancelledException.class, taskFuture::actionGet); + + // Release all node tasks and wait for response + checkLatch.countDown(); + NodesResponse responses = future.get(); + assertEquals(0, responses.failureCount()); + } + + public void testTaskLevelActionFailures() throws ExecutionException, InterruptedException { setupTestNodes(Settings.EMPTY); connectNodes(testNodes); CountDownLatch checkLatch = new CountDownLatch(1); @@ -688,7 +770,7 @@ protected void taskOperation( * it executes a tasks action that targets these blocked node actions. The test verifies that task actions are only * getting executed on nodes that are not listed in the node filter. 
*/ - public void testTaskNodeFiltering() throws ExecutionException, InterruptedException, IOException { + public void testTaskNodeFiltering() throws ExecutionException, InterruptedException { setupTestNodes(Settings.EMPTY); connectNodes(testNodes); CountDownLatch checkLatch = new CountDownLatch(1); @@ -720,7 +802,7 @@ protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) { filteredNodes.add(node); } } - return filteredNodes.toArray(new String[filteredNodes.size()]); + return filteredNodes.toArray(new String[0]); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index 5ee90a2f38684..d05b9c48e5766 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -91,19 +91,19 @@ public void setUp() throws Exception { ctx.put("_timestamp", ctx.get("_now")); return null; }); - scripts.put("ctx.op = delete", vars -> { + scripts.put("ctx.op = 'delete'", vars -> { @SuppressWarnings("unchecked") final Map ctx = (Map) vars.get("ctx"); ctx.put("op", "delete"); return null; }); - scripts.put("ctx.op = bad", vars -> { + scripts.put("ctx.op = 'bad'", vars -> { @SuppressWarnings("unchecked") final Map ctx = (Map) vars.get("ctx"); ctx.put("op", "bad"); return null; }); - scripts.put("ctx.op = none", vars -> { + scripts.put("ctx.op = 'none'", vars -> { @SuppressWarnings("unchecked") final Map ctx = (Map) vars.get("ctx"); ctx.put("op", "none"); @@ -381,7 +381,7 @@ public void testIndexTimeout() { public void testDeleteTimeout() { final GetResult getResult = new GetResult("test", "1", 0, 1, 0, true, new BytesArray("{\"f\":\"v\"}"), null, null); - final UpdateRequest updateRequest = new UpdateRequest("test", "1").script(mockInlineScript("ctx.op = delete")) + final UpdateRequest updateRequest = new UpdateRequest("test", "1").script(mockInlineScript("ctx.op = 'delete'")) .timeout(randomTimeValue()); runTimeoutTest(getResult, updateRequest); } @@ -598,7 +598,7 @@ public void testUpdateScript() throws Exception { assertThat(result.updatedSourceAsMap().get("body").toString(), equalTo("foo")); // Now where the script changes the op to "delete" - request = new UpdateRequest("test", "1").script(mockInlineScript("ctx.op = delete")); + request = new UpdateRequest("test", "1").script(mockInlineScript("ctx.op = 'delete'")); result = updateHelper.prepareUpdateScriptRequest(shardId, request, getResult, ESTestCase::randomNonNegativeLong); @@ -608,9 +608,9 @@ public void testUpdateScript() throws Exception { // We treat everything else as a No-op boolean goodNoop = randomBoolean(); if (goodNoop) { - request = new UpdateRequest("test", "1").script(mockInlineScript("ctx.op = none")); + request = new UpdateRequest("test", "1").script(mockInlineScript("ctx.op = 'none'")); } else { - request = new UpdateRequest("test", "1").script(mockInlineScript("ctx.op = bad")); + request = new UpdateRequest("test", "1").script(mockInlineScript("ctx.op = 'bad'")); } result = updateHelper.prepareUpdateScriptRequest(shardId, request, getResult, ESTestCase::randomNonNegativeLong); diff --git a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java index 254462ff32edd..9d4d03592c7ef 100644 --- a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java +++ 
b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java @@ -43,25 +43,32 @@ public void testDiskUsageCalc() { assertThat(du.getUsedBytes(), equalTo(60L)); assertThat(du.getTotalBytes(), equalTo(100L)); - // Test that DiskUsage handles invalid numbers, as reported by some - // filesystems (ZFS & NTFS) - DiskUsage du2 = new DiskUsage("node1", "n1", "random", 100, 101); - assertThat(du2.getFreeDiskAsPercentage(), equalTo(101.0)); - assertThat(du2.getFreeBytes(), equalTo(101L)); - assertThat(du2.getUsedBytes(), equalTo(-1L)); + DiskUsage du2 = new DiskUsage("node1", "n1", "random", 100, 55); + assertThat(du2.getFreeDiskAsPercentage(), equalTo(55.0)); + assertThat(du2.getUsedDiskAsPercentage(), equalTo(45.0)); + assertThat(du2.getFreeBytes(), equalTo(55L)); + assertThat(du2.getUsedBytes(), equalTo(45L)); assertThat(du2.getTotalBytes(), equalTo(100L)); - DiskUsage du3 = new DiskUsage("node1", "n1", "random", -1, -1); - assertThat(du3.getFreeDiskAsPercentage(), equalTo(100.0)); - assertThat(du3.getFreeBytes(), equalTo(-1L)); - assertThat(du3.getUsedBytes(), equalTo(0L)); - assertThat(du3.getTotalBytes(), equalTo(-1L)); + // Test that DiskUsage handles invalid numbers, as reported by some + // filesystems (ZFS & NTFS) + DiskUsage du3 = new DiskUsage("node1", "n1", "random", 100, 101); + assertThat(du3.getFreeDiskAsPercentage(), equalTo(101.0)); + assertThat(du3.getFreeBytes(), equalTo(101L)); + assertThat(du3.getUsedBytes(), equalTo(-1L)); + assertThat(du3.getTotalBytes(), equalTo(100L)); - DiskUsage du4 = new DiskUsage("node1", "n1", "random", 0, 0); + DiskUsage du4 = new DiskUsage("node1", "n1", "random", -1, -1); assertThat(du4.getFreeDiskAsPercentage(), equalTo(100.0)); - assertThat(du4.getFreeBytes(), equalTo(0L)); + assertThat(du4.getFreeBytes(), equalTo(-1L)); assertThat(du4.getUsedBytes(), equalTo(0L)); - assertThat(du4.getTotalBytes(), equalTo(0L)); + assertThat(du4.getTotalBytes(), equalTo(-1L)); + + DiskUsage du5 = new DiskUsage("node1", "n1", "random", 0, 0); + assertThat(du5.getFreeDiskAsPercentage(), equalTo(100.0)); + assertThat(du5.getFreeBytes(), equalTo(0L)); + assertThat(du5.getUsedBytes(), equalTo(0L)); + assertThat(du5.getTotalBytes(), equalTo(0L)); } public void testRandomDiskUsage() { @@ -80,8 +87,8 @@ public void testRandomDiskUsage() { assertThat(du.getFreeBytes(), equalTo(free)); assertThat(du.getTotalBytes(), equalTo(total)); assertThat(du.getUsedBytes(), equalTo(total - free)); - assertThat(du.getFreeDiskAsPercentage(), equalTo(100.0 * ((double) free / total))); - assertThat(du.getUsedDiskAsPercentage(), equalTo(100.0 - (100.0 * ((double) free / total)))); + assertThat(du.getFreeDiskAsPercentage(), equalTo(100.0 * free / total)); + assertThat(du.getUsedDiskAsPercentage(), equalTo(100.0 - (100.0 * free / total))); } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 275076dc5d870..1dedcd06b72d6 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -2378,6 +2378,23 @@ public void testImproveConfigurationPerformsVotingConfigExclusionStateCheck() { } } + public void testPeerFinderListener() throws Exception { + try (Cluster cluster = new Cluster(3, true, Settings.EMPTY)) { + cluster.runRandomly(); + cluster.stabilise(); + ClusterNode leader = cluster.getAnyLeader(); + ClusterNode nodeWithListener 
= cluster.getAnyNodeExcept(leader); + AtomicBoolean listenerCalled = new AtomicBoolean(false); + nodeWithListener.coordinator.addPeerFinderListener(() -> listenerCalled.set(true)); + assertFalse(listenerCalled.get()); + leader.disconnect(); + cluster.runFor(DEFAULT_STABILISATION_TIME, "Letting disconnect take effect"); + cluster.stabilise(); + assertTrue(cluster.clusterNodes.contains(nodeWithListener)); + assertBusy(() -> assertTrue(listenerCalled.get())); + } + } + private ClusterState buildNewClusterStateWithVotingConfigExclusion( ClusterState currentState, Set newVotingConfigExclusion diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 4990d86330f0c..08f86b7c08664 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -282,8 +282,7 @@ public IndexMetadata verifyIndexMetadata(IndexMetadata indexMetadata, Version mi allocationService, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, indicesService, - shardLimitValidator, - threadPool + shardLimitValidator ); MetadataCreateIndexService createIndexService = new MetadataCreateIndexService( SETTINGS, diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java index 02bbbeef1a513..845246dd3d606 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java @@ -14,11 +14,14 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; import java.nio.ByteBuffer; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; @@ -29,26 +32,44 @@ public class PluginDescriptorTests extends ESTestCase { - public void testReadFromProperties() throws Exception { + private static final Map DESCRIPTOR_TEMPLATE = Map.of( + "name", + "my_plugin", + "description", + "fake desc", + "version", + "1.0", + "elasticsearch.version", + Version.CURRENT.toString(), + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin", + "modulename", + "org.mymodule" + ); + + PluginDescriptor mockDescriptor(String... 
additionalProps) throws IOException { + assert additionalProps.length % 2 == 0; + Map propsMap = new HashMap<>(DESCRIPTOR_TEMPLATE); + for (int i = 0; i < additionalProps.length; i += 2) { + propsMap.put(additionalProps[i], additionalProps[i + 1]); + } + String[] props = new String[propsMap.size() * 2]; + int i = 0; + for (var e : propsMap.entrySet()) { + props[i] = e.getKey(); + props[i + 1] = e.getValue(); + i += 2; + } + Path pluginDir = createTempDir().resolve("fake-plugin"); - PluginTestUtil.writePluginProperties( - pluginDir, - "description", - "fake desc", - "name", - "my_plugin", - "version", - "1.0", - "elasticsearch.version", - Version.CURRENT.toString(), - "java.version", - System.getProperty("java.specification.version"), - "classname", - "FakePlugin", - "modulename", - "org.mymodule" - ); - PluginDescriptor info = PluginDescriptor.readFromProperties(pluginDir); + PluginTestUtil.writePluginProperties(pluginDir, props); + return PluginDescriptor.readFromProperties(pluginDir); + } + + public void testReadFromProperties() throws Exception { + PluginDescriptor info = mockDescriptor(); assertEquals("my_plugin", info.getName()); assertEquals("fake desc", info.getDescription()); assertEquals("1.0", info.getVersion()); @@ -58,241 +79,77 @@ public void testReadFromProperties() throws Exception { } public void testReadFromPropertiesNameMissing() throws Exception { - Path pluginDir = createTempDir().resolve("fake-plugin"); - PluginTestUtil.writePluginProperties(pluginDir); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginDescriptor.readFromProperties(pluginDir)); + var e = expectThrows(IllegalArgumentException.class, () -> mockDescriptor("name", null)); assertThat(e.getMessage(), containsString("property [name] is missing in")); - PluginTestUtil.writePluginProperties(pluginDir, "name", ""); - e = expectThrows(IllegalArgumentException.class, () -> PluginDescriptor.readFromProperties(pluginDir)); + e = expectThrows(IllegalArgumentException.class, () -> mockDescriptor("name", "")); assertThat(e.getMessage(), containsString("property [name] is missing in")); } public void testReadFromPropertiesDescriptionMissing() throws Exception { - Path pluginDir = createTempDir().resolve("fake-plugin"); - PluginTestUtil.writePluginProperties(pluginDir, "name", "fake-plugin"); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginDescriptor.readFromProperties(pluginDir)); + var e = expectThrows(IllegalArgumentException.class, () -> mockDescriptor("description", null)); assertThat(e.getMessage(), containsString("[description] is missing")); } public void testReadFromPropertiesVersionMissing() throws Exception { - Path pluginDir = createTempDir().resolve("fake-plugin"); - PluginTestUtil.writePluginProperties(pluginDir, "description", "fake desc", "name", "fake-plugin"); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginDescriptor.readFromProperties(pluginDir)); + var e = expectThrows(IllegalArgumentException.class, () -> mockDescriptor("version", null)); assertThat(e.getMessage(), containsString("[version] is missing")); } public void testReadFromPropertiesElasticsearchVersionMissing() throws Exception { - Path pluginDir = createTempDir().resolve("fake-plugin"); - PluginTestUtil.writePluginProperties(pluginDir, "description", "fake desc", "name", "my_plugin", "version", "1.0"); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> 
PluginDescriptor.readFromProperties(pluginDir)); + var e = expectThrows(IllegalArgumentException.class, () -> mockDescriptor("elasticsearch.version", null)); assertThat(e.getMessage(), containsString("[elasticsearch.version] is missing")); } public void testReadFromPropertiesElasticsearchVersionEmpty() throws Exception { - Path pluginDir = createTempDir().resolve("fake-plugin"); - PluginTestUtil.writePluginProperties( - pluginDir, - "description", - "fake desc", - "name", - "my_plugin", - "version", - "1.0", - "elasticsearch.version", - " " - ); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginDescriptor.readFromProperties(pluginDir)); + var e = expectThrows(IllegalArgumentException.class, () -> mockDescriptor("elasticsearch.version", " ")); assertThat(e.getMessage(), containsString("[elasticsearch.version] is missing")); } public void testReadFromPropertiesJavaVersionMissing() throws Exception { - Path pluginDir = createTempDir().resolve("fake-plugin"); - PluginTestUtil.writePluginProperties( - pluginDir, - "description", - "fake desc", - "name", - "my_plugin", - "elasticsearch.version", - Version.CURRENT.toString(), - "version", - "1.0" - ); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginDescriptor.readFromProperties(pluginDir)); + var e = expectThrows(IllegalArgumentException.class, () -> mockDescriptor("java.version", null)); assertThat(e.getMessage(), containsString("[java.version] is missing")); } public void testReadFromPropertiesBadJavaVersionFormat() throws Exception { - String pluginName = "fake-plugin"; - Path pluginDir = createTempDir().resolve(pluginName); - PluginTestUtil.writePluginProperties( - pluginDir, - "description", - "fake desc", - "name", - pluginName, - "elasticsearch.version", - Version.CURRENT.toString(), - "java.version", - "1.7.0_80", - "classname", - "FakePlugin", - "version", - "1.0" - ); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginDescriptor.readFromProperties(pluginDir)); + var e = expectThrows(IllegalArgumentException.class, () -> mockDescriptor("java.version", "1.7.0_80")); assertThat(e.getMessage(), equalTo("Invalid version string: '1.7.0_80'")); } public void testReadFromPropertiesBogusElasticsearchVersion() throws Exception { - Path pluginDir = createTempDir().resolve("fake-plugin"); - PluginTestUtil.writePluginProperties( - pluginDir, - "description", - "fake desc", - "version", - "1.0", - "name", - "my_plugin", - "elasticsearch.version", - "bogus" - ); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginDescriptor.readFromProperties(pluginDir)); + var e = expectThrows(IllegalArgumentException.class, () -> mockDescriptor("elasticsearch.version", "bogus")); assertThat(e.getMessage(), containsString("version needs to contain major, minor, and revision")); } public void testReadFromPropertiesJvmMissingClassname() throws Exception { - Path pluginDir = createTempDir().resolve("fake-plugin"); - PluginTestUtil.writePluginProperties( - pluginDir, - "description", - "fake desc", - "name", - "my_plugin", - "version", - "1.0", - "elasticsearch.version", - Version.CURRENT.toString(), - "java.version", - System.getProperty("java.specification.version") - ); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginDescriptor.readFromProperties(pluginDir)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> mockDescriptor("classname", null)); 
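// A note on the null-override convention used by mockDescriptor in these tests (a hedged
// sketch, not part of the patch): HashMap.put keeps the null value in propsMap, the null is
// flattened into the props array, and it is only dropped by the PluginTestUtil.writeProperties
// change later in this diff, which skips null-valued pairs:
//
//     String value = stringProps[i + 1];
//     if (value != null) {   // null means "omit this property from the descriptor file"
//         properties.put(stringProps[i], stringProps[i + 1]);
//     }
//
// So mockDescriptor("classname", null) yields a descriptor file with no classname entry at all,
// which is exactly the "property [classname] is missing" condition asserted next.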
assertThat(e.getMessage(), containsString("property [classname] is missing")); } public void testReadFromPropertiesModulenameFallback() throws Exception { - Path pluginDir = createTempDir().resolve("fake-plugin"); - PluginTestUtil.writePluginProperties( - pluginDir, - "description", - "fake desc", - "name", - "my_plugin", - "version", - "1.0", - "elasticsearch.version", - Version.CURRENT.toString(), - "java.version", - System.getProperty("java.specification.version"), - "classname", - "FakePlugin" - ); - PluginDescriptor info = PluginDescriptor.readFromProperties(pluginDir); + PluginDescriptor info = mockDescriptor("modulename", null); assertThat(info.getModuleName().isPresent(), is(false)); assertThat(info.getExtendedPlugins(), empty()); } public void testReadFromPropertiesModulenameEmpty() throws Exception { - Path pluginDir = createTempDir().resolve("fake-plugin"); - PluginTestUtil.writePluginProperties( - pluginDir, - "description", - "fake desc", - "name", - "my_plugin", - "version", - "1.0", - "elasticsearch.version", - Version.CURRENT.toString(), - "java.version", - System.getProperty("java.specification.version"), - "classname", - "FakePlugin", - "modulename", - " " - ); - PluginDescriptor info = PluginDescriptor.readFromProperties(pluginDir); + PluginDescriptor info = mockDescriptor("modulename", " "); assertThat(info.getModuleName().isPresent(), is(false)); assertThat(info.getExtendedPlugins(), empty()); } public void testExtendedPluginsSingleExtension() throws Exception { - Path pluginDir = createTempDir().resolve("fake-plugin"); - PluginTestUtil.writePluginProperties( - pluginDir, - "description", - "fake desc", - "name", - "my_plugin", - "version", - "1.0", - "elasticsearch.version", - Version.CURRENT.toString(), - "java.version", - System.getProperty("java.specification.version"), - "classname", - "FakePlugin", - "extended.plugins", - "foo" - ); - PluginDescriptor info = PluginDescriptor.readFromProperties(pluginDir); + PluginDescriptor info = mockDescriptor("extended.plugins", "foo"); assertThat(info.getExtendedPlugins(), contains("foo")); } public void testExtendedPluginsMultipleExtensions() throws Exception { - Path pluginDir = createTempDir().resolve("fake-plugin"); - PluginTestUtil.writePluginProperties( - pluginDir, - "description", - "fake desc", - "name", - "my_plugin", - "version", - "1.0", - "elasticsearch.version", - Version.CURRENT.toString(), - "java.version", - System.getProperty("java.specification.version"), - "classname", - "FakePlugin", - "extended.plugins", - "foo,bar,baz" - ); - PluginDescriptor info = PluginDescriptor.readFromProperties(pluginDir); + PluginDescriptor info = mockDescriptor("extended.plugins", "foo,bar,baz"); assertThat(info.getExtendedPlugins(), contains("foo", "bar", "baz")); } public void testExtendedPluginsEmpty() throws Exception { - Path pluginDir = createTempDir().resolve("fake-plugin"); - PluginTestUtil.writePluginProperties( - pluginDir, - "description", - "fake desc", - "name", - "my_plugin", - "version", - "1.0", - "elasticsearch.version", - Version.CURRENT.toString(), - "java.version", - System.getProperty("java.specification.version"), - "classname", - "FakePlugin", - "extended.plugins", - "" - ); - PluginDescriptor info = PluginDescriptor.readFromProperties(pluginDir); + PluginDescriptor info = mockDescriptor("extended.plugins", ""); assertThat(info.getExtendedPlugins(), empty()); } @@ -368,27 +225,7 @@ public void testPluginListSorted() { } public void testUnknownProperties() throws Exception { - Path pluginDir = 
createTempDir().resolve("fake-plugin"); - PluginTestUtil.writePluginProperties( - pluginDir, - "extra", - "property", - "unknown", - "property", - "description", - "fake desc", - "classname", - "Foo", - "name", - "my_plugin", - "version", - "1.0", - "elasticsearch.version", - Version.CURRENT.toString(), - "java.version", - System.getProperty("java.specification.version") - ); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginDescriptor.readFromProperties(pluginDir)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> mockDescriptor("extra", "property")); assertThat(e.getMessage(), containsString("Unknown properties for plugin [my_plugin] in plugin descriptor")); } diff --git a/server/src/test/java/org/elasticsearch/script/UpdateCtxMapTests.java b/server/src/test/java/org/elasticsearch/script/UpdateCtxMapTests.java new file mode 100644 index 0000000000000..1cbb67ff30f7e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/script/UpdateCtxMapTests.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.script; + +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.time.ZonedDateTime; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.hasEntry; + +public class UpdateCtxMapTests extends ESTestCase { + UpdateCtxMap map; + Metadata meta; + + private final long TS = 704289600000L; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + map = new UpdateCtxMap("myIndex", "myId", 5, "myRoute", "myType", "index", TS, Map.of("foo", "bar")); + meta = map.getMetadata(); + } + + @SuppressWarnings("unchecked") + public void testSourceWrapping() { + assertThat((Map) map.get("_source"), hasEntry("foo", "bar")); + assertThat(map.getSource(), hasEntry("foo", "bar")); + } + + public void testGetters() { + assertEquals("myIndex", meta.getIndex()); + assertEquals("myId", meta.getId()); + assertEquals(5, meta.getVersion()); + assertEquals("myRoute", meta.getRouting()); + assertEquals("index", meta.getOp()); + } + + public void testMetadataImmutable() { + IllegalArgumentException err = expectThrows(IllegalArgumentException.class, () -> meta.put("_index", "myIndex2")); + assertEquals("_index cannot be updated", err.getMessage()); + err = expectThrows(IllegalArgumentException.class, () -> meta.put("_id", "myId")); + assertEquals("_id cannot be updated", err.getMessage()); + err = expectThrows(IllegalArgumentException.class, () -> meta.put("_routing", "myRouting")); + assertEquals("_routing cannot be updated", err.getMessage()); + err = expectThrows(IllegalArgumentException.class, () -> meta.put("_type", "myType")); + assertEquals("_type cannot be updated", err.getMessage()); + err = expectThrows(IllegalArgumentException.class, () -> meta.put("_version", 10)); + assertEquals("_version cannot be updated", err.getMessage()); + } + + public void testValidOps() { + List ops = List.of("noop", "index", "delete"); + for (String op : ops) { + meta.setOp(op); + assertEquals(op, meta.getOp()); + } + for (String op : ops) { + map.put("op", op); + assertEquals(op, map.get("op")); + } + } + + public void 
testInvalidOp() { + IllegalArgumentException err = expectThrows(IllegalArgumentException.class, () -> meta.setOp("none")); + assertEquals("'none' is not allowed, use 'noop' instead", err.getMessage()); + err = expectThrows(IllegalArgumentException.class, () -> meta.setOp(null)); + assertEquals("op must be one of [delete, index, noop], not [null]", err.getMessage()); + err = expectThrows(IllegalArgumentException.class, () -> meta.setOp("foo")); + assertEquals("op must be one of [delete, index, noop], not [foo]", err.getMessage()); + meta.put("op", "none"); + assertEquals("noop", meta.getOp()); + meta.put("op", "foo"); + assertEquals("noop", meta.getOp()); + meta.remove("op"); + assertEquals("noop", meta.getOp()); + meta.put("op", "index"); + assertEquals("index", meta.getOp()); + } + + public void testTimestamp() { + IllegalArgumentException err = expectThrows(IllegalArgumentException.class, () -> meta.put("_now", 1234)); + assertEquals("_now cannot be updated", err.getMessage()); + assertEquals(TS, meta.get("_now")); + ZonedDateTime zdt = meta.getTimestamp(); + assertEquals(4, zdt.getMonthValue()); + assertEquals(26, zdt.getDayOfMonth()); + assertEquals(1992, zdt.getYear()); + } +} diff --git a/server/src/test/java/org/elasticsearch/script/UpsertCtxMapTests.java b/server/src/test/java/org/elasticsearch/script/UpsertCtxMapTests.java new file mode 100644 index 0000000000000..f0dff563caa11 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/script/UpsertCtxMapTests.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.script; + +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.hasEntry; + +public class UpsertCtxMapTests extends ESTestCase { + UpsertCtxMap map; + Metadata meta; + long TS = 922860000000L; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + map = new UpsertCtxMap("myIndex", "myId", "create", TS, Map.of("foo", "bar")); + meta = map.getMetadata(); + } + + @SuppressWarnings("unchecked") + public void testSourceWrapping() { + assertThat((Map) map.get("_source"), hasEntry("foo", "bar")); + assertThat(map.getSource(), hasEntry("foo", "bar")); + } + + public void testGetters() { + assertEquals("myIndex", meta.getIndex()); + assertEquals("myId", meta.getId()); + assertEquals("create", meta.getOp()); + UnsupportedOperationException err = expectThrows(UnsupportedOperationException.class, () -> meta.getVersion()); + assertEquals("version is unavailable for insert", err.getMessage()); + err = expectThrows(UnsupportedOperationException.class, () -> meta.getRouting()); + assertEquals("routing is unavailable for insert", err.getMessage()); + } + + public void testMetadataImmutable() { + IllegalArgumentException err = expectThrows(IllegalArgumentException.class, () -> meta.put("_index", "myIndex2")); + assertEquals("_index cannot be updated", err.getMessage()); + err = expectThrows(IllegalArgumentException.class, () -> meta.put("_id", "myId")); + assertEquals("_id cannot be updated", err.getMessage()); + err = expectThrows(IllegalArgumentException.class, () -> meta.put("_now", 1234)); + assertEquals("_now cannot be updated", err.getMessage()); + } + + public void testValidOps() { + List ops = List.of("noop", "create"); + for (String op : ops) { + meta.setOp(op); + assertEquals(op, meta.getOp()); + } + for (String op : ops) { + map.put("op", op); + assertEquals(op, map.get("op")); + } + } + + public void testNoneOp() { + IllegalArgumentException err = expectThrows(IllegalArgumentException.class, () -> meta.setOp("none")); + assertEquals("'none' is not allowed, use 'noop' instead", err.getMessage()); + meta.put("op", "none"); + assertEquals("noop", meta.getOp()); + meta.remove("op"); + assertEquals("noop", meta.getOp()); + meta.put("op", "create"); + assertEquals("create", meta.getOp()); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java index 9dd973bc9eb9d..00f188e536fc9 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java @@ -67,6 +67,7 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation; import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; @@ -80,6 +81,8 @@ import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import 
org.elasticsearch.search.aggregations.metrics.Max; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.Sum; +import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.TopHits; import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; @@ -1122,6 +1125,47 @@ public void testMultiValuedWithKeywordDesc() throws Exception { }); } + public void testMultiValuedWithLong() throws Exception { + final List<Map<String, List<Object>>> dataset = new ArrayList<>(); + dataset.addAll(Arrays.asList(Map.of("long", List.of(10L, 10L)))); + testSearchCase( + Arrays.asList(new MatchAllDocsQuery(), new FieldExistsQuery("long")), + dataset, + () -> new CompositeAggregationBuilder("name", Arrays.asList(new TermsValuesSourceBuilder("long").field("long"))).subAggregation( + new SumAggregationBuilder("sum").field("long") + ), + (InternalComposite result) -> { + assertEquals(1, result.getBuckets().size()); + assertEquals("{long=10}", result.afterKey().toString()); + InternalMultiBucketAggregation.InternalBucket bucket = result.getBuckets().get(0); + assertEquals("{long=10}", bucket.getKeyAsString()); + assertEquals(1L, bucket.getDocCount()); + assertThat(bucket.getAggregations().get("sum"), instanceOf(Sum.class)); + assertEquals(20L, ((Sum) bucket.getAggregations().get("sum")).value(), 0.01d); + } + ); + } + + public void testMultiValuedWithDouble() throws Exception { + final List<Map<String, List<Object>>> dataset = new ArrayList<>(); + dataset.addAll(Arrays.asList(Map.of("double", List.of(10.0d, 10.0d)))); + testSearchCase( + Arrays.asList(new MatchAllDocsQuery(), new FieldExistsQuery("double")), + dataset, + () -> new CompositeAggregationBuilder("name", Arrays.asList(new TermsValuesSourceBuilder("double").field("double"))) + .subAggregation(new SumAggregationBuilder("sum").field("double")), + (InternalComposite result) -> { + assertEquals(1, result.getBuckets().size()); + assertEquals("{double=10.0}", result.afterKey().toString()); + InternalMultiBucketAggregation.InternalBucket bucket = result.getBuckets().get(0); + assertEquals("{double=10.0}", bucket.getKeyAsString()); + assertEquals(1L, bucket.getDocCount()); + assertThat(bucket.getAggregations().get("sum"), instanceOf(Sum.class)); + assertEquals(20.0d, ((Sum) bucket.getAggregations().get("sum")).value(), 0.01d); + } + ); + } + public void testWithKeywordAndLong() throws Exception { final List<Map<String, List<Object>>> dataset = new ArrayList<>(); dataset.addAll( diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorTests.java index 54e6848b8c710..e1618ac7cb600 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorTests.java @@ -408,6 +408,49 @@ public void testFieldAlias() throws IOException { } } + public void testFieldBackground() throws IOException { + TextFieldType textFieldType = new TextFieldType("text"); + textFieldType.setFielddata(true); + + IndexWriterConfig indexWriterConfig = newIndexWriterConfig(new StandardAnalyzer()); + indexWriterConfig.setMaxBufferedDocs(100); + indexWriterConfig.setRAMBufferSizeMB(100); // flush on open to have a single segment + + try 
(Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, indexWriterConfig)) { + addMixedTextDocs(w); + + SignificantTermsAggregationBuilder agg = significantTerms("sig_text").field("text"); + SignificantTermsAggregationBuilder backgroundAgg = significantTerms("sig_text").field("text"); + + String executionHint = randomExecutionHint(); + agg.executionHint(executionHint); + backgroundAgg.executionHint(executionHint); + + QueryBuilder backgroundFilter = QueryBuilders.termsQuery("text", "odd"); + backgroundAgg.backgroundFilter(backgroundFilter); + + try (IndexReader reader = DirectoryReader.open(w)) { + assertEquals("test expects a single segment", 1, reader.leaves().size()); + IndexSearcher searcher = new IndexSearcher(reader); + + SignificantTerms evenTerms = searchAndReduce(searcher, new TermQuery(new Term("text", "even")), agg, textFieldType); + SignificantTerms backgroundEvenTerms = searchAndReduce( + searcher, + new TermQuery(new Term("text", "even")), + backgroundAgg, + textFieldType + ); + + assertFalse(evenTerms.getBuckets().isEmpty()); + assertFalse(backgroundEvenTerms.getBuckets().isEmpty()); + assertEquals(((InternalMappedSignificantTerms) evenTerms).getSubsetSize(), 5); + assertEquals(((InternalMappedSignificantTerms) evenTerms).getSupersetSize(), 10); + assertEquals(((InternalMappedSignificantTerms) backgroundEvenTerms).getSubsetSize(), 5); + assertEquals(((InternalMappedSignificantTerms) backgroundEvenTerms).getSupersetSize(), 5); + } + } + } + public void testAllDocsWithoutStringFieldviaGlobalOrds() throws IOException { testAllDocsWithoutStringField("global_ordinals"); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java index 7ec5c6b382856..2c95cf6b08da8 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java @@ -791,6 +791,10 @@ public final void testSyntheticSource() throws IOException { assertThat(syntheticSource(mapper, b -> b.field("field", syntheticSourceExample.inputValue)), equalTo(expected)); } + protected boolean supportsEmptyInputArray() { + return true; + } + public final void testSyntheticSourceMany() throws IOException { int maxValues = randomBoolean() ? 
1 : 5; SyntheticSourceSupport support = syntheticSourceSupport(); @@ -810,7 +814,7 @@ public final void testSyntheticSourceMany() throws IOException { ) ) { for (int i = 0; i < count; i++) { - if (rarely()) { + if (rarely() && supportsEmptyInputArray()) { expected[i] = "{}"; iw.addDocument(mapper.parse(source(b -> b.startArray("field").endArray())).rootDoc()); continue; @@ -868,6 +872,7 @@ public final void testSyntheticSourceInObject() throws IOException { } public final void testSyntheticEmptyList() throws IOException { + assumeTrue("Field does not support [] as input", supportsEmptyInputArray()); SyntheticSourceExample syntheticSourceExample = syntheticSourceSupport().example(5); DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { b.startObject("field"); diff --git a/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java b/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java index ec3ae93cc63a6..87ae234bdcbb3 100644 --- a/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java +++ b/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java @@ -27,7 +27,10 @@ private static void writeProperties(Path propertiesFile, String... stringProps) Files.createDirectories(propertiesFile.getParent()); Properties properties = new Properties(); for (int i = 0; i < stringProps.length; i += 2) { - properties.put(stringProps[i], stringProps[i + 1]); + String value = stringProps[i + 1]; + if (value != null) { + properties.put(stringProps[i], stringProps[i + 1]); + } } try (OutputStream out = Files.newOutputStream(propertiesFile)) { properties.store(out, ""); diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java index f0c32d8b14a34..7451d705aa2d2 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java @@ -304,6 +304,7 @@ public void testReadBlobWithNoHttpResponse() { ); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/88666") public void testReadBlobWithPrematureConnectionClose() { final int maxRetries = randomInt(20); final BlobContainer blobContainer = createBlobContainer(maxRetries, null, null, null); diff --git a/x-pack/docs/en/rest-api/security/create-api-keys.asciidoc b/x-pack/docs/en/rest-api/security/create-api-keys.asciidoc index 5798560a4f457..0373bda872270 100644 --- a/x-pack/docs/en/rest-api/security/create-api-keys.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-api-keys.asciidoc @@ -50,7 +50,7 @@ The following parameters can be specified in the body of a POST or PUT request: [[api-key-role-descriptors]] `role_descriptors`:: -(Optional, array-of-role-descriptor) An array of role descriptors for this API +(Optional, object) The role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, then the API key will have a _point in time snapshot of permissions of the authenticated user_. If you supply role descriptors then the resultant permissions @@ -144,7 +144,7 @@ API key information. 
<1> Unique `id` for this API key <2> Optional expiration in milliseconds for this API key <3> Generated API key -<4> API key credentials which is the Base64-encoding of the UTF-8 +<4> API key credentials which is the Base64-encoding of the UTF-8 representation of the `id` and `api_key` joined by a colon (`:`). To use the generated API key, send a request with an `Authorization` header that diff --git a/x-pack/docs/en/rest-api/security/grant-api-keys.asciidoc b/x-pack/docs/en/rest-api/security/grant-api-keys.asciidoc index 1cdc2aafb2657..3d38f82cb95d0 100644 --- a/x-pack/docs/en/rest-api/security/grant-api-keys.asciidoc +++ b/x-pack/docs/en/rest-api/security/grant-api-keys.asciidoc @@ -70,7 +70,7 @@ expire. (Required, string) Specifies the name for this API key. `role_descriptors`::: -(Optional, array-of-role-descriptor) An array of role descriptors for this API +(Optional, object) The role descriptors for this API key. This parameter is optional. When it is not specified or is an empty array, the API key has a point in time snapshot of permissions of the specified user or access token. If you supply role descriptors, the resultant permissions are an diff --git a/x-pack/docs/en/rest-api/security/update-api-key.asciidoc b/x-pack/docs/en/rest-api/security/update-api-key.asciidoc index d6ec551ad7ded..c8104c295e479 100644 --- a/x-pack/docs/en/rest-api/security/update-api-key.asciidoc +++ b/x-pack/docs/en/rest-api/security/update-api-key.asciidoc @@ -1,5 +1,275 @@ [role="xpack"] [[security-api-update-api-key]] -=== Update API key information API +=== Update API key API -coming::[8.4.0] +++++ +Update API key +++++ + +[[security-api-update-api-key-request]] +==== {api-request-title} + +`PUT /_security/api_key/` + +[[security-api-update-api-key-prereqs]] +==== {api-prereq-title} + +* To use this API, you must have at least the `manage_own_api_key` cluster privilege. +Users can only update API keys that they created or that were granted to them. +To update another user's API key, use the <> +to submit a request on behalf of another user. + +IMPORTANT: It's not possible to use an API key as the authentication credential for this API. +To update an API key, the owner user's credentials are required. + +[[security-api-update-api-key-desc]] +==== {api-description-title} + +Use this API to update API keys created by the <> or <> APIs. +It's not possible to update expired API keys, or API keys that have been invalidated by <>. + +This API supports updates to an API key's access scope and metadata. +The access scope of an API key is derived from the <> you specify in the request, and a snapshot of the owner user's permissions at the time of the request. +The snapshot of the owner's permissions is updated automatically on every call. + +[IMPORTANT] +==== +If you don't specify <> in the request, a call to this API might still change the API key's access scope. +This change can occur if the owner user's permissions have changed since the API key was created or last modified. +==== + +[[security-api-update-api-key-path-params]] +==== {api-path-parms-title} + +`id`:: +(Required, string) The ID of the API key to update. + +[[security-api-update-api-key-request-body]] +==== {api-request-body-title} + +You can specify the following parameters in the request body, which is optional. + +[[security-api-update-api-key-api-key-role-descriptors]] +`role_descriptors`:: +(Optional, object) The role descriptors to assign to this API key. 
+The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. +If no privileges are assigned, the API key inherits the owner user's full permissions. +You can assign new privileges to the API key by specifying them in this parameter. +To remove assigned privileges, you can supply an empty `role_descriptors` parameter. +The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not. +The structure of a role descriptor is the same as the request for the <>. + +`metadata`:: +(Optional, object) Arbitrary metadata that you want to associate with the API key. +It supports nested data structures. +Within the `metadata` object, top-level keys beginning with `_` are reserved for system usage. +When specified, this fully replaces metadata previously associated with the API key. + +[[security-api-update-api-key-response-body]] +==== {api-response-body-title} + +`updated`:: +(boolean) If `true`, the API key was updated. +If `false`, the API key didn't change because no change was detected. + +[[security-api-update-api-key-example]] +==== {api-examples-title} + +If you create an API key as follows: + +[source,console] +------------------------------------------------------------ +POST /_security/api_key +{ + "name": "my-api-key", + "role_descriptors": { + "role-a": { + "cluster": ["all"], + "index": [ + { + "names": ["index-a*"], + "privileges": ["read"] + } + ] + } + }, + "metadata": { + "application": "my-application", + "environment": { + "level": 1, + "trusted": true, + "tags": ["dev", "staging"] + } + } +} +------------------------------------------------------------ + +A successful call returns a JSON structure that provides API key information. 
+For example: + +[source,console-result] +-------------------------------------------------- +{ + "id": "VuaCfGcBCdbkQm-e5aOx", + "name": "my-api-key", + "api_key": "ui2lp2axTNmsyakw9tvNnw", + "encoded": "VnVhQ2ZHY0JDZGJrUW0tZTVhT3g6dWkybHAyYXhUTm1zeWFrdzl0dk5udw==" +} +-------------------------------------------------- +// TESTRESPONSE[s/VuaCfGcBCdbkQm-e5aOx/$body.id/] +// TESTRESPONSE[s/ui2lp2axTNmsyakw9tvNnw/$body.api_key/] +// TESTRESPONSE[s/VnVhQ2ZHY0JDZGJrUW0tZTVhT3g6dWkybHAyYXhUTm1zeWFrdzl0dk5udw==/$body.encoded/] + +For the examples below, assume that the owner user's permissions are: + +[[security-api-update-api-key-examples-user-permissions]] +[source,js] +-------------------------------------------------- +{ + "cluster": ["all"], + "index": [ + { + "names": ["*"], + "privileges": ["all"] + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + +The following example updates the API key created above, assigning it new role descriptors and metadata: + +[source,console] +---- +PUT /_security/api_key/VuaCfGcBCdbkQm-e5aOx +{ + "role_descriptors": { + "role-a": { + "index": [ + { + "names": ["*"], + "privileges": ["write"] + } + ] + } + }, + "metadata": { + "environment": { + "level": 2, + "trusted": true, + "tags": ["production"] + } + } +} +---- +// TEST[s/VuaCfGcBCdbkQm-e5aOx/\${body.id}/] +// TEST[continued] + +A successful call returns a JSON structure indicating that the API key was updated: + +[source,console-result] +---- +{ + "updated": true +} +---- + +The API key's effective permissions after the update will be the intersection of the supplied role descriptors and the <>: + +[source,js] +-------------------------------------------------- +{ + "index": [ + { + "names": ["*"], + "privileges": ["write"] + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + +The following example removes the API key's previously assigned permissions. 
+ +[source,console] +---- +PUT /_security/api_key/VuaCfGcBCdbkQm-e5aOx +{ + "role_descriptors": {} +} +---- +// TEST[skip:api key id not available anymore] + +Which returns the response: + +[source,console-result] +---- +{ + "updated": true +} +---- + +The API key's effective permissions after the update will be the same as the <>: + +[source,js] +-------------------------------------------------- +{ + "cluster": ["all"], + "index": [ + { + "names": ["*"], + "privileges": ["all"] + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + +For the next example, assume that the owner user's permissions have changed from <> to: + +[source,js] +-------------------------------------------------- +{ + "cluster": ["manage_security"], + "index": [ + { + "names": ["*"], + "privileges": ["read"] + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + +The following request auto-updates the snapshot of the user's permissions associated with the API key: + +[source,console] +---- +PUT /_security/api_key/VuaCfGcBCdbkQm-e5aOx +---- +// TEST[skip:api key id not available anymore] + +Which returns the response: + +[source,console-result] +---- +{ + "updated": true +} +---- + +Resulting in the following effective permissions for the API key: + +[source,js] +-------------------------------------------------- +{ + "cluster": ["manage_security"], + "index": [ + { + "names": ["*"], + "privileges": ["read"] + } + ] +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/x-pack/docs/en/security/configuring-stack-security.asciidoc b/x-pack/docs/en/security/configuring-stack-security.asciidoc index ac1eb887a39de..4fd0f5aa2e04d 100644 --- a/x-pack/docs/en/security/configuring-stack-security.asciidoc +++ b/x-pack/docs/en/security/configuring-stack-security.asciidoc @@ -1,5 +1,5 @@ [[configuring-stack-security]] -== Start the Elastic Stack with security enabled +== Start the {stack} with security enabled automatically When you start {es} for the first time, the following security configuration occurs automatically: @@ -12,8 +12,8 @@ generated for the transport and HTTP layers. You can then start {kib} and enter the enrollment token, which is valid for 30 minutes. This token automatically applies the security settings from your {es} -cluster, authenticates to {es} with the built-in `kibana` service account, and writes the -security configuration to `kibana.yml`. +cluster, authenticates to {es} with the built-in `kibana` service account, and +writes the security configuration to `kibana.yml`. NOTE: There are <> where security can't be configured automatically because the node startup process detects that diff --git a/x-pack/docs/en/security/images/elastic-security-overview.png b/x-pack/docs/en/security/images/elastic-security-overview.png index 90865d14d36bd..4cf6b08f5a716 100644 Binary files a/x-pack/docs/en/security/images/elastic-security-overview.png and b/x-pack/docs/en/security/images/elastic-security-overview.png differ diff --git a/x-pack/docs/en/security/index.asciidoc b/x-pack/docs/en/security/index.asciidoc index 838e57ccdd593..c3f3a9295c3dc 100644 --- a/x-pack/docs/en/security/index.asciidoc +++ b/x-pack/docs/en/security/index.asciidoc @@ -11,16 +11,15 @@ safe, adhere to the <>. The first principle is to run {es} with security enabled. Configuring security can be complicated, so we made it easy to -<> by -default. Just start {es} to enable and configure the {stack} security features. 
-You can then connect a {kib} instance to your -secured {es} cluster and enroll additional nodes. You'll have password -protection, internode communication secured with Transport Layer Security (TLS), -and encrypted connections between {es} and {kib}. - -If you prefer to manage security on your own, you can -<> to secure {es} -clusters and any clients that communicate with your clusters. You can also +<>. +For any new clusters, just start {es} to automatically enable password protection, +secure internode communication with Transport Layer Security (TLS), and encrypt +connections between {es} and {kib}. + +If you have an existing, unsecured cluster (or prefer to manage security on your +own), you can +<> to secure +{es} clusters and any clients that communicate with your clusters. You can also implement additional security measures, such as role-based access control, IP filtering, and auditing. diff --git a/x-pack/docs/en/security/securing-communications/security-minimal-setup.asciidoc b/x-pack/docs/en/security/securing-communications/security-minimal-setup.asciidoc new file mode 100644 index 0000000000000..8032730fef747 --- /dev/null +++ b/x-pack/docs/en/security/securing-communications/security-minimal-setup.asciidoc @@ -0,0 +1,167 @@ +[[security-minimal-setup]] +=== Set up minimal security for {es} +++++ +Set up minimal security +++++ + +IMPORTANT: You only need to complete the following steps if you're running an +existing, unsecured cluster and want to enable the {es} {security-features}. + +In {es} 8.0 and later, security is +<> when you start {es} for the +first time. + +If you're running an existing {es} cluster where security is disabled, you can +manually enable the {es} {security-features} and then create passwords for +built-in users. You can add more users later, but using the built-in users +simplifies the process of enabling security for your cluster. + +include::../security-manual-configuration.asciidoc[tag=minimal-security-note] + +==== Enable {es} security features + +Enabling the {es} security features provides basic authentication so +that you can run a local cluster with username and password authentication. + +. On *every* node in your cluster, stop both {kib} and {es} if they are running. + +. On *every* node in your cluster, add the `xpack.security.enabled` setting to +the `$ES_PATH_CONF/elasticsearch.yml` file and set the value to `true`: ++ +[source,yaml] +---- +xpack.security.enabled: true +---- ++ +NOTE: The `$ES_PATH_CONF` variable is the path for the {es} +configuration files. If you installed {es} using archive distributions +(`zip` or `tar.gz`), the variable defaults to `$ES_HOME/config`. If you used +package distributions (Debian or RPM), the variable defaults to `/etc/elasticsearch`. + +. If your cluster has a single node, add the `discovery.type` setting in the +`$ES_PATH_CONF/elasticsearch.yml` file and set the value to `single-node`. This +setting ensures that your node does not inadvertently connect to other clusters +that might be running on your network. ++ +[source,yaml] +---- +discovery.type: single-node +---- + +[[security-create-builtin-users]] +==== Set passwords for built-in users + +To communicate with the cluster, you must configure a username for the built-in +users. Unless you enable anonymous access, all requests that don’t include a +username and password are rejected. + +NOTE: You only need to set passwords for the `elastic` and `kibana_system` users +when enabling minimal or basic security. + +. 
On *every* node in your cluster, start {es}. For example, if you installed +{es} with a `.tar.gz` package, run the following command from the `ES_HOME` +directory: ++ +[source,shell] +---- +./bin/elasticsearch +---- + +. In another terminal window, set the passwords for the built-in users by +running the <> utility. ++ +IMPORTANT: You can run the `elasticsearch-reset-password` utility +against any node in your cluster. However, you should only run this utility *one +time* for the entire cluster. ++ +Using the `auto` parameter +outputs randomly-generated passwords to the console that you can change later +if necessary: ++ +[source,shell] +---- +./bin/elasticsearch-reset-password auto +---- ++ +If you want to use your own passwords, run the command with the +`interactive` parameter instead of the `auto` parameter. Using this mode +steps you through password configuration for all of the built-in users. ++ +[source,shell] +---- +./bin/elasticsearch-reset-password interactive +---- + +. Save the generated passwords. You'll need them to add the built-in user to +{kib}. + +*Next*: <> + +[[add-built-in-users]] +==== Configure {kib} to connect to {es} with a password + +When the {es} security features are enabled, users must log in to {kib} with a +valid username and password. + +You'll configure {kib} to use the built-in `kibana_system` user and the password that +you created earlier. {kib} performs some background tasks that require use of the +`kibana_system` user. + +This account is not meant for individual users and does not have permission to log in +to {kib} from a browser. Instead, you'll log in to {kib} as the `elastic` superuser. + +. Add the `elasticsearch.username` setting to the `KIB_PATH_CONF/kibana.yml` +file and set the value to the `kibana_system` user: ++ +[source,yaml] +---- +elasticsearch.username: "kibana_system" +---- ++ +NOTE: The `KIB_PATH_CONF` variable is the path for the {kib} +configuration files. If you installed {kib} using archive distributions +(`zip` or `tar.gz`), the variable defaults to `KIB_HOME/config`. If you used +package distributions (Debian or RPM), the variable defaults to `/etc/kibana`. + +. From the directory where you installed {kib}, run the following commands +to create the {kib} keystore and add the secure settings: + + a. Create the {kib} keystore: ++ +[source,shell] +---- +./bin/kibana-keystore create +---- + + b. Add the password for the `kibana_system` user to the {kib} keystore: ++ +[source,shell] +---- +./bin/kibana-keystore add elasticsearch.password +---- ++ +When prompted, enter the password for the `kibana_system` user. + +. Restart {kib}. For example, if you installed {kib} with a `.tar.gz` package, run the following command from the {kib} directory: ++ +[source,shell] +---- +./bin/kibana +---- + +. Log in to {kib} as the `elastic` user. Use this superuser account to +{kibana-ref}/tutorial-secure-access-to-kibana.html[manage spaces, create new users, and assign roles]. If you're running {kib} locally, go to `http://localhost:5601` to view the login page. + +[[minimal-security-whatsnext]] +==== What's next? + +Congratulations! You enabled password protection for your local cluster to +prevent unauthorized access. You can log in to {kib} securely as the `elastic` +user and create additional users and roles. If you're running a <>, then you can stop here. + +If your cluster has multiple nodes, then you must configure Transport Layer +Security (TLS) between nodes. <> clusters +will not start if you do not enable TLS. 
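+For orientation before moving on: the basic-security setup in the next guide amounts to
+enabling transport-layer TLS with settings along these lines (a representative sketch only,
+assuming the PKCS#12 certificate files generated by `elasticsearch-certutil`; the linked
+guide has the authoritative steps):
+
+[source,yaml]
+----
+# elasticsearch.yml on every node: secure internode traffic with TLS
+xpack.security.enabled: true
+xpack.security.transport.ssl.enabled: true
+xpack.security.transport.ssl.verification_mode: certificate
+xpack.security.transport.ssl.keystore.path: elastic-certificates.p12
+xpack.security.transport.ssl.truststore.path: elastic-certificates.p12
+----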
+ +<> to +secure all internal communication between nodes in your cluster. diff --git a/x-pack/docs/en/security/security-manual-configuration.asciidoc b/x-pack/docs/en/security/security-manual-configuration.asciidoc index 1dee20abeffbb..b57797b519cf1 100644 --- a/x-pack/docs/en/security/security-manual-configuration.asciidoc +++ b/x-pack/docs/en/security/security-manual-configuration.asciidoc @@ -1,8 +1,5 @@ [[manually-configure-security]] == Manually configure security -++++ -Configure security -++++ Security needs vary depending on whether you're developing locally on your laptop or securing all communications in a production environment. Regardless @@ -11,9 +8,10 @@ incredibly important to protect your data. That's why security is <> in {es} 8.0 and later. -If you want to use your own Certificate Authority (CA) or would rather manually -configure security, the following scenarios provide steps for configuring TLS -on the transport layer, plus securing HTTPS traffic if you want it. +If you want to enable security on an existing, unsecured cluster, use your own +Certificate Authority (CA), or would rather manually configure security, the +following scenarios provide steps for configuring TLS on the transport layer, +plus securing HTTPS traffic if you want it. If you configure security manually _before_ starting your {es} nodes, the auto-configuration process will respect your security configuration. You can @@ -22,6 +20,26 @@ adjust your TLS configuration at any time, such as image::images/elastic-security-overview.png[Elastic Security layers] +[discrete] +[[security-minimal-overview]] +=== Minimal security ({es} Development) + +If you've been working with {es} and want to enable security on your existing, +unsecured cluster, start here. You'll set passwords for the built-in users to prevent +unauthorized access to your local cluster, and also configure password +authentication for {kib}. + +// tag::minimal-security-note[] +IMPORTANT: The minimal security scenario is not sufficient for +<> clusters. If your cluster has multiple +nodes, you must enable minimal security and then +<> between +nodes. + +// end::minimal-security-note[] + +<> + [discrete] [[security-basic-overview]] === Basic security ({es} + {kib}) @@ -54,6 +72,8 @@ cluster are secure. 
<> +include::securing-communications/security-minimal-setup.asciidoc[] + include::securing-communications/security-basic-setup.asciidoc[] include::securing-communications/security-basic-setup-https.asciidoc[] diff --git a/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java b/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java index 4380029a331ff..989d4d10dc386 100644 --- a/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java +++ b/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java @@ -31,6 +31,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -70,9 +71,7 @@ public void testGetGlobalCheckpoints() throws Exception { ); final GetGlobalCheckpointsAction.Response response = client().execute(GetGlobalCheckpointsAction.INSTANCE, request).get(); long[] expected = new long[shards]; - for (int i = 0; i < shards; ++i) { - expected[i] = -1; - } + Arrays.fill(expected, -1); assertArrayEquals(expected, response.globalCheckpoints()); final int totalDocuments = shards * 3; @@ -149,7 +148,7 @@ public void testPollGlobalCheckpointAdvancement() throws Exception { } - public void testPollGlobalCheckpointAdvancementTimeout() throws Exception { + public void testPollGlobalCheckpointAdvancementTimeout() { String indexName = "test_index"; client().admin() .indices() @@ -182,7 +181,7 @@ public void testPollGlobalCheckpointAdvancementTimeout() throws Exception { assertEquals(29L, response.globalCheckpoints()[0]); } - public void testMustProvideCorrectNumberOfShards() throws Exception { + public void testMustProvideCorrectNumberOfShards() { String indexName = "test_index"; client().admin() .indices() @@ -214,7 +213,7 @@ public void testMustProvideCorrectNumberOfShards() throws Exception { ); } - public void testWaitForAdvanceOnlySupportsOneShard() throws Exception { + public void testWaitForAdvanceOnlySupportsOneShard() { String indexName = "test_index"; client().admin() .indices() @@ -305,7 +304,7 @@ public void testWaitOnIndexCreated() throws Exception { assertFalse(response.timedOut()); } - public void testPrimaryShardsNotReadyNoWait() throws Exception { + public void testPrimaryShardsNotReadyNoWait() { final GetGlobalCheckpointsAction.Request request = new GetGlobalCheckpointsAction.Request( "not-assigned", false, @@ -333,7 +332,7 @@ public void testPrimaryShardsNotReadyNoWait() throws Exception { assertEquals("Primary shards were not active [shards=1, active=0]", exception.getMessage()); } - public void testWaitOnPrimaryShardsReadyTimeout() throws Exception { + public void testWaitOnPrimaryShardsReadyTimeout() { TimeValue timeout = TimeValue.timeValueMillis(between(1, 100)); final GetGlobalCheckpointsAction.Request request = new GetGlobalCheckpointsAction.Request( "not-assigned", @@ -400,4 +399,50 @@ public void testWaitOnPrimaryShardsReady() throws Exception { assertThat(response.globalCheckpoints()[0], equalTo(0L)); assertFalse(response.timedOut()); } + + public void testWaitOnPrimaryShardThrottled() throws 
Exception { + + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings( + Settings.builder().put(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.getKey(), 0).build() + ) + .get(); + + String indexName = "throttled"; + client().admin() + .indices() + .prepareCreate(indexName) + .setWaitForActiveShards(ActiveShardCount.NONE) + .setSettings( + Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST) + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + ) + .get(); + + long start = System.nanoTime(); + var future = client().execute( + GetGlobalCheckpointsAction.INSTANCE, + new GetGlobalCheckpointsAction.Request(indexName, true, true, EMPTY_ARRAY, TEN_SECONDS) + ); + Thread.sleep(randomIntBetween(10, 100)); + + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings( + Settings.builder().putNull(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.getKey()).build() + ) + .get(); + client().prepareIndex(indexName).setId(Integer.toString(0)).setSource("{}", XContentType.JSON).get(); + + var response = future.actionGet(); + long elapsed = TimeValue.timeValueNanos(System.nanoTime() - start).seconds(); + assertThat(elapsed, lessThanOrEqualTo(TEN_SECONDS.seconds())); + assertThat(response.globalCheckpoints()[0], equalTo(0L)); + assertFalse(response.timedOut()); + } } diff --git a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsAction.java b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsAction.java index 945951653f0c8..b7856daa8d842 100644 --- a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsAction.java +++ b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsAction.java @@ -16,11 +16,11 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.UnavailableShardsException; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.IndexRoutingTable; @@ -35,8 +35,10 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; @@ -156,6 +158,7 @@ public static class TransportAction extends org.elasticsearch.action.support.Tra private final ClusterService clusterService; private final NodeClient client; private final IndexNameExpressionResolver resolver; + private final ThreadPool threadPool; @Inject public TransportAction( @@ -163,12 +166,14 @@ public TransportAction( final TransportService transportService, final ClusterService 
clusterService, final NodeClient client, - final IndexNameExpressionResolver resolver + final IndexNameExpressionResolver resolver, + final ThreadPool threadPool ) { super(NAME, actionFilters, transportService.getTaskManager()); this.clusterService = clusterService; this.client = client; this.resolver = resolver; + this.threadPool = threadPool; } @Override @@ -180,7 +185,7 @@ protected void doExecute(Task task, Request request, ActionListener li index = resolver.concreteSingleIndex(state, request); } catch (IndexNotFoundException e) { if (request.waitForIndex()) { - handleIndexNotReady(request, listener); + handleIndexNotReady(state, request, listener); } else { listener.onFailure(e); } @@ -194,7 +199,7 @@ protected void doExecute(Task task, Request request, ActionListener li new CheckpointFetcher(client, request, listener, indexMetadata, request.timeout()).run(); } else { if (request.waitForIndex()) { - handleIndexNotReady(request, listener); + handleIndexNotReady(state, request, listener); } else { int active = routingTable.primaryShardsActive(); int total = indexMetadata.getNumberOfShards(); @@ -205,60 +210,72 @@ protected void doExecute(Task task, Request request, ActionListener li } } - private void handleIndexNotReady(final Request request, final ActionListener responseListener) { + private void handleIndexNotReady(ClusterState initialState, Request request, ActionListener listener) { long startNanos = System.nanoTime(); - client.admin() - .cluster() - .prepareHealth(request.index) - .setLocal(true) - .setTimeout(request.timeout()) - .setWaitForYellowStatus() - .setWaitForNoInitializingShards(true) - .execute(new ActionListener<>() { - @Override - public void onResponse(ClusterHealthResponse healthResponse) { - final long elapsedNanos = System.nanoTime() - startNanos; - final ClusterState state = clusterService.state(); - final Index index; - try { - index = resolver.concreteSingleIndex(state, request); - } catch (Exception e) { - responseListener.onFailure(e); - return; - } - - final IndexMetadata indexMetadata = state.getMetadata().index(index); - final IndexRoutingTable routingTable = state.routingTable().index(index); - + var observer = new ClusterStateObserver(initialState, clusterService, request.timeout(), logger, threadPool.getThreadContext()); + + observer.waitForNextChange(new ClusterStateObserver.Listener() { + @Override + public void onNewClusterState(ClusterState state) { + try { + var index = resolver.concreteSingleIndex(state, request); + long elapsedNanos = System.nanoTime() - startNanos; long remainingNanos = request.timeout().nanos() - elapsedNanos; - if (routingTable.allPrimaryShardsActive() && remainingNanos > 0) { + if (remainingNanos > 0) { new CheckpointFetcher( client, request, - responseListener, - indexMetadata, + listener, + state.getMetadata().index(index), TimeValue.timeValueNanos(remainingNanos) ).run(); } else { - int active = routingTable.primaryShardsActive(); - int total = indexMetadata.getNumberOfShards(); - responseListener.onFailure( + listener.onFailure( new UnavailableShardsException( null, "Primary shards were not active within timeout [timeout={}, shards={}, active={}]", request.timeout(), - total, - active + state.getMetadata().index(index).getNumberOfShards(), + state.routingTable().index(index).primaryShardsActive() ) ); } + } catch (Exception e) { + listener.onFailure(e); } + } - @Override - public void onFailure(Exception e) { - responseListener.onFailure(e); + @Override + public void onTimeout(TimeValue timeout) { + try { + var state = 
clusterService.state(); + var index = resolver.concreteSingleIndex(state, request); + listener.onFailure( + new UnavailableShardsException( + null, + "Primary shards were not active within timeout [timeout={}, shards={}, active={}]", + request.timeout(), + state.getMetadata().index(index).getNumberOfShards(), + state.routingTable().index(index).primaryShardsActive() + ) + ); + } catch (Exception e) { + listener.onFailure(e); } - }); + } + + @Override + public void onClusterServiceClose() { + listener.onFailure(new NodeClosedException(clusterService.localNode())); + } + }, state -> { + try { + var index = resolver.concreteSingleIndex(state, request); + return state.routingTable().index(index).allPrimaryShardsActive(); + } catch (Exception e) { + return false; + } + }, request.timeout()); } private static class CheckpointFetcher extends ActionRunnable<Response> { diff --git a/x-pack/plugin/mapper-constant-keyword/src/internalClusterTest/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java b/x-pack/plugin/mapper-constant-keyword/src/internalClusterTest/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java index e9077f3cb8a97..1782847e7a06a 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/internalClusterTest/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java +++ b/x-pack/plugin/mapper-constant-keyword/src/internalClusterTest/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java @@ -31,6 +31,7 @@ import java.util.List; import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; public class ConstantKeywordFieldMapperTests extends MapperTestCase { @@ -207,11 +208,39 @@ protected boolean allowsNullValues() { @Override protected SyntheticSourceSupport syntheticSourceSupport() { - throw new AssumptionViolatedException("not supported"); + String value = randomUnicodeOfLength(5); + return new SyntheticSourceSupport() { + @Override + public SyntheticSourceExample example(int maxValues) { + return new SyntheticSourceExample(value, value, b -> { + b.field("type", "constant_keyword"); + b.field("value", value); + }); + } + + @Override + public List<SyntheticSourceInvalidExample> invalidExample() throws IOException { + throw new AssumptionViolatedException("copy_to on constant_keyword not supported"); + } + }; } @Override protected IngestScriptSupport ingestScriptSupport() { throw new AssumptionViolatedException("not supported"); } + + public void testNullValueSyntheticSource() throws IOException { + DocumentMapper mapper = createDocumentMapper(syntheticSourceMapping(b -> { + b.startObject("field"); + b.field("type", "constant_keyword"); + b.endObject(); + })); + assertThat(syntheticSource(mapper, b -> {}), equalTo("{}")); + } + + @Override + protected boolean supportsEmptyInputArray() { + return false; + } } diff --git a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java index f1f1bccba22c7..a93f2417ef5b2 100644 ---
b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.mapper.ValueFetcher; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; @@ -307,4 +308,25 @@ protected String contentType() { return CONTENT_TYPE; } + @Override + public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { + return (reader, docIdsInLeaf) -> new SourceLoader.SyntheticFieldLoader.Leaf() { + @Override + public boolean empty() { + return fieldType().value == null; + } + + @Override + public boolean advanceToDoc(int docId) throws IOException { + return fieldType().value != null; + } + + @Override + public void write(XContentBuilder b) throws IOException { + if (fieldType().value != null) { + b.field(simpleName(), fieldType().value); + } + } + }; + } } diff --git a/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/20_synthetic_source.yml b/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/20_synthetic_source.yml new file mode 100644 index 0000000000000..635e09d82c41e --- /dev/null +++ b/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/20_synthetic_source.yml @@ -0,0 +1,38 @@ +constant_keyword: + - skip: + version: " - 8.3.99" + reason: introduced in 8.4.0 + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + const_kwd: + type: constant_keyword + value: bar + kwd: + type: keyword + + - do: + index: + index: test + id: 1 + refresh: true + body: + kwd: foo + + - do: + search: + index: test + body: + query: + ids: + values: [1] + - match: + hits.hits.0._source: + kwd: foo + const_kwd: bar diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java index dc2ecdbae2b24..51648fdca3d92 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java @@ -773,6 +773,26 @@ public void testNotifications() throws IOException { assertSystemNotificationsContain("Rebalanced trained model allocations because [model deployment started]"); } + public void testStartDeployment_TooManyAllocations() throws IOException { + String modelId = "test_start_deployment_too_many_allocations"; + createTrainedModel(modelId); + putModelDefinition(modelId); + putVocabulary(List.of("these", "are", "my", "words"), modelId); + + ResponseException ex = expectThrows( + ResponseException.class, + () -> startDeployment(modelId, AllocationStatus.State.STARTED.toString(), 100, 1) + ); + assertThat(ex.getResponse().getStatusLine().getStatusCode(), equalTo(429)); + assertThat( + EntityUtils.toString(ex.getResponse().getEntity()), + containsString("Could not start deployment because there are not enough resources to provide all requested allocations") + ); + + Response response = 
getTrainedModelStats(modelId); + assertThat(EntityUtils.toString(response.getEntity()), not(containsString("deployment_stats"))); + } + @SuppressWarnings("unchecked") private void assertAllocationCount(String modelId, int expectedAllocationCount) throws IOException { Response response = getTrainedModelStats(modelId); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportEvaluateDataFrameAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportEvaluateDataFrameAction.java index 1e12eb776635b..f6a0ed99bfe82 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportEvaluateDataFrameAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportEvaluateDataFrameAction.java @@ -74,7 +74,7 @@ protected void doExecute( EvaluateDataFrameAction.Request request, ActionListener listener ) { - TaskId parentTaskId = new TaskId(clusterService.getNodeName(), task.getId()); + TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); ActionListener> resultsListener = ActionListener.wrap(unused -> { EvaluateDataFrameAction.Response response = new EvaluateDataFrameAction.Response( request.getEvaluation().getName(), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportExplainDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportExplainDataFrameAnalyticsAction.java index ea0f37896b3c4..14efe2888c438 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportExplainDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportExplainDataFrameAnalyticsAction.java @@ -125,7 +125,7 @@ private void explain( ExplainDataFrameAnalyticsAction.Request request, ActionListener listener ) { - TaskId parentTaskId = new TaskId(clusterService.getNodeName(), task.getId()); + TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); final ExtractedFieldsDetectorFactory extractedFieldsDetectorFactory = new ExtractedFieldsDetectorFactory( new ParentTaskAssigningClient(client, parentTaskId) ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCategoriesAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCategoriesAction.java index 08b220ae513a8..9bd02f425c568 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCategoriesAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCategoriesAction.java @@ -46,7 +46,7 @@ public TransportGetCategoriesAction( @Override protected void doExecute(Task task, GetCategoriesAction.Request request, ActionListener listener) { - TaskId parentTaskId = new TaskId(clusterService.getNodeName(), task.getId()); + TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); jobManager.jobExists(request.getJobId(), parentTaskId, ActionListener.wrap(jobExists -> { Integer from = request.getPageParams() != null ? request.getPageParams().getFrom() : null; Integer size = request.getPageParams() != null ? 
request.getPageParams().getSize() : null; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java index ac585ac22b81c..6d41f438c69e4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java @@ -158,7 +158,7 @@ protected void doExecute( GetDataFrameAnalyticsStatsAction.Request request, ActionListener listener ) { - TaskId parentTaskId = new TaskId(clusterService.getNodeName(), task.getId()); + TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); logger.debug("Get stats for data frame analytics [{}]", request.getId()); ActionListener getResponseListener = ActionListener.wrap(getResponse -> { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java index e218e0ae31a04..31d2334cf4f1d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java @@ -61,7 +61,7 @@ protected void masterOperation( ClusterState state, ActionListener listener ) { - TaskId parentTaskId = new TaskId(clusterService.getNodeName(), task.getId()); + TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); logger.debug("Get datafeed '{}'", request.getDatafeedId()); datafeedManager.getDatafeeds( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java index fafeaa0c95e60..544ce742521be 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java @@ -68,7 +68,7 @@ protected void doExecute(Task task, Request request, ActionListener li ClusterState state = clusterService.state(); final PersistentTasksCustomMetadata tasksInProgress = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); final Response.Builder responseBuilder = new Response.Builder(); - final TaskId parentTaskId = new TaskId(clusterService.getNodeName(), task.getId()); + final TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); // 5. 
Build response ActionListener runtimeStateListener = ActionListener.wrap(runtimeStateResponse -> { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java index 66676bae07c24..3f101f57cda9a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java @@ -143,10 +143,11 @@ protected void doExecute( TrainedModelAssignment trainedModelAssignment = assignment.getModelAssignment(stats.getModelId()); if (trainedModelAssignment != null) { stats.setState(trainedModelAssignment.getAssignmentState()).setReason(trainedModelAssignment.getReason().orElse(null)); - if (trainedModelAssignment.getNodeRoutingTable() - .values() - .stream() - .allMatch(ri -> ri.getState().equals(RoutingState.FAILED))) { + if (trainedModelAssignment.getNodeRoutingTable().isEmpty() == false + && trainedModelAssignment.getNodeRoutingTable() + .values() + .stream() + .allMatch(ri -> ri.getState().equals(RoutingState.FAILED))) { stats.setState(AssignmentState.FAILED); if (stats.getReason() == null) { stats.setReason("All node routes are failed; see node route reason for details"); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobModelSnapshotsUpgradeStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobModelSnapshotsUpgradeStatsAction.java index 88ec82124bbe2..d78d4cf6f6587 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobModelSnapshotsUpgradeStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobModelSnapshotsUpgradeStatsAction.java @@ -76,7 +76,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A logger.debug(() -> format("[%s] get stats for model snapshot [%s] upgrades", request.getJobId(), request.getSnapshotId())); final PersistentTasksCustomMetadata tasksInProgress = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); final Collection> snapshotUpgrades = MlTasks.snapshotUpgradeTasks(tasksInProgress); - final TaskId parentTaskId = new TaskId(clusterService.getNodeName(), task.getId()); + final TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); // 2. 
Now that we have the job IDs, find the relevant model snapshot upgrades ActionListener> expandIdsListener = ActionListener.wrap(jobs -> { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsAction.java index 2133177902d3a..34da64aa5a5e0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsAction.java @@ -69,7 +69,7 @@ protected void masterOperation( ClusterState state, ActionListener listener ) { - TaskId parentTaskId = new TaskId(clusterService.getNodeName(), task.getId()); + TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); logger.debug("Get job '{}'", request.getJobId()); jobManager.expandJobBuilders( request.getJobId(), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java index 9ab41c7c15111..d19fa1af7ca75 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java @@ -95,7 +95,7 @@ public TransportGetJobsStatsAction( @Override protected void doExecute(Task task, GetJobsStatsAction.Request request, ActionListener finalListener) { logger.debug("Get stats for job [{}]", request.getJobId()); - TaskId parentTaskId = new TaskId(clusterService.getNodeName(), task.getId()); + TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); ClusterState state = clusterService.state(); PersistentTasksCustomMetadata tasks = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); @@ -144,7 +144,7 @@ protected void taskOperation( JobTask task, ActionListener> listener ) { - TaskId parentTaskId = new TaskId(clusterService.getNodeName(), actionTask.getId()); + TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), actionTask.getId()); String jobId = task.getJobId(); ClusterState state = clusterService.state(); PersistentTasksCustomMetadata tasks = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetModelSnapshotsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetModelSnapshotsAction.java index f5d240d6d0ef3..8018f3f24fce0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetModelSnapshotsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetModelSnapshotsAction.java @@ -53,7 +53,7 @@ protected void doExecute( GetModelSnapshotsAction.Request request, ActionListener listener ) { - TaskId parentTaskId = new TaskId(clusterService.getNodeName(), task.getId()); + TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); logger.debug( () -> format( "Get model snapshots for job %s snapshot ID %s. 
from = %s, size = %s start = '%s', end='%s', sort=%s descending=%s", diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInferTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInferTrainedModelDeploymentAction.java index 47c97604b0856..8654a0807c418 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInferTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInferTrainedModelDeploymentAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -72,7 +73,7 @@ protected void doExecute( InferTrainedModelDeploymentAction.Request request, ActionListener listener ) { - TaskId taskId = new TaskId(clusterService.getNodeName(), task.getId()); + TaskId taskId = new TaskId(clusterService.localNode().getId(), task.getId()); final String deploymentId = request.getDeploymentId(); // We need to check whether there is at least an assigned task here, otherwise we cannot redirect to the // node running the job task. @@ -143,11 +144,13 @@ protected void taskOperation( TrainedModelDeploymentTask task, ActionListener listener ) { + assert actionTask instanceof CancellableTask : "task [" + actionTask + "] not cancellable"; task.infer( request.getDocs().get(0), request.getUpdate(), request.isSkipQueue(), request.getInferenceTimeout(), + actionTask, ActionListener.wrap( pyTorchResult -> listener.onResponse(new InferTrainedModelDeploymentAction.Response(pyTorchResult)), listener::onFailure diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java index 9f9fd654f1ce3..c87c796a2f0e0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java @@ -96,7 +96,7 @@ public TransportInternalInferModelAction( protected void doExecute(Task task, Request request, ActionListener listener) { Response.Builder responseBuilder = Response.builder(); - TaskId parentTaskId = new TaskId(clusterService.getNodeName(), task.getId()); + TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); if (MachineLearningField.ML_API_FEATURE.check(licenseState)) { responseBuilder.setLicensed(true); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDataFrameAnalyticsAction.java index beb3f07318978..8e7583b28b740 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDataFrameAnalyticsAction.java @@ -102,7 +102,7 @@ protected void doExecute(Task task, Request request, ActionListener li } void preview(Task task, DataFrameAnalyticsConfig config, ActionListener listener) { - final TaskId parentTaskId = new 
TaskId(clusterService.getNodeName(), task.getId()); + final TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); final ExtractedFieldsDetectorFactory extractedFieldsDetectorFactory = new ExtractedFieldsDetectorFactory( new ParentTaskAssigningClient(client, parentTaskId) ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java index 121791c5cfc33..1c9c01e55cbe4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java @@ -92,7 +92,7 @@ public TransportPreviewDatafeedAction( @Override protected void doExecute(Task task, PreviewDatafeedAction.Request request, ActionListener listener) { - TaskId parentTaskId = new TaskId(clusterService.getNodeName(), task.getId()); + TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); ActionListener datafeedConfigActionListener = ActionListener.wrap(datafeedConfig -> { if (request.getJobConfig() != null) { previewDatafeed(parentTaskId, datafeedConfig, request.getJobConfig().build(new Date()), request, listener); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java index 478cbfcedd1ad..bd8c3366fafcd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java @@ -487,15 +487,11 @@ public boolean test(ClusterState clusterState) { .stream() .filter(d -> nodesShuttingDown.contains(d.getId()) == false) .filter(TaskParams::mayAssignToNode) - .collect(Collectors.toList()); - OptionalLong smallestMLNode = nodes.stream().map(NodeLoadDetector::getNodeSize).flatMapToLong(OptionalLong::stream).min(); + .toList(); + boolean isScalingPossible = isScalingPossible(nodes); // No nodes allocated at all! 
- if (nodeIdsAndRouting.isEmpty() - // We cannot scale horizontally - && maxLazyMLNodes <= nodes.size() - // We cannot scale vertically - && (smallestMLNode.isEmpty() || smallestMLNode.getAsLong() >= maxMLNodeSize)) { + if (nodeIdsAndRouting.isEmpty() && isScalingPossible == false) { String msg = "Could not start deployment because no suitable nodes were found, allocation explanation [" + trainedModelAssignment.getReason() + "]"; @@ -509,6 +505,15 @@ public boolean test(ClusterState clusterState) { return true; } + // We cannot add more nodes and the assignment is not satisfied + if (isScalingPossible == false + && trainedModelAssignment.isSatisfied(nodes.stream().map(DiscoveryNode::getId).collect(Collectors.toSet())) == false) { + String msg = "Could not start deployment because there are not enough resources to provide all requested allocations"; + logger.debug(() -> format("[%s] %s", modelId, msg)); + exception = new ElasticsearchStatusException(msg, RestStatus.TOO_MANY_REQUESTS); + return true; + } + AllocationStatus allocationStatus = trainedModelAssignment.calculateAllocationStatus().orElse(null); if (allocationStatus == null || allocationStatus.calculateState().compareTo(waitForState) >= 0) { return true; @@ -527,6 +532,16 @@ public boolean test(ClusterState clusterState) { ); return false; } + + private boolean isScalingPossible(List<DiscoveryNode> nodes) { + OptionalLong smallestMLNode = nodes.stream().map(NodeLoadDetector::getNodeSize).flatMapToLong(OptionalLong::stream).min(); + + // We can scale horizontally + return maxLazyMLNodes > nodes.size() + // We can scale vertically + // TODO this currently only considers memory. We should also consider CPU when autoscaling by CPU is possible. + || (smallestMLNode.isEmpty() == false && smallestMLNode.getAsLong() < maxMLNodeSize); + } } static Set<String> nodesShuttingDown(final ClusterState state) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java index 4a8d2a4a4b205..83cabd49c79c1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java @@ -32,6 +32,7 @@ import org.elasticsearch.xpack.core.ml.action.StartDataFrameAnalyticsAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction.DatafeedParams; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; +import org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus; import org.elasticsearch.xpack.core.ml.inference.assignment.AssignmentState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; import org.elasticsearch.xpack.core.ml.job.config.AnalysisLimits; @@ -409,6 +410,19 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider .filter(e -> e.getValue().getAssignmentState().equals(AssignmentState.STARTING) && e.getValue().getNodeRoutingTable().isEmpty()) .map(Map.Entry::getKey) .toList(); + // TODO for autoscaling by memory, we only care about if the model is allocated to at least one node (see above) + // We should do this check in our autoscaling by processor count service, which will be a separate decider for readability's sake + final List<String> notFullyAllocatedModels = modelAssignments.entrySet() + .stream() + .filter( + e -> e.getValue() + .calculateAllocationStatus()
+ .map(AllocationStatus::calculateState) + .orElse(AllocationStatus.State.FULLY_ALLOCATED) + .equals(AllocationStatus.State.FULLY_ALLOCATED) == false + ) + .map(Map.Entry::getKey) + .toList(); final int numAnalyticsJobsInQueue = NUM_ANALYTICS_JOBS_IN_QUEUE.get(configuration); final int numAnomalyJobsInQueue = NUM_ANOMALY_JOBS_IN_QUEUE.get(configuration); @@ -543,7 +557,8 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider if (waitingAnalyticsJobs.isEmpty() == false || waitingSnapshotUpgrades.isEmpty() == false - || waitingAnomalyJobs.isEmpty() == false) { + || waitingAnomalyJobs.isEmpty() == false + || notFullyAllocatedModels.isEmpty() == false) { // We don't want to continue to consider a scale down if there are now waiting jobs resetScaleDownCoolDown(); return new AutoscalingDeciderResult( @@ -553,11 +568,13 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider Locale.ROOT, "Passing currently perceived capacity as there are [%d] model snapshot upgrades, " + "[%d] analytics and [%d] anomaly detection jobs in the queue, " + + "[%d] trained models not fully-allocated, " + "but the number in the queue is less than the configured maximum allowed " + "or the queued jobs will eventually be assignable at the current size.", waitingSnapshotUpgrades.size(), waitingAnalyticsJobs.size(), - waitingAnomalyJobs.size() + waitingAnomalyJobs.size(), + notFullyAllocatedModels.size() ) ).build() ); @@ -654,6 +671,11 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider if (capacity == null) { return null; } + // TODO we should remove this when we can auto-scale (down and up) via a new CPU auto-scaling decider + if (modelAssignmentsRequireMoreThanHalfCpu(modelAssignments.values(), mlNodes)) { + logger.debug("not down-scaling; model assignments require more than half of the ML tier's allocated processors"); + return null; + } return new AutoscalingDeciderResult(capacity, result.reason()); }); if (maybeScaleDown.isPresent()) { @@ -744,6 +766,26 @@ static AutoscalingCapacity ensureScaleDown(AutoscalingCapacity scaleDownResult, return newCapacity; } + static boolean modelAssignmentsRequireMoreThanHalfCpu(Collection<TrainedModelAssignment> assignments, List<DiscoveryNode> mlNodes) { + int totalRequiredProcessors = assignments.stream() + .mapToInt(t -> t.getTaskParams().getNumberOfAllocations() * t.getTaskParams().getThreadsPerAllocation()) + .sum(); + int totalMlProcessors = mlNodes.stream().mapToInt(node -> { + String allocatedProcessorsString = node.getAttributes().get(MachineLearning.ALLOCATED_PROCESSORS_NODE_ATTR); + try { + return Integer.parseInt(allocatedProcessorsString); + } catch (NumberFormatException e) { + assert e == null + : MachineLearning.ALLOCATED_PROCESSORS_NODE_ATTR + + " should parse because we set it internally: invalid value was [" + + allocatedProcessorsString + + "]"; + return 0; + } + }).sum(); + return totalRequiredProcessors * 2 > totalMlProcessors; + } + // This doesn't allow any jobs to wait in the queue, this is because in a "normal" scaling event, we also verify if a job // can eventually start, and given the current cluster, no job can eventually start.
AutoscalingDeciderResult scaleUpFromZero( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java index aa8445647745e..1d48f1d1f2297 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java @@ -277,9 +277,10 @@ public void infer( Map doc, boolean skipQueue, TimeValue timeout, + Task parentActionTask, ActionListener listener ) { - deploymentManager.infer(task, config, doc, skipQueue, timeout, listener); + deploymentManager.infer(task, config, doc, skipQueue, timeout, parentActionTask, listener); } public Optional modelStats(TrainedModelDeploymentTask task) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java index 35e7f619a8e83..90dccf138fe1a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java @@ -20,6 +20,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.IdsQueryBuilder; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentFactory; @@ -237,6 +238,7 @@ public void infer( Map doc, boolean skipQueue, TimeValue timeout, + Task parentActionTask, ActionListener listener ) { var processContext = getProcessContext(task, listener::onFailure); @@ -254,6 +256,7 @@ public void infer( config, doc, threadPool, + parentActionTask, listener ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/InferencePyTorchAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/InferencePyTorchAction.java index 71220194ba58a..720751dd617c5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/InferencePyTorchAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/InferencePyTorchAction.java @@ -10,7 +10,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.inference.results.InferenceResults; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; @@ -33,6 +37,7 @@ class InferencePyTorchAction extends AbstractPyTorchAction { private final InferenceConfig config; private final Map doc; + private final Task parentActionTask; InferencePyTorchAction( String modelId, @@ -42,11 +47,25 @@ class InferencePyTorchAction extends AbstractPyTorchAction { InferenceConfig config, Map doc, ThreadPool threadPool, + @Nullable Task parentActionTask, 
ActionListener<InferenceResults> listener ) { super(modelId, requestId, timeout, processContext, threadPool, listener); this.config = config; this.doc = doc; + this.parentActionTask = parentActionTask; + } + + private boolean isCancelled() { + if (parentActionTask instanceof CancellableTask cancellableTask) { + try { + cancellableTask.ensureNotCancelled(); + } catch (TaskCancelledException ex) { + logger.debug(() -> format("[%s] %s", getModelId(), ex.getMessage())); + return true; + } + } + return false; } @Override @@ -56,12 +75,15 @@ protected void doRun() throws Exception { logger.debug(() -> format("[%s] skipping inference on request [%s] as it has timed out", getModelId(), getRequestId())); return; } + if (isCancelled()) { + onFailure("inference task cancelled"); + return; + } final String requestIdStr = String.valueOf(getRequestId()); try { // The request builder expect a list of inputs which are then batched. - // TODO batching was implemented for expected use-cases such as zero-shot - // classification but is not used here. + // TODO batching was implemented for expected use-cases such as zero-shot classification but is not used here. List<String> text = Collections.singletonList(NlpTask.extractInput(getProcessContext().getModelInput().get(), doc)); NlpTask.Processor processor = getProcessContext().getNlpTaskProcessor().get(); processor.validateInputs(text); @@ -74,6 +96,11 @@ protected void doRun() throws Exception { logger.debug("[{}] [{}] input truncated", getModelId(), getRequestId()); } + // Tokenization is non-trivial, so check for cancellation one last time before sending request to the native process + if (isCancelled()) { + onFailure("inference task cancelled"); + return; + } getProcessContext().getResultProcessor() .registerRequest( requestIdStr, @@ -109,6 +136,10 @@ private void processResult( ); return; } + if (isCancelled()) { + onFailure("inference task cancelled"); + return; + } InferenceResults results = inferenceResultsProcessor.processResult(tokenization, pyTorchResult.inferenceResult()); logger.debug(() -> format("[%s] processed result for request [%s]", getModelId(), getRequestId())); onSuccess(results); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTask.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTask.java index 72e706ca595c6..caef67ddab889 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTask.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTask.java @@ -18,6 +18,7 @@ import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; @@ -132,6 +133,7 @@ public void infer( InferenceConfigUpdate update, boolean skipQueue, TimeValue timeout, + Task parentActionTask, ActionListener<InferenceResults> listener ) { if (inferenceConfigHolder.get() == null) { @@ -150,7 +152,15 @@ public void infer( ); return; } - trainedModelAssignmentNodeService.infer(this, update.apply(inferenceConfigHolder.get()), doc, skipQueue, timeout, listener); + trainedModelAssignmentNodeService.infer( + this, + update.apply(inferenceConfigHolder.get()), + doc, + skipQueue, + timeout, + parentActionTask, +
listener + ); } public Optional modelStats() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregationTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregationTests.java index add1638a58b86..8d2414a5c15ad 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregationTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregationTests.java @@ -64,6 +64,12 @@ protected List getNamedXContents() { ); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/87240") + public void testReduceRandom() { + // The bug is in the assertReduced() method immediately below that the base class testReduceRandom() calls. + // To unmute after the bug is fixed, simply delete this entire method so that the base class method is used again. + } + @Override protected void assertReduced(InternalCategorizationAggregation reduced, List inputs) { Map reducedCounts = toCounts(reduced.getBuckets().stream()); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderServiceTests.java index 7602ca69307cf..4b03d38b01419 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderServiceTests.java @@ -31,8 +31,10 @@ import org.elasticsearch.xpack.core.ml.action.OpenJobAction; import org.elasticsearch.xpack.core.ml.action.StartDataFrameAnalyticsAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsTaskState; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; @@ -123,6 +125,7 @@ private static long mlOnlyNodeJvmBytes(long systemMemoryBytes) { private static final long TEST_NODE_SIZE = ByteSizeValue.ofGb(20).getBytes(); private static final long ML_MEMORY_FOR_TEST_NODE_SIZE = NativeMemoryCalculator.allowedBytesForMl(TEST_NODE_SIZE, 0, true); private static final long TEST_JVM_SIZE = mlOnlyNodeJvmBytes(TEST_NODE_SIZE); + private static final int TEST_ALLOCATED_PROCESSORS = 2; private static final long TEST_JOB_SIZE = ByteSizeValue.ofMb(200).getBytes(); private static final long PER_NODE_OVERHEAD = MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes(); @@ -1182,6 +1185,48 @@ public void testScaleDown() { } } + public void testCpuModelAssignmentRequirements() { + assertTrue( + MlAutoscalingDeciderService.modelAssignmentsRequireMoreThanHalfCpu( + List.of( + TrainedModelAssignment.Builder.empty( + new StartTrainedModelDeploymentAction.TaskParams("model1", TEST_JOB_SIZE, 3, 2, 100, null) + ).build(), + TrainedModelAssignment.Builder.empty( + new StartTrainedModelDeploymentAction.TaskParams("model1", TEST_JOB_SIZE, 1, 1, 100, null) + ).build() + ), + withMlNodes("ml_node_1", "ml_node_2") + ) + ); + 
assertTrue( + MlAutoscalingDeciderService.modelAssignmentsRequireMoreThanHalfCpu( + List.of( + TrainedModelAssignment.Builder.empty( + new StartTrainedModelDeploymentAction.TaskParams("model1", TEST_JOB_SIZE, 3, 1, 100, null) + ).build(), + TrainedModelAssignment.Builder.empty( + new StartTrainedModelDeploymentAction.TaskParams("model1", TEST_JOB_SIZE, 1, 1, 100, null) + ).build() + ), + withMlNodes("ml_node_1", "ml_node_2") + ) + ); + assertFalse( + MlAutoscalingDeciderService.modelAssignmentsRequireMoreThanHalfCpu( + List.of( + TrainedModelAssignment.Builder.empty( + new StartTrainedModelDeploymentAction.TaskParams("model1", TEST_JOB_SIZE, 3, 1, 100, null) + ).build(), + TrainedModelAssignment.Builder.empty( + new StartTrainedModelDeploymentAction.TaskParams("model1", TEST_JOB_SIZE, 1, 1, 100, null) + ).build() + ), + withMlNodes("ml_node_1", "ml_node_2", "ml_node_3", "ml_node_4") + ) + ); + } + public void testEnsureScaleDown() { assertThat( MlAutoscalingDeciderService.ensureScaleDown( @@ -1394,7 +1439,9 @@ private static List withMlNodes(String... nodeName) { MachineLearning.MACHINE_MEMORY_NODE_ATTR, String.valueOf(TEST_NODE_SIZE), MachineLearning.MAX_JVM_SIZE_NODE_ATTR, - String.valueOf(TEST_JVM_SIZE) + String.valueOf(TEST_JVM_SIZE), + MachineLearning.ALLOCATED_PROCESSORS_NODE_ATTR, + String.valueOf(TEST_ALLOCATED_PROCESSORS) ), Set.of(DiscoveryNodeRole.ML_ROLE), Version.CURRENT diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManagerTests.java index 10b2813603d59..4350428b221a2 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManagerTests.java @@ -101,6 +101,7 @@ public void testRejectedExecution() { Map.of(), false, TimeValue.timeValueMinutes(1), + null, ActionListener.wrap(result -> fail("unexpected success"), e -> assertThat(e, instanceOf(EsRejectedExecutionException.class))) ); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/InferencePyTorchActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/InferencePyTorchActionTests.java index 4a3a23a6622a2..4590aeb2a8888 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/InferencePyTorchActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/InferencePyTorchActionTests.java @@ -8,7 +8,13 @@ package org.elasticsearch.xpack.ml.inference.deployment; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskAwareRequest; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ScalingExecutorBuilder; import org.elasticsearch.threadpool.TestThreadPool; @@ -21,6 +27,7 @@ import org.junit.Before; import java.util.Map; +import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.xpack.ml.MachineLearning.UTILITY_THREAD_POOL_NAME; @@ -64,7 +71,7 @@ public void testInferListenerOnlyCalledOnce() { 
AtomicInteger timeoutCount = new AtomicInteger(); when(processContext.getTimeoutCount()).thenReturn(timeoutCount); - ListenerCounter listener = new ListenerCounter(); + TestListenerCounter listener = new TestListenerCounter(); InferencePyTorchAction action = new InferencePyTorchAction( "test-model", 1, @@ -73,6 +80,7 @@ public void testInferListenerOnlyCalledOnce() { new PassThroughConfig(null, null, null), Map.of(), tp, + null, listener ); action.init(); @@ -93,6 +101,7 @@ public void testInferListenerOnlyCalledOnce() { new PassThroughConfig(null, null, null), Map.of(), tp, + null, listener ); action.init(); @@ -114,6 +123,7 @@ public void testInferListenerOnlyCalledOnce() { new PassThroughConfig(null, null, null), Map.of(), tp, + null, listener ); action.init(); @@ -134,7 +144,7 @@ public void testRunNotCalledAfterNotified() { AtomicInteger timeoutCount = new AtomicInteger(); when(processContext.getTimeoutCount()).thenReturn(timeoutCount); - ListenerCounter listener = new ListenerCounter(); + TestListenerCounter listener = new TestListenerCounter(); { InferencePyTorchAction action = new InferencePyTorchAction( "test-model", @@ -144,6 +154,7 @@ public void testRunNotCalledAfterNotified() { new PassThroughConfig(null, null, null), Map.of(), tp, + null, listener ); action.init(); @@ -161,6 +172,7 @@ public void testRunNotCalledAfterNotified() { new PassThroughConfig(null, null, null), Map.of(), tp, + null, listener ); action.init(); @@ -170,7 +182,49 @@ public void testRunNotCalledAfterNotified() { } } - static class ListenerCounter implements ActionListener { + public void testCallingRunAfterParentTaskCancellation() throws Exception { + DeploymentManager.ProcessContext processContext = mock(DeploymentManager.ProcessContext.class); + PyTorchResultProcessor resultProcessor = mock(PyTorchResultProcessor.class); + when(processContext.getResultProcessor()).thenReturn(resultProcessor); + AtomicInteger timeoutCount = new AtomicInteger(); + when(processContext.getTimeoutCount()).thenReturn(timeoutCount); + TaskManager taskManager = new TaskManager(Settings.EMPTY, tp, Set.of()); + TestListenerCounter listener = new TestListenerCounter(); + CancellableTask cancellableTask = (CancellableTask) taskManager.register("test_task", "testAction", new TaskAwareRequest() { + @Override + public void setParentTask(TaskId taskId) {} + + @Override + public TaskId getParentTask() { + return TaskId.EMPTY_TASK_ID; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers); + } + }); + InferencePyTorchAction action = new InferencePyTorchAction( + "test-model", + 1, + TimeValue.MAX_VALUE, + processContext, + new PassThroughConfig(null, null, null), + Map.of(), + tp, + cancellableTask, + listener + ); + action.init(); + taskManager.cancel(cancellableTask, "test", () -> {}); + + action.doRun(); + assertThat(listener.failureCounts, equalTo(1)); + assertThat(listener.responseCounts, equalTo(0)); + verify(resultProcessor, never()).registerRequest(anyString(), any()); + } + + static class TestListenerCounter implements ActionListener { private int responseCounts; private int failureCounts; diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index 0a7566746607c..52087742d92d4 100644 --- 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -1550,9 +1550,13 @@ public void testUpdateApiKeyAutoUpdatesUserFields() throws IOException, Executio final String apiKeyId = createdApiKey.getId(); expectRoleDescriptorsForApiKey("limited_by_role_descriptors", Set.of(roleDescriptorBeforeUpdate), getApiKeyDocument(apiKeyId)); - final List newClusterPrivileges = new ArrayList<>(randomSubsetOf(ClusterPrivilegeResolver.names())); - // At a minimum include privilege to manage own API key to ensure no 403 - newClusterPrivileges.add(randomFrom("manage_api_key", "manage_own_api_key")); + final List newClusterPrivileges = randomValueOtherThan(clusterPrivileges, () -> { + final List privs = new ArrayList<>(randomSubsetOf(ClusterPrivilegeResolver.names())); + // At a minimum include privilege to manage own API key to ensure no 403 + privs.add(randomFrom("manage_api_key", "manage_own_api_key")); + return privs; + }); + // Update user role final RoleDescriptor roleDescriptorAfterUpdate = putRoleWithClusterPrivileges( nativeRealmRole, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportCreateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportCreateApiKeyAction.java index 6cf55d2c27c33..c9656d68e0dc9 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportCreateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportCreateApiKeyAction.java @@ -21,7 +21,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.security.authc.ApiKeyService; -import org.elasticsearch.xpack.security.authc.support.ApiKeyGenerator; +import org.elasticsearch.xpack.security.authc.support.ApiKeyUserRoleDescriptorResolver; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; /** @@ -29,7 +29,8 @@ */ public final class TransportCreateApiKeyAction extends HandledTransportAction { - private final ApiKeyGenerator generator; + private final ApiKeyService apiKeyService; + private final ApiKeyUserRoleDescriptorResolver resolver; private final SecurityContext securityContext; @Inject @@ -42,7 +43,8 @@ public TransportCreateApiKeyAction( NamedXContentRegistry xContentRegistry ) { super(CreateApiKeyAction.NAME, transportService, actionFilters, CreateApiKeyRequest::new); - this.generator = new ApiKeyGenerator(apiKeyService, rolesStore, xContentRegistry); + this.apiKeyService = apiKeyService; + this.resolver = new ApiKeyUserRoleDescriptorResolver(rolesStore, xContentRegistry); this.securityContext = context; } @@ -60,7 +62,13 @@ protected void doExecute(Task task, CreateApiKeyRequest request, ActionListener< ); return; } - generator.generateApiKey(authentication, request, listener); + resolver.resolveUserRoleDescriptors( + authentication, + ActionListener.wrap( + roleDescriptors -> apiKeyService.createApiKey(authentication, request, roleDescriptors, listener), + listener::onFailure + ) + ); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyAction.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyAction.java index f32d92e67e631..583f1a79efdf0 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyAction.java @@ -21,7 +21,7 @@ import org.elasticsearch.xpack.security.action.TransportGrantAction; import org.elasticsearch.xpack.security.authc.ApiKeyService; import org.elasticsearch.xpack.security.authc.AuthenticationService; -import org.elasticsearch.xpack.security.authc.support.ApiKeyGenerator; +import org.elasticsearch.xpack.security.authc.support.ApiKeyUserRoleDescriptorResolver; import org.elasticsearch.xpack.security.authz.AuthorizationService; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; @@ -30,16 +30,17 @@ */ public final class TransportGrantApiKeyAction extends TransportGrantAction { - private final ApiKeyGenerator generator; + private final ApiKeyService apiKeyService; + private final ApiKeyUserRoleDescriptorResolver resolver; @Inject public TransportGrantApiKeyAction( TransportService transportService, ActionFilters actionFilters, ThreadPool threadPool, - ApiKeyService apiKeyService, AuthenticationService authenticationService, AuthorizationService authorizationService, + ApiKeyService apiKeyService, CompositeRolesStore rolesStore, NamedXContentRegistry xContentRegistry ) { @@ -47,20 +48,21 @@ public TransportGrantApiKeyAction( transportService, actionFilters, threadPool.getThreadContext(), - new ApiKeyGenerator(apiKeyService, rolesStore, xContentRegistry), authenticationService, - authorizationService + authorizationService, + apiKeyService, + new ApiKeyUserRoleDescriptorResolver(rolesStore, xContentRegistry) ); } - // Constructor for testing TransportGrantApiKeyAction( TransportService transportService, ActionFilters actionFilters, ThreadContext threadContext, - ApiKeyGenerator generator, AuthenticationService authenticationService, - AuthorizationService authorizationService + AuthorizationService authorizationService, + ApiKeyService apiKeyService, + ApiKeyUserRoleDescriptorResolver resolver ) { super( GrantApiKeyAction.NAME, @@ -71,14 +73,28 @@ public TransportGrantApiKeyAction( authorizationService, threadContext ); - this.generator = generator; + this.apiKeyService = apiKeyService; + this.resolver = resolver; } @Override protected void doExecute(Task task, GrantApiKeyRequest request, ActionListener listener) { executeWithGrantAuthentication( request, - listener.delegateFailure((l, authentication) -> generator.generateApiKey(authentication, request.getApiKeyRequest(), listener)) + listener.delegateFailure( + (l, authentication) -> resolver.resolveUserRoleDescriptors( + authentication, + ActionListener.wrap( + roleDescriptors -> apiKeyService.createApiKey( + authentication, + request.getApiKeyRequest(), + roleDescriptors, + listener + ), + listener::onFailure + ) + ) + ) ); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportUpdateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportUpdateApiKeyAction.java index d90abdea65284..6b28f7d601420 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportUpdateApiKeyAction.java +++ 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportUpdateApiKeyAction.java @@ -19,14 +19,14 @@ import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyResponse; import org.elasticsearch.xpack.security.authc.ApiKeyService; -import org.elasticsearch.xpack.security.authc.support.ApiKeyGenerator; +import org.elasticsearch.xpack.security.authc.support.ApiKeyUserRoleDescriptorResolver; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; public final class TransportUpdateApiKeyAction extends HandledTransportAction<UpdateApiKeyRequest, UpdateApiKeyResponse> { - private final ApiKeyService apiKeyService; private final SecurityContext securityContext; - private final ApiKeyGenerator apiKeyGenerator; + private final ApiKeyService apiKeyService; + private final ApiKeyUserRoleDescriptorResolver resolver; @Inject public TransportUpdateApiKeyAction( @@ -38,9 +38,9 @@ public TransportUpdateApiKeyAction( final NamedXContentRegistry xContentRegistry ) { super(UpdateApiKeyAction.NAME, transportService, actionFilters, UpdateApiKeyRequest::new); - this.apiKeyService = apiKeyService; this.securityContext = context; - this.apiKeyGenerator = new ApiKeyGenerator(apiKeyService, rolesStore, xContentRegistry); + this.apiKeyService = apiKeyService; + this.resolver = new ApiKeyUserRoleDescriptorResolver(rolesStore, xContentRegistry); } @Override @@ -56,9 +56,7 @@ protected void doExecute(Task task, UpdateApiKeyRequest request, ActionListener<UpdateApiKeyResponse> listener) { return; } - // TODO generalize `ApiKeyGenerator` to handle updates - apiKeyService.ensureEnabled(); - apiKeyGenerator.getUserRoleDescriptors( + resolver.resolveUserRoleDescriptors( authentication, ActionListener.wrap( roleDescriptors -> apiKeyService.updateApiKey(authentication, request, roleDescriptors, listener), diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyGenerator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolver.java similarity index 63% rename from x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyGenerator.java rename to x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolver.java index 5b1be869b0570..9eb82bc97270a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyGenerator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolver.java @@ -8,49 +8,26 @@ package org.elasticsearch.xpack.security.authc.support; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Subject; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.support.DLSRoleQueryValidator; -import org.elasticsearch.xpack.security.authc.ApiKeyService; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; import java.util.Set; -public class ApiKeyGenerator { - - private final ApiKeyService apiKeyService; +public class ApiKeyUserRoleDescriptorResolver { private final CompositeRolesStore rolesStore; private final NamedXContentRegistry xContentRegistry; - public ApiKeyGenerator(ApiKeyService apiKeyService, CompositeRolesStore rolesStore, NamedXContentRegistry xContentRegistry) { - this.apiKeyService = apiKeyService; + public ApiKeyUserRoleDescriptorResolver(CompositeRolesStore rolesStore, NamedXContentRegistry xContentRegistry) { this.rolesStore = rolesStore; this.xContentRegistry = xContentRegistry; } - public void generateApiKey(Authentication authentication, CreateApiKeyRequest request, ActionListener<CreateApiKeyResponse> listener) { - if (authentication == null) { - listener.onFailure(new ElasticsearchSecurityException("no authentication available to generate API key")); - return; - } - apiKeyService.ensureEnabled(); - - getUserRoleDescriptors( - authentication, - ActionListener.wrap( - roleDescriptors -> apiKeyService.createApiKey(authentication, request, roleDescriptors, listener), - listener::onFailure - ) - ); - } - - public void getUserRoleDescriptors(Authentication authentication, ActionListener<Set<RoleDescriptor>> listener) { + public void resolveUserRoleDescriptors(final Authentication authentication, final ActionListener<Set<RoleDescriptor>> listener) { final ActionListener<Set<RoleDescriptor>> roleDescriptorsListener = ActionListener.wrap(roleDescriptors -> { for (RoleDescriptor rd : roleDescriptors) { try { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandler.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandler.java new file mode 100644 index 0000000000000..7c48aa63829a5 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandler.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.security.rest.action.apikey; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; +import org.elasticsearch.xpack.security.support.FeatureNotEnabledException; + +abstract class ApiKeyBaseRestHandler extends SecurityBaseRestHandler { + ApiKeyBaseRestHandler(Settings settings, XPackLicenseState licenseState) { + super(settings, licenseState); + } + + @Override + protected Exception checkFeatureAvailable(RestRequest request) { + final Exception failedFeature = super.checkFeatureAvailable(request); + if (failedFeature != null) { + return failedFeature; + } else if (XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.get(settings) == false) { + return new FeatureNotEnabledException(FeatureNotEnabledException.Feature.API_KEY_SERVICE, "api keys are not enabled"); + } else { + return null; + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java index 827148e038eb7..9d38b504fe2fa 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java @@ -15,7 +15,6 @@ import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequestBuilder; -import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; import java.util.List; @@ -26,7 +25,7 @@ /** * Rest action to create an API key */ -public final class RestCreateApiKeyAction extends SecurityBaseRestHandler { +public final class RestCreateApiKeyAction extends ApiKeyBaseRestHandler { /** * @param settings the node's settings diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyAction.java index ea4a37d58e5e2..f12488a7c9f10 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyAction.java @@ -19,7 +19,6 @@ import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyResponse; -import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; import java.util.List; @@ -29,7 +28,7 @@ /** * Rest action to get one or more API keys information. 
*/ -public final class RestGetApiKeyAction extends SecurityBaseRestHandler { +public final class RestGetApiKeyAction extends ApiKeyBaseRestHandler { public RestGetApiKeyAction(Settings settings, XPackLicenseState licenseState) { super(settings, licenseState); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGrantApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGrantApiKeyAction.java index e4a4753234a05..b45b3468cc92a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGrantApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGrantApiKeyAction.java @@ -25,7 +25,6 @@ import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyRequest; -import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; import java.util.Arrays; @@ -39,7 +38,7 @@ * Rest action to create an API key on behalf of another user. Loosely mimics the API of * {@link org.elasticsearch.xpack.security.rest.action.oauth2.RestGetTokenAction} combined with {@link RestCreateApiKeyAction} */ -public final class RestGrantApiKeyAction extends SecurityBaseRestHandler implements RestRequestFilter { +public final class RestGrantApiKeyAction extends ApiKeyBaseRestHandler implements RestRequestFilter { static final ObjectParser PARSER = new ObjectParser<>("grant_api_key_request", GrantApiKeyRequest::new); static { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyAction.java index 1af42b0c1a100..14fb04eea5680 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyAction.java @@ -23,7 +23,6 @@ import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyResponse; -import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; import java.util.List; @@ -33,7 +32,7 @@ /** * Rest action to invalidate one or more API keys */ -public final class RestInvalidateApiKeyAction extends SecurityBaseRestHandler { +public final class RestInvalidateApiKeyAction extends ApiKeyBaseRestHandler { @SuppressWarnings("unchecked") static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "invalidate_api_key", diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyAction.java index 84dc6fcde1047..7d1bd96d550c2 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyAction.java +++ 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyAction.java @@ -22,7 +22,6 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.security.action.apikey.QueryApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.QueryApiKeyRequest; -import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; import java.util.List; @@ -35,7 +34,7 @@ /** * Rest action to search for API keys */ -public final class RestQueryApiKeyAction extends SecurityBaseRestHandler { +public final class RestQueryApiKeyAction extends ApiKeyBaseRestHandler { @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyAction.java index 7fae6fdf76511..500e7c5056eb3 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateApiKeyAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.UpdateApiKeyRequest; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; -import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; import java.util.List; @@ -26,7 +25,7 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT; import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; -public final class RestUpdateApiKeyAction extends SecurityBaseRestHandler { +public final class RestUpdateApiKeyAction extends ApiKeyBaseRestHandler { @SuppressWarnings("unchecked") static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyActionTests.java index 7e2b410c90a9d..abb1584bbca31 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportGrantApiKeyActionTests.java @@ -31,14 +31,17 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.authc.support.BearerToken; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.authc.ApiKeyService; import org.elasticsearch.xpack.security.authc.AuthenticationService; -import org.elasticsearch.xpack.security.authc.support.ApiKeyGenerator; +import org.elasticsearch.xpack.security.authc.support.ApiKeyUserRoleDescriptorResolver; import org.elasticsearch.xpack.security.authz.AuthorizationService; import org.junit.After; import org.junit.Before; import java.util.List; +import java.util.Set; import static 
org.elasticsearch.test.ActionListenerUtils.anyActionListener; import static org.elasticsearch.test.TestMatchers.throwableWithMessage; @@ -62,14 +65,16 @@ public class TransportGrantApiKeyActionTests extends ESTestCase { private TransportGrantApiKeyAction action; - private ApiKeyGenerator apiKeyGenerator; + private ApiKeyService apiKeyService; + private ApiKeyUserRoleDescriptorResolver resolver; private AuthenticationService authenticationService; private ThreadPool threadPool; private AuthorizationService authorizationService; @Before public void setupMocks() throws Exception { - apiKeyGenerator = mock(ApiKeyGenerator.class); + apiKeyService = mock(ApiKeyService.class); + resolver = mock(ApiKeyUserRoleDescriptorResolver.class); authenticationService = mock(AuthenticationService.class); authorizationService = mock(AuthorizationService.class); @@ -80,9 +85,10 @@ public void setupMocks() throws Exception { mock(TransportService.class), mock(ActionFilters.class), threadContext, - apiKeyGenerator, authenticationService, - authorizationService + authorizationService, + apiKeyService, + resolver ); } @@ -91,7 +97,7 @@ public void cleanup() { threadPool.shutdown(); } - public void testGrantApiKeyWithUsernamePassword() throws Exception { + public void testGrantApiKeyWithUsernamePassword() { final String username = randomAlphaOfLengthBetween(4, 12); final SecureString password = new SecureString(randomAlphaOfLengthBetween(8, 24).toCharArray()); final Authentication authentication = buildAuthentication(username); @@ -123,7 +129,7 @@ public void testGrantApiKeyWithUsernamePassword() throws Exception { }).when(authenticationService) .authenticate(eq(GrantApiKeyAction.NAME), same(request), any(UsernamePasswordToken.class), anyActionListener()); - setupApiKeyGenerator(authentication, request, response); + setupApiKeyServiceWithRoleResolution(authentication, request, response); final PlainActionFuture<CreateApiKeyResponse> future = new PlainActionFuture<>(); action.doExecute(null, request, future); @@ -132,7 +138,7 @@ verify(authorizationService, never()).authorize(any(), any(), any(), anyActionListener()); } - public void testGrantApiKeyWithAccessToken() throws Exception { + public void testGrantApiKeyWithAccessToken() { final String username = randomAlphaOfLengthBetween(4, 12); final Authentication authentication = buildAuthentication(username); @@ -160,7 +166,7 @@ return null; }).when(authenticationService).authenticate(eq(GrantApiKeyAction.NAME), same(request), any(BearerToken.class), anyActionListener()); - setupApiKeyGenerator(authentication, request, response); + setupApiKeyServiceWithRoleResolution(authentication, request, response); final PlainActionFuture<CreateApiKeyResponse> future = new PlainActionFuture<>(); action.doExecute(null, request, future); @@ -214,7 +220,7 @@ public void testGrantApiKeyWithInvalidatedCredentials() { }).when(authenticationService) .authenticate(eq(GrantApiKeyAction.NAME), same(request), any(AuthenticationToken.class), anyActionListener()); - setupApiKeyGenerator(authentication, request, response); + setupApiKeyServiceWithRoleResolution(authentication, request, response); final PlainActionFuture<CreateApiKeyResponse> future = new PlainActionFuture<>(); action.doExecute(null, request, future); @@ -222,7 +228,8 @@ final ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, future::actionGet); assertThat(exception, throwableWithMessage("authentication failed for testing")); - verifyNoMoreInteractions(apiKeyGenerator); + verifyNoMoreInteractions(apiKeyService); + verifyNoMoreInteractions(resolver); verify(authorizationService, never()).authorize(any(), any(), any(), anyActionListener()); } @@ -251,7 +258,7 @@ public void testGrantWithRunAs() { .build(); final CreateApiKeyResponse response = mockResponse(request); - setupApiKeyGenerator(authentication, request, response); + setupApiKeyServiceWithRoleResolution(authentication, request, response); doAnswer(inv -> { assertThat(threadPool.getThreadContext().getHeader(AuthenticationServiceField.RUN_AS_USER_HEADER), equalTo(runAsUsername)); @@ -397,20 +404,37 @@ private GrantApiKeyRequest mockRequest() { return request; } - private void setupApiKeyGenerator(Authentication authentication, GrantApiKeyRequest request, CreateApiKeyResponse response) { + private void setupApiKeyServiceWithRoleResolution( + Authentication authentication, + GrantApiKeyRequest request, + CreateApiKeyResponse response + ) { + final Set<RoleDescriptor> roleDescriptors = Set.of(); doAnswer(inv -> { final Object[] args = inv.getArguments(); - assertThat(args, arrayWithSize(3)); + assertThat(args, arrayWithSize(4)); assertThat(args[0], equalTo(authentication)); assertThat(args[1], sameInstance(request.getApiKeyRequest())); + assertThat(args[2], sameInstance(roleDescriptors)); @SuppressWarnings("unchecked") ActionListener<CreateApiKeyResponse> listener = (ActionListener<CreateApiKeyResponse>) args[args.length - 1]; listener.onResponse(response); return null; - }).when(apiKeyGenerator).generateApiKey(any(Authentication.class), any(CreateApiKeyRequest.class), anyActionListener()); - } + }).when(apiKeyService).createApiKey(any(Authentication.class), any(CreateApiKeyRequest.class), any(), anyActionListener()); + + doAnswer(inv -> { + final Object[] args = inv.getArguments(); + assertThat(args, arrayWithSize(2)); + assertThat(args[0], equalTo(authentication)); + + @SuppressWarnings("unchecked") + ActionListener<Set<RoleDescriptor>> listener = (ActionListener<Set<RoleDescriptor>>) args[args.length - 1]; + listener.onResponse(roleDescriptors); + return null; + }).when(resolver).resolveUserRoleDescriptors(any(Authentication.class), anyActionListener()); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/ApiKeyGeneratorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolverTests.java similarity index 59% rename from x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/ApiKeyGeneratorTests.java rename to x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolverTests.java index 2f900dc3e6212..2af18f5dca7dc 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/ApiKeyGeneratorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolverTests.java @@ -9,18 +9,14 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; import
org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper; import org.elasticsearch.xpack.core.security.authc.Subject; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.user.User; -import org.elasticsearch.xpack.security.authc.ApiKeyService; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; import java.util.Collection; @@ -31,27 +27,23 @@ import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.sameInstance; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anySet; -import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; -public class ApiKeyGeneratorTests extends ESTestCase { +public class ApiKeyUserRoleDescriptorResolverTests extends ESTestCase { @SuppressWarnings("unchecked") - public void testGenerateApiKeySuccessfully() { - final ApiKeyService apiKeyService = mock(ApiKeyService.class); + public void testGetRoleDescriptors() { final CompositeRolesStore rolesStore = mock(CompositeRolesStore.class); - final ApiKeyGenerator generator = new ApiKeyGenerator(apiKeyService, rolesStore, NamedXContentRegistry.EMPTY); + final ApiKeyUserRoleDescriptorResolver resolver = new ApiKeyUserRoleDescriptorResolver(rolesStore, NamedXContentRegistry.EMPTY); final Set userRoleNames = Sets.newHashSet(randomArray(1, 4, String[]::new, () -> randomAlphaOfLengthBetween(3, 12))); final Authentication authentication = AuthenticationTestHelper.builder() .user(new User("test", userRoleNames.toArray(String[]::new))) .realmRef(new Authentication.RealmRef("realm-name", "realm-type", "node-name")) .build(false); - final CreateApiKeyRequest request = new CreateApiKeyRequest("name", null, null); - final Set roleDescriptors = randomSubsetOf(userRoleNames).stream() .map(name -> new RoleDescriptor(name, generateRandomStringArray(3, 6, false), null, null)) .collect(Collectors.toUnmodifiableSet()); @@ -70,30 +62,21 @@ public void testGenerateApiKeySuccessfully() { return null; }).when(rolesStore).getRoleDescriptorsList(any(Subject.class), any(ActionListener.class)); - CreateApiKeyResponse response = new CreateApiKeyResponse( - "name", - randomAlphaOfLength(18), - new SecureString(randomAlphaOfLength(24).toCharArray()), - null - ); - doAnswer(inv -> { - final Object[] args = inv.getArguments(); - assertThat(args, arrayWithSize(4)); - - assertThat(args[0], sameInstance(authentication)); - assertThat(args[1], sameInstance(request)); - assertThat(args[2], sameInstance(roleDescriptors)); + final PlainActionFuture> future = new PlainActionFuture<>(); + resolver.resolveUserRoleDescriptors(authentication, future); - ActionListener listener = (ActionListener) args[args.length - 1]; - listener.onResponse(response); + assertThat(future.actionGet(), equalTo(roleDescriptors)); + } - return null; - }).when(apiKeyService).createApiKey(same(authentication), same(request), anySet(), any(ActionListener.class)); + public void testGetRoleDescriptorsEmptyForApiKey() { + final CompositeRolesStore rolesStore = mock(CompositeRolesStore.class); + final Authentication authentication = AuthenticationTestHelper.builder().apiKey().build(false); - final PlainActionFuture future = new 
PlainActionFuture<>(); - generator.generateApiKey(authentication, request, future); + final ApiKeyUserRoleDescriptorResolver resolver = new ApiKeyUserRoleDescriptorResolver(rolesStore, NamedXContentRegistry.EMPTY); + final PlainActionFuture> future = new PlainActionFuture<>(); + resolver.resolveUserRoleDescriptors(authentication, future); - assertThat(future.actionGet(), sameInstance(response)); + assertThat(future.actionGet(), equalTo(Set.of())); + verify(rolesStore, never()).getRoleDescriptorsList(any(), any()); } - } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandlerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandlerTests.java new file mode 100644 index 0000000000000..15dd1b230a51b --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandlerTests.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.rest.action.apikey; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.client.NoOpNodeClient; +import org.elasticsearch.test.rest.FakeRestChannel; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.xpack.core.XPackSettings; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.mockito.Mockito.mock; + +public class ApiKeyBaseRestHandlerTests extends ESTestCase { + + public void testCheckFeatureAvailableChecksSettings() throws Exception { + final boolean securityEnabled = randomBoolean(); + final boolean serviceEnabled = randomBoolean(); + final boolean requiredSettingsEnabled = securityEnabled && serviceEnabled; + final var settings = Settings.builder() + .put(XPackSettings.SECURITY_ENABLED.getKey(), securityEnabled) + .put(XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.getKey(), serviceEnabled) + .build(); + final var consumerCalled = new AtomicBoolean(false); + final var handler = new ApiKeyBaseRestHandler(settings, mock(XPackLicenseState.class)) { + + @Override + public String getName() { + return "test_xpack_security_api_key_base_action"; + } + + @Override + public List routes() { + return Collections.emptyList(); + } + + @Override + protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) { + return channel -> { + if (consumerCalled.compareAndSet(false, true) == false) { + fail("consumerCalled was not false"); + } + }; + } + }; + final var fakeRestRequest = new FakeRestRequest(); + final var fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), requiredSettingsEnabled ? 
0 : 1); + + try (NodeClient client = new NoOpNodeClient(this.getTestName())) { + assertFalse(consumerCalled.get()); + handler.handleRequest(fakeRestRequest, fakeRestChannel, client); + + if (requiredSettingsEnabled) { + assertTrue(consumerCalled.get()); + assertEquals(0, fakeRestChannel.responses().get()); + assertEquals(0, fakeRestChannel.errors().get()); + } else { + assertFalse(consumerCalled.get()); + assertEquals(0, fakeRestChannel.responses().get()); + assertEquals(1, fakeRestChannel.errors().get()); + } + } + } + +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpdateTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpdateTransformAction.java index cd24581eae6b4..e9bd2eade6c65 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpdateTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpdateTransformAction.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; @@ -169,9 +170,35 @@ protected void doExecute(Task task, Request request, ActionListener li && ((TransformState) transformTask.getState()).getTaskState() != TransformTaskState.FAILED && clusterState.nodes().get(transformTask.getExecutorNode()).getVersion().onOrAfter(Version.V_7_8_0)) { + ActionListener taskUpdateListener = ActionListener.wrap(listener::onResponse, e -> { + // benign: A transform might be stopped meanwhile, this is not a problem + if (e instanceof TransformTaskDisappearedDuringUpdateException) { + logger.debug("[{}] transform task disappeared during update, ignoring", request.getId()); + listener.onResponse(new Response(updatedConfig)); + return; + } + + if (e instanceof TransformTaskUpdateException) { + // BWC: only log a warning as response object can not be changed + logger.warn( + () -> format( + "[%s] failed to notify running transform task about update. " + + "New settings will be applied after next checkpoint.", + request.getId() + ), + e + ); + + listener.onResponse(new Response(updatedConfig)); + return; + } + + listener.onFailure(e); + }); + request.setNodes(transformTask.getExecutorNode()); request.setConfig(updatedConfig); - super.doExecute(task, request, listener); + super.doExecute(task, request, taskUpdateListener); return; } } @@ -208,8 +235,29 @@ protected Response newResponse( List taskOperationFailures, List failedNodeExceptions ) { - // there should be only 1 response, todo: check + if (tasks.isEmpty()) { + if (taskOperationFailures.isEmpty() == false) { + throw new TransformTaskUpdateException("Failed to update running transform task.", taskOperationFailures.get(0).getCause()); + } else if (failedNodeExceptions.isEmpty() == false) { + throw new TransformTaskUpdateException("Failed to update running transform task.", failedNodeExceptions.get(0)); + } else { + throw new TransformTaskDisappearedDuringUpdateException("Could not update running transform as it has been stopped."); + } + } + return tasks.get(0); } + private static class TransformTaskUpdateException extends ElasticsearchException { + TransformTaskUpdateException(String msg, Throwable cause, Object... 
args) { + super(msg, cause, args); + } + } + + private static class TransformTaskDisappearedDuringUpdateException extends ElasticsearchException { + TransformTaskDisappearedDuringUpdateException(String msg) { + super(msg); + } + } + } diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index eae660ff7b001..1a55844b41a47 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -1,4 +1,3 @@ -import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.BwcVersions import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask @@ -146,6 +145,7 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> 'mixed_cluster/40_ml_datafeed_crud/Put job and datafeed in mixed cluster', 'mixed_cluster/40_ml_datafeed_crud/Put job and datafeed without aggs in mixed cluster', 'mixed_cluster/40_ml_datafeed_crud/Put job and datafeed with aggs in mixed cluster', + 'mixed_cluster/40_ml_datafeed_crud/Put job and datafeed with composite aggs in mixed cluster', 'mixed_cluster/80_transform_jobs_crud/Test put batch transform on mixed cluster', 'mixed_cluster/80_transform_jobs_crud/Test put continuous transform on mixed cluster', 'mixed_cluster/90_ml_data_frame_analytics_crud/Put an outlier_detection job on the mixed cluster', diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml index b6962ca59ebb9..0330b12663a41 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml @@ -39,7 +39,20 @@ datafeed_id: old-cluster-datafeed-with-aggs - match: { datafeeds.0.state: "stopped"} - is_false: datafeeds.0.node +--- +"Test old cluster datafeed with comp aggs": + - do: + ml.get_datafeeds: + datafeed_id: old-cluster-datafeed-with-comp-aggs + - match: { datafeeds.0.datafeed_id: "old-cluster-datafeed-with-comp-aggs"} + - length: { datafeeds.0.indices: 1 } + - is_false: datafeeds.0.script_fields + - do: + ml.get_datafeed_stats: + datafeed_id: old-cluster-datafeed-with-comp-aggs + - match: { datafeeds.0.state: "stopped"} + - is_false: datafeeds.0.node --- "Put job and datafeed without aggs in mixed cluster": - do: @@ -148,3 +161,76 @@ datafeed_id: mixed-cluster-datafeed-with-aggs - match: { datafeeds.0.state: stopped} - is_false: datafeeds.0.node +--- +"Put job and datafeed with composite aggs in mixed cluster": + - do: + ml.put_job: + job_id: mixed-cluster-datafeed-job-with-comp-aggs + body: > + { + "description":"Cluster upgrade", + "analysis_config" : { + "bucket_span": "60s", + "summary_count_field_name": "doc_count", + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "50mb" + }, + "data_description" : { + "format":"xcontent", + "time_field":"time" + } + } + - match: { job_id: mixed-cluster-datafeed-job-with-comp-aggs } + + - do: + ml.put_datafeed: + datafeed_id: mixed-cluster-datafeed-with-comp-aggs + body: > + { + "job_id":"mixed-cluster-datafeed-job-with-comp-aggs", + "indices":["airline-data"], + "aggregations": { + "buckets": { + "composite": { + "size": 2000, + "sources": [ + { + "time_bucket": { + "date_histogram": { + "field": "time", + "fixed_interval": "30s", + "time_zone": 
"UTC" + } + } + } + ] + }, + "aggregations": { + "time": { + "max": {"field": "time"} + }, + "airline": { + "terms": { + "field": "airline", + "size": 100 + }, + "aggregations": { + "responsetime": { + "avg": { + "field": "responsetime" + } + } + } + } + } + } + } + } + + - do: + ml.get_datafeed_stats: + datafeed_id: mixed-cluster-datafeed-with-comp-aggs + - match: { datafeeds.0.state: stopped} + - is_false: datafeeds.0.node diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml index 515405a33b07c..3dcc97edd6203 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml @@ -259,3 +259,76 @@ datafeed_id: old-cluster-datafeed-with-aggs - match: { datafeeds.0.state: stopped} - is_false: datafeeds.0.node +--- +"Put job and datafeed with composite aggs": + - do: + ml.put_job: + job_id: old-cluster-datafeed-job-with-comp-aggs + body: > + { + "description":"Cluster upgrade", + "analysis_config" : { + "bucket_span": "60s", + "summary_count_field_name": "doc_count", + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "50mb" + }, + "data_description" : { + "format":"xcontent", + "time_field":"time" + } + } + - match: { job_id: old-cluster-datafeed-job-with-comp-aggs } + + - do: + ml.put_datafeed: + datafeed_id: old-cluster-datafeed-with-comp-aggs + body: > + { + "job_id":"old-cluster-datafeed-job-with-comp-aggs", + "indices":["airline-data"], + "aggregations": { + "buckets": { + "composite": { + "size": 2000, + "sources": [ + { + "time_bucket": { + "date_histogram": { + "field": "time", + "fixed_interval": "30s", + "time_zone": "UTC" + } + } + } + ] + }, + "aggregations": { + "time": { + "max": {"field": "time"} + }, + "airline": { + "terms": { + "field": "airline", + "size": 100 + }, + "aggregations": { + "responsetime": { + "avg": { + "field": "responsetime" + } + } + } + } + } + } + } + } + + - do: + ml.get_datafeed_stats: + datafeed_id: old-cluster-datafeed-with-comp-aggs + - match: { datafeeds.0.state: stopped} + - is_false: datafeeds.0.node diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml index c60e6494b80fa..f8a38f8f399aa 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml @@ -200,6 +200,96 @@ setup: job_id: mixed-cluster-datafeed-job-with-aggs - match: { acknowledged: true } + - do: + indices.delete: + index: airline-data +--- +"Test old and mixed cluster datafeeds with composite aggs": + - do: + indices.create: + index: airline-data + body: + mappings: + properties: + time: + type: date + + - do: + ml.get_datafeeds: + datafeed_id: old-cluster-datafeed-with-comp-aggs + - match: { datafeeds.0.datafeed_id: "old-cluster-datafeed-with-comp-aggs"} + - length: { datafeeds.0.indices: 1 } + + - do: + ml.get_datafeed_stats: + datafeed_id: old-cluster-datafeed-with-comp-aggs + - match: { datafeeds.0.state: "stopped"} + - is_false: datafeeds.0.node + + - do: + ml.get_datafeeds: + datafeed_id: 
mixed-cluster-datafeed-with-comp-aggs + - match: { datafeeds.0.datafeed_id: "mixed-cluster-datafeed-with-comp-aggs"} + - length: { datafeeds.0.indices: 1 } + + - do: + ml.get_datafeed_stats: + datafeed_id: mixed-cluster-datafeed-with-comp-aggs + - match: { datafeeds.0.state: "stopped"} + - is_false: datafeeds.0.node + + - do: + ml.open_job: + job_id: old-cluster-datafeed-job-with-comp-aggs + + - do: + ml.start_datafeed: + datafeed_id: old-cluster-datafeed-with-comp-aggs + start: 0 + + - do: + ml.stop_datafeed: + datafeed_id: old-cluster-datafeed-with-comp-aggs + + - do: + ml.close_job: + job_id: old-cluster-datafeed-job-with-comp-aggs + + - do: + ml.delete_datafeed: + datafeed_id: old-cluster-datafeed-with-comp-aggs + + - do: + ml.delete_job: + job_id: old-cluster-datafeed-job-with-comp-aggs + - match: { acknowledged: true } + + - do: + ml.open_job: + job_id: mixed-cluster-datafeed-job-with-comp-aggs + + - do: + ml.start_datafeed: + datafeed_id: mixed-cluster-datafeed-with-comp-aggs + start: 0 + + - do: + ml.stop_datafeed: + datafeed_id: mixed-cluster-datafeed-with-comp-aggs + + - do: + ml.close_job: + job_id: mixed-cluster-datafeed-job-with-comp-aggs + + - do: + ml.delete_datafeed: + datafeed_id: mixed-cluster-datafeed-with-comp-aggs + + - do: + ml.delete_job: + job_id: mixed-cluster-datafeed-job-with-comp-aggs + - match: { acknowledged: true } + - do: indices.delete: index: airline-data
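
Taken together, the security changes in this diff split the old ApiKeyGenerator into two single-purpose collaborators: ApiKeyUserRoleDescriptorResolver resolves the authenticated user's role descriptors, while ApiKeyService keeps the actual create/update operations. The sketch below illustrates the resulting transport-layer flow for key creation. It is illustrative only and assumes just the constructor and method signatures visible in the diff; the enclosing handleCreate method and the resolver/apiKeyService fields are hypothetical stand-ins for a real action's wiring, not part of the change itself.

    // Hypothetical caller composing the two collaborators introduced above.
    // resolveUserRoleDescriptors(authentication, listener) and
    // createApiKey(authentication, request, roleDescriptors, listener) are the
    // signatures shown in the diff.
    void handleCreate(Authentication authentication, CreateApiKeyRequest request, ActionListener<CreateApiKeyResponse> listener) {
        resolver.resolveUserRoleDescriptors(
            authentication,
            ActionListener.wrap(
                // On success, delegate the actual key creation, including the
                // resolved role descriptors, to ApiKeyService.
                roleDescriptors -> apiKeyService.createApiKey(authentication, request, roleDescriptors, listener),
                // On failure, propagate role-resolution errors unchanged.
                listener::onFailure
            )
        );
    }

Note the deliberate shift in where enablement is enforced: apiKeyService.ensureEnabled() no longer guards this path. With the new ApiKeyBaseRestHandler, a disabled API key service is rejected one layer earlier, in checkFeatureAvailable, which fails the request with a FeatureNotEnabledException before any transport action executes.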