diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 52f1a492a3f74..65fd9e7281ad1 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -1,47 +1,4 @@ BWC_VERSION: - - "7.0.0" - - "7.0.1" - - "7.1.0" - - "7.1.1" - - "7.2.0" - - "7.2.1" - - "7.3.0" - - "7.3.1" - - "7.3.2" - - "7.4.0" - - "7.4.1" - - "7.4.2" - - "7.5.0" - - "7.5.1" - - "7.5.2" - - "7.6.0" - - "7.6.1" - - "7.6.2" - - "7.7.0" - - "7.7.1" - - "7.8.0" - - "7.8.1" - - "7.9.0" - - "7.9.1" - - "7.9.2" - - "7.9.3" - - "7.10.0" - - "7.10.1" - - "7.10.2" - - "1.0.0" - - "1.1.0" - - "1.2.0" - - "1.2.1" - - "1.2.2" - - "1.2.3" - - "1.2.4" - - "1.2.5" - - "1.3.0" - - "1.3.1" - - "1.3.2" - - "1.3.3" - - "1.3.4" - - "1.3.5" - "2.0.0" - "2.0.1" - "2.0.2" diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 6023c875c6796..70865fb62c997 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -12,8 +12,15 @@ on: types: - completed +permissions: + contents: read # to fetch code (actions/checkout) + jobs: gradle-check: + permissions: + contents: read # to fetch code (actions/checkout) + pull-requests: write # to create or update comment (peter-evans/create-or-update-comment) + runs-on: ubuntu-latest timeout-minutes: 130 steps: diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index ca05aee8be378..ac94f5ef5ec5e 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -2,6 +2,8 @@ name: Link Checker on: schedule: - cron: '0 0 * * *' +permissions: + contents: read # to fetch code (actions/checkout) jobs: linkchecker: if: github.repository == 'opensearch-project/OpenSearch' diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml index 42c2d21d106ce..d1b5e90484ec4 100644 --- a/.github/workflows/version.yml +++ b/.github/workflows/version.yml @@ -5,6 +5,7 @@ on: tags: - '*.*.*' +permissions: {} jobs: build: runs-on: ubuntu-latest diff --git a/CHANGELOG.md b/CHANGELOG.md index eeb2f53090923..e4dcf79c41a94 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,13 +6,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 3.0] ### Added -- Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847)) -- Add getter for path field in NestedQueryBuilder ([#4636](https://github.com/opensearch-project/OpenSearch/pull/4636)) -- Apply reproducible builds configuration for OpenSearch plugins through gradle plugin ([#4746](https://github.com/opensearch-project/OpenSearch/pull/4746)) -- Add project health badges to the README.md ([#4843](https://github.com/opensearch-project/OpenSearch/pull/4843)) -- [Test] Add IAE test for deprecated edgeNGram analyzer name ([#5040](https://github.com/opensearch-project/OpenSearch/pull/5040)) -- Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4+ ([#5151](https://github.com/opensearch-project/OpenSearch/pull/5151)) -- Add feature flag for extensions ([#5211](https://github.com/opensearch-project/OpenSearch/pull/5211)) - [Identity] Document identity roadmap and feature branch processes ([#4583](https://github.com/opensearch-project/OpenSearch/pull/4583)) - [Identity] Add stubs for AccessTokenManager ([#4612](https://github.com/opensearch-project/OpenSearch/pull/4612)) - [Identity] Permissions check API ([#4516](https://github.com/opensearch-project/OpenSearch/pull/4516)) @@ -23,158 +16,3 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Identity] Strategy for Delegated 
Authority using Tokens ([#4826](https://github.com/opensearch-project/OpenSearch/pull/4826)) - [Identity] User operations: create update delete ([#4741](https://github.com/opensearch-project/OpenSearch/pull/4741)) - [Identity] Adds Basic Auth mechanism via Internal IdP ([#4798](https://github.com/opensearch-project/OpenSearch/pull/4798)) - -### Dependencies -- Bumps `log4j-core` from 2.18.0 to 2.19.0 -- Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 -- Bumps `jettison` from 1.5.0 to 1.5.1 -- Bumps `azure-storage-common` from 12.18.0 to 12.18.1 -- Bumps `forbiddenapis` from 3.3 to 3.4 -- Bumps `gson` from 2.9.0 to 2.10 -- Bumps `protobuf-java` from 3.21.2 to 3.21.9 -- Bumps `azure-core` from 1.31.0 to 1.33.0 -- Bumps `avro` from 1.11.0 to 1.11.1 -- Bumps `woodstox-core` from 6.3.0 to 6.3.1 -- Bumps `xmlbeans` from 5.1.0 to 5.1.1 ([#4354](https://github.com/opensearch-project/OpenSearch/pull/4354)) -- Bumps `azure-core-http-netty` from 1.12.0 to 1.12.4 ([#4160](https://github.com/opensearch-project/OpenSearch/pull/4160)) -- Bumps `azure-core` from 1.27.0 to 1.31.0 ([#4160](https://github.com/opensearch-project/OpenSearch/pull/4160)) -- Bumps `azure-storage-common` from 12.16.0 to 12.18.0 ([#4160](https://github.com/opensearch-project/OpenSearch/pull/4160)) -- Bumps `org.gradle.test-retry` from 1.4.0 to 1.4.1 ([#4411](https://github.com/opensearch-project/OpenSearch/pull/4411)) -- Bumps `reactor-netty-core` from 1.0.19 to 1.0.22 ([#4447](https://github.com/opensearch-project/OpenSearch/pull/4447)) -- Bumps `reactive-streams` from 1.0.3 to 1.0.4 ([#4488](https://github.com/opensearch-project/OpenSearch/pull/4488)) -- Bumps `com.diffplug.spotless` from 6.10.0 to 6.11.0 ([#4547](https://github.com/opensearch-project/OpenSearch/pull/4547)) -- Bumps `reactor-core` from 3.4.18 to 3.4.23 ([#4548](https://github.com/opensearch-project/OpenSearch/pull/4548)) -- Bumps `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) -- Update Apache Lucene to 9.5.0-snapshot-a4ef70f ([#4979](https://github.com/opensearch-project/OpenSearch/pull/4979)) -- Update to Gradle 7.6 and JDK-19 ([#4973](https://github.com/opensearch-project/OpenSearch/pull/4973)) - -### Changed - -- Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) -- Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) -- Plugin ZIP publication groupId value is configurable ([#4156](https://github.com/opensearch-project/OpenSearch/pull/4156)) -- Weighted round-robin scheduling policy for shard coordination traffic ([#4241](https://github.com/opensearch-project/OpenSearch/pull/4241)) -- Add index specific setting for remote repository ([#4253](https://github.com/opensearch-project/OpenSearch/pull/4253)) -- [Segment Replication] Update replicas to commit SegmentInfos instead of relying on SIS files from primary shards. 
([#4402](https://github.com/opensearch-project/OpenSearch/pull/4402)) -- [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) -- Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for plugins to access the associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) -- Use ReplicationFailedException instead of OpenSearchException in ReplicationTarget ([#4725](https://github.com/opensearch-project/OpenSearch/pull/4725)) -- Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) - -### Deprecated - -### Removed -- Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) -- Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) -- Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://github.com/opensearch-project/OpenSearch/pull/2768)) -- Remove LegacyESVersion.V_7_2_ and V_7_3_ Constants ([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) -- Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) -- Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) -- Remove Legacy Version support from Snapshot/Restore Service ([#4728](https://github.com/opensearch-project/OpenSearch/pull/4728)) -- Remove deprecated serialization logic from pipeline aggs ([#4847](https://github.com/opensearch-project/OpenSearch/pull/4847)) -- Remove unused private methods ([#4926](https://github.com/opensearch-project/OpenSearch/pull/4926)) -- Remove LegacyESVersion.V_7_8_ and V_7_9_ Constants ([#4855](https://github.com/opensearch-project/OpenSearch/pull/4855)) -- Remove LegacyESVersion.V_7_6_ and V_7_7_ Constants ([#4837](https://github.com/opensearch-project/OpenSearch/pull/4837)) -- Remove LegacyESVersion.V_7_10_ Constants ([#5018](https://github.com/opensearch-project/OpenSearch/pull/5018)) -- Remove Version.V_1_ Constants ([#5021](https://github.com/opensearch-project/OpenSearch/pull/5021)) - -### Fixed - -- `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) -- PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) -- `opensearch.bat` and `opensearch-service.bat install` failing to run, missing logs directory ([#4305](https://github.com/opensearch-project/OpenSearch/pull/4305)) -- Restore using the class ClusterInfoRequest and ClusterInfoRequestBuilder from package 'org.opensearch.action.support.master.info' for subclasses ([#4307](https://github.com/opensearch-project/OpenSearch/pull/4307)) -- Do not fail replica shard due to primary closure ([#4133](https://github.com/opensearch-project/OpenSearch/pull/4133)) -- Add timeout on Mockito.verify to reduce flakiness in testReplicationOnDone test ([#4314](https://github.com/opensearch-project/OpenSearch/pull/4314)) -- Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) -- Fixed cancellation of segment replication events ([#4225](https://github.com/opensearch-project/OpenSearch/pull/4225))
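The #4638 entry above opens up HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to plugins. A minimal sketch of the kind of access this enables; the handler is hypothetical, the attribute lookup follows standard Netty AttributeKey usage, and the key's value type is assumed to be Netty4HttpChannel rather than taken from a verified signature:

```java
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import org.opensearch.http.netty4.Netty4HttpChannel;
import org.opensearch.http.netty4.Netty4HttpServerTransport;

// Hypothetical plugin-side handler: with HTTP_CHANNEL_KEY now visible, a plugin
// can recover the OpenSearch channel wrapper from the raw Netty channel.
public class ChannelInspectingHandler extends ChannelInboundHandlerAdapter {
    @Override
    public void channelActive(ChannelHandlerContext ctx) throws Exception {
        Netty4HttpChannel httpChannel = ctx.channel()
            .attr(Netty4HttpServerTransport.HTTP_CHANNEL_KEY) // visibility relaxed in #4638
            .get();
        if (httpChannel != null) {
            // plugin-specific bookkeeping would go here
        }
        super.channelActive(ctx);
    }
}
```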
-- [Segment Replication] Bump segment infos counter before commit during replica promotion ([#4365](https://github.com/opensearch-project/OpenSearch/pull/4365)) -- Bugs for dependabot changelog verifier workflow ([#4364](https://github.com/opensearch-project/OpenSearch/pull/4364)) -- Fix flaky random test `NRTReplicationEngineTests.testUpdateSegments` ([#4352](https://github.com/opensearch-project/OpenSearch/pull/4352)) -- [Segment Replication] Extend FileChunkWriter to allow cancel on transport client ([#4386](https://github.com/opensearch-project/OpenSearch/pull/4386)) -- [Segment Replication] Add check to cancel ongoing replication with old primary on onNewCheckpoint on replica ([#4363](https://github.com/opensearch-project/OpenSearch/pull/4363)) -- Fix NoSuchFileExceptions with segment replication when computing primary metadata snapshots ([#4366](https://github.com/opensearch-project/OpenSearch/pull/4366)) -- [Segment Replication] Update flaky testOnNewCheckpointFromNewPrimaryCancelOngoingReplication unit test ([#4414](https://github.com/opensearch-project/OpenSearch/pull/4414)) -- Fixed the `_cat/shards/10_basic.yml` test cases. -- [Segment Replication] Fix timeout issue by calculating time needed to process getSegmentFiles ([#4426](https://github.com/opensearch-project/OpenSearch/pull/4426)) -- [Bug]: gradle check failing with java heap OutOfMemoryError ([#4328](https://github.com/opensearch-project/OpenSearch/pull/4328)) -- `opensearch.bat` fails to execute when install path includes spaces ([#4362](https://github.com/opensearch-project/OpenSearch/pull/4362)) -- Getting security exception due to access denied 'java.lang.RuntimePermission' 'accessDeclaredMembers' when trying to get snapshot with S3 IRSA ([#4469](https://github.com/opensearch-project/OpenSearch/pull/4469)) -- Fixed flaky test `ResourceAwareTasksTests.testTaskIdPersistsInThreadContext` ([#4484](https://github.com/opensearch-project/OpenSearch/pull/4484)) -- Fixed the ignore_malformed setting to also ignore objects ([#4494](https://github.com/opensearch-project/OpenSearch/pull/4494)) -- [Segment Replication] Ignore lock file when testing cleanupAndPreserveLatestCommitPoint ([#4544](https://github.com/opensearch-project/OpenSearch/pull/4544)) -- Updated jackson to 2.13.4 and snakeyaml to 1.32 ([#4556](https://github.com/opensearch-project/OpenSearch/pull/4556)) -- Fixing PIT flaky tests ([#4632](https://github.com/opensearch-project/OpenSearch/pull/4632)) -- Fixed day of year defaulting for round up parser ([#4627](https://github.com/opensearch-project/OpenSearch/pull/4627)) -- Fixed the SnapshotsInProgress error during index deletion ([#4570](https://github.com/opensearch-project/OpenSearch/pull/4570)) -- [Segment Replication] Adding check to make sure checkpoint is not processed when a shard's shard routing is primary ([#4630](https://github.com/opensearch-project/OpenSearch/pull/4630)) -- [Bug]: Fixed invalid location of JDK dependency for arm64 architecture ([#4613](https://github.com/opensearch-project/OpenSearch/pull/4613)) -- [Bug]: Alias filter lost after rollover ([#4499](https://github.com/opensearch-project/OpenSearch/pull/4499)) -- Fixed misleading "No OpenSearchException found" message when detailed_error is disabled ([#4669](https://github.com/opensearch-project/OpenSearch/pull/4669)) -- Attempt to fix GitHub workflow for Gradle Check job ([#4679](https://github.com/opensearch-project/OpenSearch/pull/4679)) -- Fix flaky DecommissionControllerTests.testTimesOut ([#4683](https://github.com/opensearch-project/OpenSearch/pull/4683)) -- Fix new race condition in DecommissionControllerTests ([#4688](https://github.com/opensearch-project/OpenSearch/pull/4688)) -- Fix SearchStats (de)serialization (caused by https://github.com/opensearch-project/OpenSearch/pull/4616) ([#4697](https://github.com/opensearch-project/OpenSearch/pull/4697)) -- Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks ([#4696](https://github.com/opensearch-project/OpenSearch/pull/4696)) -- [BUG]: Remove redundant field from GetDecommissionStateResponse ([#4751](https://github.com/opensearch-project/OpenSearch/pull/4751)) -- Fixed randomly failing test ([#4774](https://github.com/opensearch-project/OpenSearch/pull/4774)) -- Update version check after backport ([#4786](https://github.com/opensearch-project/OpenSearch/pull/4786)) -- Fix decommission status update to non-leader nodes ([#4800](https://github.com/opensearch-project/OpenSearch/pull/4800)) -- Fix recovery path for searchable snapshots ([#4813](https://github.com/opensearch-project/OpenSearch/pull/4813)) -- Fix bug in AwarenessAttributeDecommissionIT ([#4822](https://github.com/opensearch-project/OpenSearch/pull/4822)) -- Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) -- Fixed compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) -- Add jvm option to allow security manager ([#5194](https://github.com/opensearch-project/OpenSearch/pull/5194)) -### Security -<<<<<<< HEAD - -- CVE-2022-25857 org.yaml:snakeyaml DoS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) - -## [2.x] - -### Added - -- GitHub workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) -- Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348)) -- Added RestLayer Changes for PIT stats ([#4217](https://github.com/opensearch-project/OpenSearch/pull/4217)) -- Added GeoBounds aggregation on GeoShape field type. ([#4266](https://github.com/opensearch-project/OpenSearch/pull/4266)) - - Addition of Doc values on the GeoShape Field - - Addition of GeoShape ValueSource level code interfaces for accessing the DocValues. - - Addition of Missing Value feature in the GeoShape Aggregations.
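The GeoBounds-on-GeoShape items just above add doc values, ValueSource plumbing, and missing-value support for `geo_shape` fields. A rough usage sketch (the field name and missing value are illustrative; chaining `missing(...)` is assumed from the generic values-source builder API rather than a documented geo_shape example):

```java
import org.opensearch.search.aggregations.AggregationBuilders;
import org.opensearch.search.aggregations.metrics.GeoBoundsAggregationBuilder;
import org.opensearch.search.builder.SearchSourceBuilder;

public class GeoBoundsOnShapeSketch {
    public static SearchSourceBuilder boundsQuery() {
        // geo_bounds now also works against a geo_shape-typed field ("region"
        // is a hypothetical mapping); "missing" substitutes a value for
        // documents without the field, per the missing-value feature above.
        GeoBoundsAggregationBuilder bounds = AggregationBuilders.geoBounds("region_bounds")
            .field("region")
            .missing("POINT (0 0)")
            .wrapLongitude(true);
        return new SearchSourceBuilder().size(0).aggregation(bounds);
    }
}
```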
-- Install and configure Log4j JUL Adapter for Lucene 9.4 ([#4754](https://github.com/opensearch-project/OpenSearch/pull/4754)) -- Added feature to ignore indexes starting with dot during shard limit validation. ([#4695](https://github.com/opensearch-project/OpenSearch/pull/4695)) -======= - -## [Unreleased 2.x] -### Added -### Dependencies -<<<<<<< HEAD ->>>>>>> origin/main -======= -- Bumps `bcpg-fips` from 1.0.5.1 to 1.0.7.1 -- Bumps `azure-storage-blob` from 12.16.1 to 12.20.0 ([#4995](https://github.com/opensearch-project/OpenSearch/pull/4995)) -- Bumps `commons-compress` from 1.21 to 1.22 ([#5104](https://github.com/opensearch-project/OpenSearch/pull/5104)) -- Bump `opencensus-contrib-http-util` from 0.18.0 to 0.31.1 ([#3633](https://github.com/opensearch-project/OpenSearch/pull/3633)) -- Bump `geoip2` from 3.0.1 to 3.0.2 ([#5103](https://github.com/opensearch-project/OpenSearch/pull/5103)) ->>>>>>> origin/main -### Changed -### Deprecated -### Removed -### Fixed -<<<<<<< HEAD - -- PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) -- Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) -- Better plural stemmer than minimal_english ([#4738](https://github.com/opensearch-project/OpenSearch/pull/4738)) -- Disable merge on refresh in DiskThresholdDeciderIT ([#4828](https://github.com/opensearch-project/OpenSearch/pull/4828)) - -======= ->>>>>>> origin/main -### Security - -[Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.4...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.4...2.x
diff --git a/build.gradle b/build.gradle index 076894863b1bf..2211da85345b5 100644 --- a/build.gradle +++ b/build.gradle @@ -454,9 +454,11 @@ subprojects { apply plugin: "org.gradle.test-retry" tasks.withType(Test).configureEach { retry { + if (BuildParams.isCi()) { + maxRetries = 3 + maxFailures = 10 + } failOnPassedAfterRetry = false - maxRetries = 3 - maxFailures = 10 } } }
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 40f1eeccf5300..6d055ee9805fc 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -105,7 +105,7 @@ dependencies { api 'commons-codec:commons-codec:1.15' api 'org.apache.commons:commons-compress:1.21' api 'org.apache.ant:ant:1.10.12' - api 'com.netflix.nebula:gradle-extra-configurations-plugin:7.0.0' + api 'com.netflix.nebula:gradle-extra-configurations-plugin:8.0.0' api 'com.netflix.nebula:nebula-publishing-plugin:4.6.0' api 'com.netflix.nebula:gradle-info-plugin:11.3.3' api 'org.apache.rat:apache-rat:0.13' @@ -118,7 +118,7 @@ dependencies { api 'com.avast.gradle:gradle-docker-compose-plugin:0.15.2' api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}" api 'org.apache.maven:maven-model:3.6.2' - api 'com.networknt:json-schema-validator:1.0.69' + api 'com.networknt:json-schema-validator:1.0.73' api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson_databind')}" testFixturesApi "junit:junit:${props.getProperty('junit')}" @@ -127,7 +127,7 @@ testFixturesApi gradleTestKit() testImplementation 'com.github.tomakehurst:wiremock-jre8-standalone:2.33.2' testImplementation "org.mockito:mockito-core:${props.getProperty('mockito')}" - integTestImplementation('org.spockframework:spock-core:2.1-groovy-3.0') { + integTestImplementation('org.spockframework:spock-core:2.3-groovy-3.0') { exclude module: "groovy" } }
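The BwcVersions.java diff that follows drops the legacy-ES major masking in favor of a plain numeric comparison. A self-contained sketch (with a toy Version record, not the real Gradle types) of how the new `getMajor() > currentMajor - 2` filter groups versions:

```java
import java.util.List;
import java.util.Map;
import java.util.TreeSet;
import java.util.stream.Collectors;

public class BwcFilterSketch {
    // Toy stand-in for org.opensearch.gradle.Version, just for this example.
    record Version(int major, int minor, int revision) {}

    public static void main(String[] args) {
        Version current = new Version(3, 0, 0);
        List<Version> all = List.of(
            new Version(1, 3, 5),  // dropped: two majors behind
            new Version(2, 4, 0),  // kept: previous major
            new Version(3, 0, 0)   // kept: current major
        );
        // Mirrors the new filter: keep only the current and previous majors.
        Map<Integer, List<Version>> groupByMajor = all.stream()
            .filter(v -> v.major() > current.major() - 2)
            .collect(Collectors.groupingBy(Version::major));
        System.out.println(new TreeSet<>(groupByMajor.keySet())); // prints [2, 3]
    }
}
```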
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java b/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java index 3f65abcc25d17..cddd03ccc2019 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java @@ -105,7 +105,7 @@ public class BwcVersions { private final Map<Integer, List<Version>> groupByMajor; private final Map<Version, UnreleasedVersionInfo> unreleased; - public class UnreleasedVersionInfo { + public static class UnreleasedVersionInfo { public final Version version; public final String branch; public final String gradleProjectPath; @@ -149,13 +149,7 @@ public BwcVersions(SortedSet<String> allVersions, Version currentVersionProperty) groupByMajor = allVersions.stream() // We only care about the last 2 majors when it comes to BWC. - // It might take us time to remove the older ones from versionLines, so we allow them to exist. - // Adjust the major number since OpenSearch 1.x is released after predecessor version 7.x - .filter( - version -> (version.getMajor() == 1 ? 7 : version.getMajor()) > (currentVersion.getMajor() == 1 - ? 7 - : currentVersion.getMajor()) - 2 - ) + .filter(version -> version.getMajor() > currentVersion.getMajor() - 2) .collect(Collectors.groupingBy(Version::getMajor, Collectors.toList())); assertCurrentVersionMatchesParsed(currentVersionProperty); @@ -174,9 +168,7 @@ public BwcVersions(SortedSet<String> allVersions, Version currentVersionProperty) private void assertNoOlderThanTwoMajors() { Set<Integer> majors = groupByMajor.keySet(); - // until OpenSearch 3.0 we will need to carry three major support - // (1, 7, 6) && (2, 1, 7) since OpenSearch 1.0 === Legacy 7.x - int numSupportedMajors = (currentVersion.getMajor() < 3) ? 3 : 2; + int numSupportedMajors = 2; if (majors.size() != numSupportedMajors && currentVersion.getMinor() != 0 && currentVersion.getRevision() != 0) { throw new IllegalStateException("Expected exactly 2 majors in parsed versions but found: " + majors); } @@ -207,7 +199,7 @@ public void forPreviousUnreleased(Consumer<UnreleasedVersionInfo> consumer) { .map(version -> new UnreleasedVersionInfo(version, getBranchFor(version), getGradleProjectPathFor(version))) .collect(Collectors.toList()); - collect.forEach(uvi -> consumer.accept(uvi)); + collect.forEach(consumer); } private String getGradleProjectPathFor(Version version) { @@ -271,18 +263,9 @@ public List<Version> getUnreleased() { // The current version is being worked, is always unreleased unreleased.add(currentVersion); - // No unreleased versions for 1.0.0 - // todo remove this hack - if (currentVersion.equals(Version.fromString("1.0.0"))) { - return unmodifiableList(unreleased); - } - // the tip of the previous major is unreleased for sure, be it a minor or a bugfix if (currentVersion.getMajor() != 1) { - final Version latestOfPreviousMajor = getLatestVersionByKey( - this.groupByMajor, - currentVersion.getMajor() == 1 ? 7 : currentVersion.getMajor() - 1 - ); + final Version latestOfPreviousMajor = getLatestVersionByKey(this.groupByMajor, currentVersion.getMajor() - 1); unreleased.add(latestOfPreviousMajor); if (latestOfPreviousMajor.getRevision() == 0) { // if the previous major is a x.y.0 release, then the tip of the minor before that (y-1) is also unreleased @@ -311,7 +294,7 @@ public List<Version> getUnreleased() { } } - return unmodifiableList(unreleased.stream().sorted().distinct().collect(Collectors.toList())); + return unreleased.stream().sorted().distinct().collect(Collectors.toUnmodifiableList()); } private Version getLatestInMinor(int major, int minor) { @@ -342,7 +325,7 @@ private Map<Integer, List<Version>> getReleasedMajorGroupedByMinor() { public void compareToAuthoritative(List<Version> authoritativeReleasedVersions) { Set<Version> notReallyReleased = new HashSet<>(getReleased()); - notReallyReleased.removeAll(authoritativeReleasedVersions); + authoritativeReleasedVersions.forEach(notReallyReleased::remove); if (notReallyReleased.isEmpty() == false) { throw new IllegalStateException( "out-of-date released versions" @@ -370,32 +353,21 @@ private List<Version> getReleased() { .stream() .flatMap(Collection::stream) .filter(each -> unreleased.contains(each) == false) - // this is to make sure we only consider OpenSearch versions - // TODO remove this filter once legacy ES versions are no longer supported - .filter(v -> v.onOrAfter("1.0.0")) .collect(Collectors.toList()); } public List<Version> getIndexCompatible() { int currentMajor = currentVersion.getMajor(); int prevMajor = getPreviousMajor(currentMajor); - List<Version> result = Stream.concat(groupByMajor.get(prevMajor).stream(), groupByMajor.get(currentMajor).stream()) + return Stream.concat(groupByMajor.get(prevMajor).stream(), groupByMajor.get(currentMajor).stream()) .filter(version -> version.equals(currentVersion) == false) - .collect(Collectors.toList()); - if (currentMajor == 1) { - // add 6.x compatible for OpenSearch 1.0.0 - return unmodifiableList(Stream.concat(groupByMajor.get(prevMajor - 1).stream(), result.stream()).collect(Collectors.toList())); - } else if (currentMajor == 2) { - // add 7.x compatible for OpenSearch 2.0.0 - return unmodifiableList(Stream.concat(groupByMajor.get(7).stream(), result.stream()).collect(Collectors.toList())); - } - return unmodifiableList(result); + .collect(Collectors.toUnmodifiableList()); } public List<Version> getWireCompatible() { List<Version> wireCompat = new ArrayList<>(); int currentMajor = currentVersion.getMajor(); - int lastMajor = currentMajor == 1 ? 6 : currentMajor == 2 ? 7 : currentMajor - 1; + int lastMajor = currentMajor - 1; List<Version> lastMajorList = groupByMajor.get(lastMajor); if (lastMajorList == null) { throw new IllegalStateException("Expected to find a list of versions for version: " + lastMajor); } @@ -405,20 +377,6 @@ public List<Version> getWireCompatible() { wireCompat.add(lastMajorList.get(i)); } - // if current is OpenSearch 1.0.0 add all of the 7.x line: - if (currentMajor == 1) { - List<Version> previousMajor = groupByMajor.get(7); - for (Version v : previousMajor) { - wireCompat.add(v); - } - } else if (currentMajor == 2) { - // add all of the 1.x line: - List<Version> previousMajor = groupByMajor.get(1); - for (Version v : previousMajor) { - wireCompat.add(v); - } - } - wireCompat.addAll(groupByMajor.get(currentMajor)); wireCompat.remove(currentVersion); wireCompat.sort(Version::compareTo); @@ -438,7 +396,7 @@ public List<Version> getUnreleasedWireCompatible() { } private int getPreviousMajor(int currentMajor) { - return currentMajor == 1 ? 7 : currentMajor - 1; + return currentMajor - 1; } }
diff --git a/buildSrc/src/test/java/org/opensearch/gradle/BwcOpenSearchVersionsTests.java b/buildSrc/src/test/java/org/opensearch/gradle/BwcOpenSearchVersionsTests.java deleted file mode 100644 index 14931c83ba29b..0000000000000 --- a/buildSrc/src/test/java/org/opensearch/gradle/BwcOpenSearchVersionsTests.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.gradle; - -import org.opensearch.gradle.test.GradleUnitTestCase; -import org.junit.Rule; -import org.junit.rules.ExpectedException; - -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -import static java.util.Arrays.asList; - -/** - * Tests to specifically verify the OpenSearch version 1.x with Legacy ES versions. - * This supplements the tests in BwcVersionsTests. - * - * Currently the versioning logic doesn't work for OpenSearch 2.x as the masking - * is only applied specifically for 1.x. - */ -public class BwcOpenSearchVersionsTests extends GradleUnitTestCase { - - private static final Map<String, List<String>> sampleVersions = new HashMap<>(); - - @Rule - public ExpectedException expectedEx = ExpectedException.none(); - - static { - sampleVersions.put("1.0.0", asList("5_6_13", "6_6_1", "6_8_15", "7_0_0", "7_9_1", "7_10_0", "7_10_1", "7_10_2", "1_0_0")); - sampleVersions.put("1.1.0", asList("5_6_13", "6_6_1", "6_8_15", "7_0_0", "7_9_1", "7_10_0", "7_10_1", "7_10_2", "1_0_0", "1_1_0")); - sampleVersions.put( - "2.0.0", - asList("5_6_13", "6_6_1", "6_8_15", "7_0_0", "7_9_1", "7_10_0", "7_10_1", "7_10_2", "1_0_0", "1_1_0", "2_0_0") - ); - } - - public void testWireCompatible() { - assertVersionsEquals( - asList("6.8.15", "7.0.0", "7.9.1", "7.10.0", "7.10.1", "7.10.2"), - getVersionCollection("1.0.0").getWireCompatible() - ); - assertVersionsEquals( - asList("6.8.15", "7.0.0", "7.9.1", "7.10.0", "7.10.1", "7.10.2", "1.0.0"), - getVersionCollection("1.1.0").getWireCompatible() - ); - } - - public void testWireCompatibleUnreleased() { - assertVersionsEquals(Collections.emptyList(), getVersionCollection("1.0.0").getUnreleasedWireCompatible()); - } - - public void testIndexCompatible() { - assertVersionsEquals( - asList("6.6.1", "6.8.15", "7.0.0", "7.9.1", "7.10.0", "7.10.1", "7.10.2"), - getVersionCollection("1.0.0").getIndexCompatible() - ); - assertVersionsEquals( - asList("6.6.1", "6.8.15", "7.0.0", "7.9.1", "7.10.0", "7.10.1", "7.10.2", "1.0.0"), - getVersionCollection("1.1.0").getIndexCompatible() - ); - } - - public void testIndexCompatibleUnreleased() { - assertVersionsEquals(Collections.emptyList(), getVersionCollection("1.0.0").getUnreleasedIndexCompatible()); - } - - public void testGetUnreleased() { - assertVersionsEquals(Collections.singletonList("1.0.0"), getVersionCollection("1.0.0").getUnreleased()); - } - - private String formatVersionToLine(final String version) { - return " public static final Version V_" + version.replaceAll("\\.", "_") + " "; - } - - private void assertVersionsEquals(List<String> expected, List<Version> actual) { - assertEquals(expected.stream().map(Version::fromString).collect(Collectors.toList()), actual); - } - - private BwcVersions getVersionCollection(String versionString) { - List<String> versionMap = sampleVersions.get(versionString); - assertNotNull(versionMap); - Version version = Version.fromString(versionString); - assertNotNull(version); - return new BwcVersions(versionMap.stream().map(this::formatVersionToLine).collect(Collectors.toList()), version); - } -}
diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 3f2c54e776e28..51a191efca2fb 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -8,8 +8,8 @@ bundled_jdk = 19.0.1+10 # optional dependencies spatial4j = 0.7 jts = 1.15.0 -jackson = 2.14.0 -jackson_databind = 2.14.0 +jackson = 2.14.1 +jackson_databind = 2.14.1 snakeyaml = 1.32 icu4j = 70.1 supercsv = 2.4.0 @@ -28,8 +28,8 @@ netty = 4.1.84.Final joda = 2.10.13 # client dependencies -httpclient5 = 5.1.3 -httpcore5 = 5.1.4 +httpclient5 = 5.1.4 +httpcore5 = 5.1.5 httpclient = 4.5.13 httpcore = 4.4.15 httpasyncclient = 4.1.5
diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/ResizeRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/ResizeRequest.java index 2a22c8d7d19e9..ebbd813c9fe15 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/ResizeRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/ResizeRequest.java @@ -39,6 +39,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.unit.ByteSizeValue; import java.io.IOException; import java.util.Collections; @@ -58,6 +59,7 @@ public class ResizeRequest extends TimedRequest implements Validatable, ToXContentObject { private final String targetIndex; private Settings settings = Settings.EMPTY; private Set<Alias> aliases = new HashSet<>(); + private ByteSizeValue maxShardSize; /** * Creates a new resize request @@ -155,6 +157,24 @@ public ActiveShardCount getWaitForActiveShards() { return waitForActiveShards; } + /** + * Sets the maximum size of a primary shard in the new shrunken index. + * This parameter can be used to calculate the lowest factor of the source index's shards number + * which satisfies the maximum shard size requirement. + * + * @param maxShardSize the maximum size of a primary shard in the new shrunken index + */ + public void setMaxShardSize(ByteSizeValue maxShardSize) { + this.maxShardSize = maxShardSize; + } + + /** + * Returns the maximum size of a primary shard in the new shrunken index.
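+ * <p> + * For example (hypothetical figures): shrinking a 10-shard source index whose + * primaries total 40gb with a max shard size of 10gb picks 5 target shards, the + * lowest factor of 10 that keeps every target primary at or under the limit.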
+ */ + public ByteSizeValue getMaxShardSize() { + return maxShardSize; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java index 512cc058a64a7..7ed06129dc893 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java @@ -79,6 +79,7 @@ import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; import org.junit.Assert; +import org.opensearch.common.unit.ByteSizeValue; import java.io.IOException; import java.util.Arrays; @@ -701,6 +702,8 @@ private void resizeTest(ResizeType resizeType, CheckedFunction \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG=`dirname "$PRG"`"/$link" - fi +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac done -SAVED="`pwd`" -cd "`dirname \"$PRG\"`/" >/dev/null -APP_HOME="`pwd -P`" -cd "$SAVED" >/dev/null + +APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit APP_NAME="Gradle" -APP_BASE_NAME=`basename "$0"` +APP_BASE_NAME=${0##*/} # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' # Use the maximum available, or set MAX_FD != -1 to use that value. -MAX_FD="maximum" +MAX_FD=maximum warn () { echo "$*" -} +} >&2 die () { echo echo "$*" echo exit 1 -} +} >&2 # OS specific support (must be 'true' or 'false'). cygwin=false msys=false darwin=false nonstop=false -case "`uname`" in - CYGWIN* ) - cygwin=true - ;; - Darwin* ) - darwin=true - ;; - MINGW* ) - msys=true - ;; - NONSTOP* ) - nonstop=true - ;; +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; esac CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar @@ -87,9 +121,9 @@ CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar if [ -n "$JAVA_HOME" ] ; then if [ -x "$JAVA_HOME/jre/sh/java" ] ; then # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" + JAVACMD=$JAVA_HOME/jre/sh/java else - JAVACMD="$JAVA_HOME/bin/java" + JAVACMD=$JAVA_HOME/bin/java fi if [ ! -x "$JAVACMD" ] ; then die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME @@ -98,7 +132,7 @@ Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi else - JAVACMD="java" + JAVACMD=java which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. Please set the JAVA_HOME variable in your environment to match the @@ -106,80 +140,95 @@ location of your Java installation." fi # Increase the maximum file descriptors if we can. -if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then - MAX_FD_LIMIT=`ulimit -H -n` - if [ $? 
-eq 0 ] ; then - if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then - MAX_FD="$MAX_FD_LIMIT" - fi - ulimit -n $MAX_FD - if [ $? -ne 0 ] ; then - warn "Could not set maximum file descriptor limit: $MAX_FD" - fi - else - warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" - fi +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac fi -# For Darwin, add options to specify how the application appears in the dock -if $darwin; then - GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" -fi +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. # For Cygwin or MSYS, switch paths to Windows format before running java -if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then - APP_HOME=`cygpath --path --mixed "$APP_HOME"` - CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` - - JAVACMD=`cygpath --unix "$JAVACMD"` - - # We build the pattern for arguments to be converted via cygpath - ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` - SEP="" - for dir in $ROOTDIRSRAW ; do - ROOTDIRS="$ROOTDIRS$SEP$dir" - SEP="|" - done - OURCYGPATTERN="(^($ROOTDIRS))" - # Add a user-defined pattern to the cygpath arguments - if [ "$GRADLE_CYGPATTERN" != "" ] ; then - OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" - fi +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + # Now convert the arguments - kludge to limit ourselves to /bin/sh - i=0 - for arg in "$@" ; do - CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` - CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option - - if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition - eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` - else - eval `echo args$i`="\"$arg\"" + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) fi - i=`expr $i + 1` + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. 
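+ # For example (illustrative): with three args the loop body runs exactly three + # times; each pass shifts one original arg off the front and pushes its + # (possibly cygpath-converted) replacement onto the back, leaving the converted + # args in their original order.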
+ shift # remove old arg + set -- "$@" "$arg" # push replacement arg done - case $i in - 0) set -- ;; - 1) set -- "$args0" ;; - 2) set -- "$args0" "$args1" ;; - 3) set -- "$args0" "$args1" "$args2" ;; - 4) set -- "$args0" "$args1" "$args2" "$args3" ;; - 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; - 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; - 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; - 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; - 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; - esac fi -# Escape application args -save () { - for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done - echo " " -} -APP_ARGS=`save "$@"` +# Collect all arguments for the java command; +# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of +# shell script including quotes and variable substitutions, so put them in +# double quotes to make sure that they get re-expanded; and +# * put everything else in single quotes, so that it's not re-expanded. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. +# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# -# Collect all arguments for the java command, following the shell quoting and substitution rules -eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' exec "$JAVACMD" "$@" diff --git a/libs/cli/build.gradle b/libs/cli/build.gradle index 7f1e9cb8d04b3..bbb7bf68e2ced 100644 --- a/libs/cli/build.gradle +++ b/libs/cli/build.gradle @@ -28,7 +28,7 @@ * under the License. 
*/ apply plugin: 'opensearch.build' -apply plugin: 'nebula.optional-base' +apply plugin: 'com.netflix.nebula.optional-base' apply plugin: 'opensearch.publish' dependencies { diff --git a/libs/core/build.gradle b/libs/core/build.gradle index 374f2fe572a12..fb8bed207dbc6 100644 --- a/libs/core/build.gradle +++ b/libs/core/build.gradle @@ -30,7 +30,7 @@ import org.opensearch.gradle.info.BuildParams -apply plugin: 'nebula.optional-base' +apply plugin: 'com.netflix.nebula.optional-base' apply plugin: 'opensearch.publish' archivesBaseName = 'opensearch-core' diff --git a/libs/grok/build.gradle b/libs/grok/build.gradle index 86414d18108a1..43a55f84b9d55 100644 --- a/libs/grok/build.gradle +++ b/libs/grok/build.gradle @@ -29,9 +29,9 @@ */ dependencies { - api 'org.jruby.joni:joni:2.1.43' + api 'org.jruby.joni:joni:2.1.44' // joni dependencies: - api 'org.jruby.jcodings:jcodings:1.0.57' + api 'org.jruby.jcodings:jcodings:1.0.58' testImplementation(project(":test:framework")) { exclude group: 'org.opensearch', module: 'opensearch-grok' diff --git a/libs/grok/licenses/jcodings-1.0.57.jar.sha1 b/libs/grok/licenses/jcodings-1.0.57.jar.sha1 deleted file mode 100644 index 1a703c2644787..0000000000000 --- a/libs/grok/licenses/jcodings-1.0.57.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -603a9ceac39cbf7f6f27fe18b2fded4714319b0a \ No newline at end of file diff --git a/libs/grok/licenses/jcodings-1.0.58.jar.sha1 b/libs/grok/licenses/jcodings-1.0.58.jar.sha1 new file mode 100644 index 0000000000000..0202d24704a50 --- /dev/null +++ b/libs/grok/licenses/jcodings-1.0.58.jar.sha1 @@ -0,0 +1 @@ +dce27159dc0382e5f7518d4f3e499fc8396357ed \ No newline at end of file diff --git a/libs/grok/licenses/joni-2.1.43.jar.sha1 b/libs/grok/licenses/joni-2.1.43.jar.sha1 deleted file mode 100644 index ef5dfabb2b391..0000000000000 --- a/libs/grok/licenses/joni-2.1.43.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9a3bf154469d5ff1d1107755904279081a5fb618 \ No newline at end of file diff --git a/libs/grok/licenses/joni-2.1.44.jar.sha1 b/libs/grok/licenses/joni-2.1.44.jar.sha1 new file mode 100644 index 0000000000000..bff9ca56f7e8c --- /dev/null +++ b/libs/grok/licenses/joni-2.1.44.jar.sha1 @@ -0,0 +1 @@ +35746c2aee04ce459a2aa8dc2d626946c5dfb051 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-core-2.14.0.jar.sha1 b/libs/x-content/licenses/jackson-core-2.14.0.jar.sha1 deleted file mode 100644 index 884034642ad39..0000000000000 --- a/libs/x-content/licenses/jackson-core-2.14.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49d219171d6af643e061e9e1baaaf6a6a067918d \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-core-2.14.1.jar.sha1 b/libs/x-content/licenses/jackson-core-2.14.1.jar.sha1 new file mode 100644 index 0000000000000..054873b60eb21 --- /dev/null +++ b/libs/x-content/licenses/jackson-core-2.14.1.jar.sha1 @@ -0,0 +1 @@ +7a07bc535ccf0b7f6929c4d0f2ab9b294ef7c4a3 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.14.0.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.14.0.jar.sha1 deleted file mode 100644 index 695721268da6d..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-cbor-2.14.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -68831fbd18bffd2ecb0eaf3ea75c95d94cfb940d \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.14.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.14.1.jar.sha1 new file mode 100644 index 0000000000000..e1dcda6b33782 --- /dev/null +++ 
b/libs/x-content/licenses/jackson-dataformat-cbor-2.14.1.jar.sha1 @@ -0,0 +1 @@ +04e6fbcdcd2a01e4a5cb5901338cab6199c9b26b \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.14.0.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.14.0.jar.sha1 deleted file mode 100644 index d87b3546f4dc3..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-smile-2.14.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7646180c97a3a2f6a4e63c0725dfb96d3d15353b \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.14.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.14.1.jar.sha1 new file mode 100644 index 0000000000000..7138ebda0e78c --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-smile-2.14.1.jar.sha1 @@ -0,0 +1 @@ +656ccecc1fc85b95d13e5b8080289fc1a5e5e21e \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.14.0.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.14.0.jar.sha1 deleted file mode 100644 index d873dd1dc2c89..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-yaml-2.14.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -06c635ef06d3e4e72a7e9868da41ffa1a0f98d28 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.14.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.14.1.jar.sha1 new file mode 100644 index 0000000000000..300b6920dfc8d --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-yaml-2.14.1.jar.sha1 @@ -0,0 +1 @@ +cf6d18651659a2e64301452c841e6daa62e77bf6 \ No newline at end of file diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/URLDecodeProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/URLDecodeProcessor.java index bf80c5b064703..bb236f957a587 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/URLDecodeProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/URLDecodeProcessor.java @@ -32,8 +32,8 @@ package org.opensearch.ingest.common; -import java.io.UnsupportedEncodingException; import java.net.URLDecoder; +import java.nio.charset.StandardCharsets; import java.util.Map; /** @@ -48,11 +48,7 @@ public final class URLDecodeProcessor extends AbstractStringProcessor { } public static String apply(String value) { - try { - return URLDecoder.decode(value, "UTF-8"); - } catch (UnsupportedEncodingException e) { - throw new IllegalArgumentException("Could not URL-decode value.", e); - } + return URLDecoder.decode(value, StandardCharsets.UTF_8); } @Override diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/URLDecodeProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/URLDecodeProcessorTests.java index 81ed3c89768b7..3d68648825594 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/URLDecodeProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/URLDecodeProcessorTests.java @@ -32,13 +32,14 @@ package org.opensearch.ingest.common; -import java.io.UnsupportedEncodingException; import java.net.URLDecoder; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; public class URLDecodeProcessorTests extends AbstractStringProcessorTestCase { @Override protected String modifyInput(String input) { - return "Hello%20G%C3%BCnter" + input; + return "Hello%20G%C3%BCnter" + urlEncode(input); } @Override @@ -48,10 +49,10 @@ protected 
AbstractStringProcessor newProcessor(String field, boolean ign @Override protected String expectedResult(String input) { - try { - return "Hello Günter" + URLDecoder.decode(input, "UTF-8"); - } catch (UnsupportedEncodingException e) { - throw new IllegalArgumentException("invalid"); - } + return "Hello Günter" + URLDecoder.decode(urlEncode(input), StandardCharsets.UTF_8); + } + + private static String urlEncode(String s) { + return URLEncoder.encode(s, StandardCharsets.UTF_8); } } diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index 8c6f279c445b3..fb056192dcbec 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -43,7 +43,7 @@ dependencies { // geoip2 dependencies: api("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") api("com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}") - api('com.maxmind.db:maxmind-db:2.0.0') + api('com.maxmind.db:maxmind-db:2.1.0') testImplementation 'org.elasticsearch:geolite2-databases:20191119' } diff --git a/modules/ingest-geoip/licenses/maxmind-db-2.0.0.jar.sha1 b/modules/ingest-geoip/licenses/maxmind-db-2.0.0.jar.sha1 deleted file mode 100644 index 32c18f89c6a29..0000000000000 --- a/modules/ingest-geoip/licenses/maxmind-db-2.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e7e0fd82da0a160b7928ba214e699a7e6a74fff4 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/maxmind-db-2.1.0.jar.sha1 b/modules/ingest-geoip/licenses/maxmind-db-2.1.0.jar.sha1 new file mode 100644 index 0000000000000..3d9f6c443ec9f --- /dev/null +++ b/modules/ingest-geoip/licenses/maxmind-db-2.1.0.jar.sha1 @@ -0,0 +1 @@ +5fb0a7c4677ba725149ed557df9d0809d1836b80 \ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 3aa2bbb7dd2f6..9dbfd5d3fb822 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -44,9 +44,9 @@ opensearchplugin { } dependencies { - api 'com.azure:azure-core:1.33.0' + api 'com.azure:azure-core:1.34.0' api 'com.azure:azure-storage-common:12.18.1' - api 'com.azure:azure-core-http-netty:1.12.4' + api 'com.azure:azure-core-http-netty:1.12.7' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" api "io.netty:netty-codec-http2:${versions.netty}" diff --git a/plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 deleted file mode 100644 index 9077fc4ebf84b..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -93f105c2e923f0ab90521cc0e6e729b9c8304ad8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.34.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.34.0.jar.sha1 new file mode 100644 index 0000000000000..df0341f5ce236 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.34.0.jar.sha1 @@ -0,0 +1 @@ +59827c9aeab1c67053fc598207781e56fb8709f6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.12.4.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.12.4.jar.sha1 deleted file mode 100644 index 5cb180b20cf8b..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-http-netty-1.12.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -70dcc08887f2d70a8f812bf00d4fa10390fab3fd \ No newline at end of file diff --git 
a/plugins/repository-azure/licenses/azure-core-http-netty-1.12.7.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.12.7.jar.sha1 new file mode 100644 index 0000000000000..0c6588c512e29 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.12.7.jar.sha1 @@ -0,0 +1 @@ +e7739b5c0d9c968afcb6100f15f3491143d47814 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.14.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.14.0.jar.sha1 deleted file mode 100644 index 63035d2eaf788..0000000000000 --- a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.14.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0e05c71419a2f88b7b27fc90cdd7fef272348719 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.14.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.14.1.jar.sha1 new file mode 100644 index 0000000000000..d4b883fb92650 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.14.1.jar.sha1 @@ -0,0 +1 @@ +ccd98bd674080338a6ca4bcdd52be7fb465cec1d \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.14.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.14.0.jar.sha1 deleted file mode 100644 index ab8f3ddcd4996..0000000000000 --- a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.14.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -171c5831341883b1cebbbf5aafba62c0fca33b95 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.14.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.14.1.jar.sha1 new file mode 100644 index 0000000000000..4eac9019ac93c --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.14.1.jar.sha1 @@ -0,0 +1 @@ +f24e8cb1437e05149b7a3049ebd6700f42e664b1 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.14.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.14.0.jar.sha1 deleted file mode 100644 index 6c32e0864c70e..0000000000000 --- a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.14.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d224162d974acebab7bb6fb7826a5fd319cebbf7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.14.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.14.1.jar.sha1 new file mode 100644 index 0000000000000..a3f1ff40d44f1 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.14.1.jar.sha1 @@ -0,0 +1 @@ +c986d9cc542fe5ade8aaebf5f0360a563dc51762 \ No newline at end of file diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 9528537a3dd5e..561119e9e2c30 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -68,7 +68,7 @@ dependencies { api 'com.google.protobuf:protobuf-java-util:3.20.0' api 'com.google.protobuf:protobuf-java:3.21.7' api 'com.google.code.gson:gson:2.9.0' - api 'com.google.api.grpc:proto-google-common-protos:2.8.0' + api 'com.google.api.grpc:proto-google-common-protos:2.10.0' api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' api 'com.google.cloud:google-cloud-core-http:1.93.3' api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" @@ -76,7 +76,7 @@ dependencies { api 
'com.google.oauth-client:google-oauth-client:1.33.3' api 'com.google.api-client:google-api-client:1.34.0' api 'com.google.http-client:google-http-client-appengine:1.41.8' - api 'com.google.http-client:google-http-client-jackson2:1.35.0' + api 'com.google.http-client:google-http-client-jackson2:1.42.3' api 'com.google.http-client:google-http-client-gson:1.41.4' api 'com.google.api:gax-httpjson:0.103.1' api 'io.grpc:grpc-context:1.46.0' diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.35.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.35.0.jar.sha1 deleted file mode 100644 index 0342f57779315..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.35.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c1c2a08792b935f3345590783ada872f4a0997f1 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.42.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.42.3.jar.sha1 new file mode 100644 index 0000000000000..34d7d49f7b147 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.42.3.jar.sha1 @@ -0,0 +1 @@ +789cafde696403b429026bf19071caf46d8c8934 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.10.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.10.0.jar.sha1 new file mode 100644 index 0000000000000..bf97707836c70 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-2.10.0.jar.sha1 @@ -0,0 +1 @@ +cf5ac081c05682b0eba6659dee55352fde5852e1 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.8.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.8.0.jar.sha1 deleted file mode 100644 index 3f14d9e59c9e9..0000000000000 --- a/plugins/repository-gcs/licenses/proto-google-common-protos-2.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8adcbc3c5c3b1b7af1cf1e8a25af26a516d62a4c \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 0fd28154fc2fb..e5d65c9451c1f 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -69,12 +69,12 @@ dependencies { api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.10' runtimeOnly 'com.google.guava:guava:31.1-jre' - api 'com.google.protobuf:protobuf-java:3.21.7' + api 'com.google.protobuf:protobuf-java:3.21.9' api "commons-logging:commons-logging:${versions.commonslogging}" api 'commons-cli:commons-cli:1.5.0' api "commons-codec:commons-codec:${versions.commonscodec}" api 'commons-collections:commons-collections:3.2.2' - api 'org.apache.commons:commons-compress:1.21' + api 'org.apache.commons:commons-compress:1.22' api 'org.apache.commons:commons-configuration2:2.8.0' api 'commons-io:commons-io:2.11.0' api 'org.apache.commons:commons-lang3:3.12.0' diff --git a/plugins/repository-hdfs/licenses/commons-compress-1.21.jar.sha1 b/plugins/repository-hdfs/licenses/commons-compress-1.21.jar.sha1 deleted file mode 100644 index 81ac609a1aa26..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-compress-1.21.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4ec95b60d4e86b5c95a0e919cb172a0af98011ef \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-compress-1.22.jar.sha1 b/plugins/repository-hdfs/licenses/commons-compress-1.22.jar.sha1 new file mode 100644 index 
0000000000000..9ab7216c8050a --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-compress-1.22.jar.sha1 @@ -0,0 +1 @@ +691a8b4e6cf4248c3bc72c8b719337d5cb7359fa \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.21.7.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.21.7.jar.sha1 deleted file mode 100644 index faa673a23ef41..0000000000000 --- a/plugins/repository-hdfs/licenses/protobuf-java-3.21.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -96cfc7147192f1de72c3d7d06972155ffb7d180c \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.21.9.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.21.9.jar.sha1 new file mode 100644 index 0000000000000..2e03dbe5dafd0 --- /dev/null +++ b/plugins/repository-hdfs/licenses/protobuf-java-3.21.9.jar.sha1 @@ -0,0 +1 @@ +ed1240d9231044ce6ccf1978512f6e44416bb7e7 \ No newline at end of file diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index e50ca63c3da69..82aa4cd511ef1 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -38,9 +38,6 @@ apply plugin: 'opensearch.standalone-test' apply from : "$rootDir/gradle/bwc-test.gradle" for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { - if (bwcVersion.before('6.3.0')) { - continue; - } String baseName = "v${bwcVersion}" testClusters { diff --git a/qa/repository-multi-version/build.gradle b/qa/repository-multi-version/build.gradle index fdde0997df371..67710095d30bc 100644 --- a/qa/repository-multi-version/build.gradle +++ b/qa/repository-multi-version/build.gradle @@ -41,9 +41,6 @@ dependencies { } for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { - if (bwcVersion.before('6.3.0')) { - continue; - } String baseName = "v${bwcVersion}" String oldClusterName = "${baseName}-old" String newClusterName = "${baseName}-new" @@ -76,28 +73,20 @@ for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { systemProperty 'tests.rest.suite', 'step2' } - // Step 3 and Step 4 registered for versions for OpenSearch - // since the ES cluster would not be able to read snapshots from OpenSearch cluster in Step 3. 
- if (bwcVersion.after('7.10.2')) { - tasks.register("${baseName}#Step3OldClusterTest", StandaloneRestIntegTestTask) { + tasks.register("${baseName}#Step3OldClusterTest", StandaloneRestIntegTestTask) { useCluster testClusters."${oldClusterName}" dependsOn "${baseName}#Step2NewClusterTest" systemProperty 'tests.rest.suite', 'step3' } - tasks.register("${baseName}#Step4NewClusterTest", StandaloneRestIntegTestTask) { - useCluster testClusters."${newClusterName}" - dependsOn "${baseName}#Step3OldClusterTest" - systemProperty 'tests.rest.suite', 'step4' - } + tasks.register("${baseName}#Step4NewClusterTest", StandaloneRestIntegTestTask) { + useCluster testClusters."${newClusterName}" + dependsOn "${baseName}#Step3OldClusterTest" + systemProperty 'tests.rest.suite', 'step4' + } - tasks.register(bwcTaskName(bwcVersion)) { - dependsOn tasks.named("${baseName}#Step4NewClusterTest") - } - } else { - tasks.register(bwcTaskName(bwcVersion)) { - dependsOn tasks.named("${baseName}#Step2NewClusterTest") - } + tasks.register(bwcTaskName(bwcVersion)) { + dependsOn tasks.named("${baseName}#Step4NewClusterTest") } tasks.matching { it.name.startsWith(baseName) && it.name.endsWith("ClusterTest") }.configureEach { diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java index 8bebb3881e3fd..2d26238763a09 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java @@ -32,14 +32,16 @@ package org.opensearch.upgrades; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; +import org.hamcrest.MatcherAssert; import org.opensearch.client.Request; +import org.opensearch.client.Response; import org.opensearch.client.ResponseException; import org.opensearch.test.XContentTestUtils.JsonMapView; +import java.io.IOException; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -62,25 +64,7 @@ public void testSystemIndicesUpgrades() throws Exception { "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); client().performRequest(bulk); - // start an async reindex job - Request reindex = new Request("POST", "/_reindex"); - reindex.setJsonEntity( - "{\n" + - " \"source\":{\n" + - " \"index\":\"test_index_old\"\n" + - " },\n" + - " \"dest\":{\n" + - " \"index\":\"test_index_reindex\"\n" + - " }\n" + - "}"); - reindex.addParameter("wait_for_completion", "false"); - Map<String, Object> response = entityAsMap(client().performRequest(reindex)); - String taskId = (String) response.get("task"); - - // wait for task - Request getTask = new Request("GET", "/_tasks/" + taskId); - getTask.addParameter("wait_for_completion", "true"); - client().performRequest(getTask); + createAndVerifyStoredTask(); // make sure .tasks index exists Request getTasksIndex = new Request("GET", "/.tasks"); @@ -97,6 +81,8 @@ public void testSystemIndicesUpgrades() throws Exception { } }); } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { + createAndVerifyStoredTask(); + assertBusy(() -> { Request clusterStateRequest = new Request("GET", "/_cluster/state/metadata"); Map<String, Object> indices = new JsonMapView(entityAsMap(client().performRequest(clusterStateRequest))) @@ -115,4 +101,29 @@ public void testSystemIndicesUpgrades() throws Exception { }); } } + + /** + * Completed tasks get 
persisted into the .tasks index, so this method waits + until the task is completed in order to verify that it has been successfully + written to the index and can be retrieved. + */ + private static void createAndVerifyStoredTask() throws Exception { + // Use update by query to create an async task + final Request updateByQueryRequest = new Request("POST", "/test_index_old/_update_by_query"); + updateByQueryRequest.addParameter("wait_for_completion", "false"); + final Response updateByQueryResponse = client().performRequest(updateByQueryRequest); + MatcherAssert.assertThat(updateByQueryResponse.getStatusLine().getStatusCode(), equalTo(200)); + final String taskId = (String) entityAsMap(updateByQueryResponse).get("task"); + + // wait for task to complete + waitUntil(() -> { + try { + final Response getTaskResponse = client().performRequest(new Request("GET", "/_tasks/" + taskId)); + MatcherAssert.assertThat(getTaskResponse.getStatusLine().getStatusCode(), equalTo(200)); + return (Boolean) entityAsMap(getTaskResponse).get("completed"); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } } diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/AwarenessAttributeDecommissionRestIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/AwarenessAttributeDecommissionRestIT.java new file mode 100644 index 0000000000000..4d9115b8962ea --- /dev/null +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/AwarenessAttributeDecommissionRestIT.java @@ -0,0 +1,101 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.http; + +import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingResponse; +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.client.ResponseException; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.common.settings.Settings; +import org.opensearch.rest.RestStatus; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.opensearch.test.NodeRoles.onlyRole; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class AwarenessAttributeDecommissionRestIT extends HttpSmokeTestCase { + + public void testRestStatusForDecommissioningFailedException() { + internalCluster().startNodes(3); + Request request = new Request("PUT", "/_cluster/decommission/awareness/zone/zone-1"); + ResponseException exception = expectThrows( + ResponseException.class, + () -> getRestClient().performRequest(request) + ); + assertEquals(exception.getResponse().getStatusLine().getStatusCode(), RestStatus.BAD_REQUEST.getStatus()); + assertTrue(exception.getMessage().contains("invalid awareness attribute requested for decommissioning")); + } + + public void testRestStatusForAcknowledgedDecommission() throws IOException { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + logger.info("--> start 3 cluster manager nodes on zones 'a' & 'b' & 'c'"); + List<String> clusterManagerNodes = internalCluster().startNodes( + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "a") + .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) + .build(), + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "b") + .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) + .build(), + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "c") + .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) + .build() + ); + + logger.info("--> start 3 data nodes on zones 'a' & 'b' & 'c'"); + List<String> dataNodes = internalCluster().startNodes( + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "a") + .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE)) + .build(), + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "b") + .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE)) + .build(), + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "c") + .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE)) + .build() + ); + + ensureStableCluster(6);
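+ // Weigh traffic away from zone 'c' (routing weight 0.0) before decommissioning it; the decommission request below is then expected to succeed with HTTP 200.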
+ logger.info("--> setting shard routing weights for weighted round robin"); + Map<String, Double> weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + + ClusterPutWeightedRoutingResponse weightedRoutingResponse = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .get(); + assertTrue(weightedRoutingResponse.isAcknowledged()); + + Request request = new Request("PUT", "/_cluster/decommission/awareness/zone/c"); + Response response = getRestClient().performRequest(request); + assertEquals(response.getStatusLine().getStatusCode(), RestStatus.OK.getStatus()); + } +} diff --git a/qa/verify-version-constants/build.gradle b/qa/verify-version-constants/build.gradle index 27a3b07157d21..8b0dd20899862 100644 --- a/qa/verify-version-constants/build.gradle +++ b/qa/verify-version-constants/build.gradle @@ -38,9 +38,6 @@ apply plugin: 'opensearch.standalone-test' apply from : "$rootDir/gradle/bwc-test.gradle" for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { - if (bwcVersion.before('6.3.0')) { - continue; - } String baseName = "v${bwcVersion}" testClusters { diff --git a/release-notes/opensearch.release-notes-1.3.7.md b/release-notes/opensearch.release-notes-1.3.7.md new file mode 100644 index 0000000000000..b8330b5bfcd7d --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.7.md @@ -0,0 +1,10 @@ +## 2022-11-30 Version 1.3.7 Release Notes + +### Upgrades +* Upgrade netty to 4.1.84.Final ([#4919](https://github.com/opensearch-project/OpenSearch/pull/4919)) +* OpenJDK Update (October 2022 Patch releases) ([#5016](https://github.com/opensearch-project/OpenSearch/pull/5016)) +* Upgrade com.netflix.nebula:nebula-publishing-plugin to 4.6.0 and gradle-docker-compose-plugin to 0.14.12 ([#5136](https://github.com/opensearch-project/OpenSearch/pull/5136)) +* Updated Jackson to 2.14.1 ([#5356](https://github.com/opensearch-project/OpenSearch/pull/5356)) + +### Bug Fixes +* Fixed error handling while reading analyzer mapping rules ([#5149](https://github.com/opensearch-project/OpenSearch/pull/5149)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml new file mode 100644 index 0000000000000..b5fe43edcb003 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml @@ 
-0,0 +1,82 @@ +--- +"Shrink index with max_shard_size": + # shrink index with max_shard_size parameter, which is used to generate an optimum + # number_of_shards for the target index. + + - skip: + version: " - 2.9.99" + reason: "only available in 3.0+" + features: allowed_warnings + + - do: + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id + + - do: + indices.create: + index: source + wait_for_active_shards: 1 + body: + settings: + # ensure everything is allocated on the same data node + index.routing.allocation.include._id: $node_id + index.number_of_shards: 3 + index.number_of_replicas: 0 + - do: + index: + index: source + id: "1" + body: { "foo": "hello world" } + + - do: + get: + index: source + id: "1" + + - match: { _index: source } + - match: { _id: "1" } + - match: { _source: { foo: "hello world" } } + + # make it read-only + - do: + indices.put_settings: + index: source + body: + index.blocks.write: true + index.number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + index: source + + # shrink with max_shard_size + - do: + allowed_warnings: + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." + indices.shrink: + index: "source" + target: "new_shrunken_index" + wait_for_active_shards: 1 + master_timeout: 10s + body: + settings: + index.number_of_replicas: 0 + max_shard_size: "10gb" + + - do: + cluster.health: + wait_for_status: green + + - do: + get: + index: "new_shrunken_index" + id: "1" + + - do: + indices.get_settings: + index: "new_shrunken_index" + + - match: { new_shrunken_index.settings.index.number_of_shards: "1" } diff --git a/sandbox/libs/authn/licenses/jackson-annotations-2.14.0.jar.sha1 b/sandbox/libs/authn/licenses/jackson-annotations-2.14.0.jar.sha1 deleted file mode 100644 index 575fc0a7c0bfa..0000000000000 --- a/sandbox/libs/authn/licenses/jackson-annotations-2.14.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fb7afb3c9c8ea363a9c88ea9c0a7177cf2fbd369 \ No newline at end of file diff --git a/sandbox/libs/authn/licenses/jackson-annotations-2.14.1.jar.sha1 b/sandbox/libs/authn/licenses/jackson-annotations-2.14.1.jar.sha1 new file mode 100644 index 0000000000000..e43faef9e23ff --- /dev/null +++ b/sandbox/libs/authn/licenses/jackson-annotations-2.14.1.jar.sha1 @@ -0,0 +1 @@ +2a6ad504d591a7903ffdec76b5b7252819a2d162 \ No newline at end of file diff --git a/sandbox/libs/authn/licenses/jackson-core-2.14.0.jar.sha1 b/sandbox/libs/authn/licenses/jackson-core-2.14.0.jar.sha1 deleted file mode 100644 index 884034642ad39..0000000000000 --- a/sandbox/libs/authn/licenses/jackson-core-2.14.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49d219171d6af643e061e9e1baaaf6a6a067918d \ No newline at end of file diff --git a/sandbox/libs/authn/licenses/jackson-core-2.14.1.jar.sha1 b/sandbox/libs/authn/licenses/jackson-core-2.14.1.jar.sha1 new file mode 100644 index 0000000000000..054873b60eb21 --- /dev/null +++ b/sandbox/libs/authn/licenses/jackson-core-2.14.1.jar.sha1 @@ -0,0 +1 @@ +7a07bc535ccf0b7f6929c4d0f2ab9b294ef7c4a3 \ No newline at end of file diff --git a/sandbox/libs/authn/licenses/jackson-databind-2.14.0.jar.sha1 b/sandbox/libs/authn/licenses/jackson-databind-2.14.0.jar.sha1 deleted file mode 100644 index ec437a4d0b4f1..0000000000000 --- a/sandbox/libs/authn/licenses/jackson-databind-2.14.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -513b8ca3fea0352ceebe4d0bbeea527ab343dc1a \ No newline at end of file diff --git 
a/sandbox/libs/authn/licenses/jackson-databind-2.14.1.jar.sha1 b/sandbox/libs/authn/licenses/jackson-databind-2.14.1.jar.sha1 new file mode 100644 index 0000000000000..0e6726927ebac --- /dev/null +++ b/sandbox/libs/authn/licenses/jackson-databind-2.14.1.jar.sha1 @@ -0,0 +1 @@ +268524b9056cae1211b9f1f52560ef19347f4d17 \ No newline at end of file diff --git a/sandbox/libs/authn/licenses/jackson-dataformat-yaml-2.14.0.jar.sha1 b/sandbox/libs/authn/licenses/jackson-dataformat-yaml-2.14.0.jar.sha1 deleted file mode 100644 index d873dd1dc2c89..0000000000000 --- a/sandbox/libs/authn/licenses/jackson-dataformat-yaml-2.14.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -06c635ef06d3e4e72a7e9868da41ffa1a0f98d28 \ No newline at end of file diff --git a/sandbox/libs/authn/licenses/jackson-dataformat-yaml-2.14.1.jar.sha1 b/sandbox/libs/authn/licenses/jackson-dataformat-yaml-2.14.1.jar.sha1 new file mode 100644 index 0000000000000..300b6920dfc8d --- /dev/null +++ b/sandbox/libs/authn/licenses/jackson-dataformat-yaml-2.14.1.jar.sha1 @@ -0,0 +1 @@ +cf6d18651659a2e64301452c841e6daa62e77bf6 \ No newline at end of file diff --git a/server/build.gradle b/server/build.gradle index 3dc0f1d380647..62ef695a642f2 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -31,7 +31,7 @@ import org.opensearch.gradle.info.BuildParams apply plugin: 'opensearch.build' -apply plugin: 'nebula.optional-base' +apply plugin: 'com.netflix.nebula.optional-base' apply plugin: 'opensearch.publish' apply plugin: 'opensearch.internal-cluster-test' diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java index daa124fab2220..3420074a0f60b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java @@ -66,6 +66,7 @@ import org.opensearch.common.Priority; import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.Index; @@ -75,8 +76,8 @@ import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.util.Arrays; @@ -760,4 +761,72 @@ public void testShrinkThenSplitWithFailedNode() throws Exception { ); ensureGreen("splitagain"); } + + public void testCreateShrinkIndexWithMaxShardSize() { + internalCluster().ensureAtLeastNumDataNodes(2); + final String shrinkNode = internalCluster().startDataOnlyNode(); + + final int shardCount = between(2, 5); + prepareCreate("source").setSettings( + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shardCount) + ).get(); + for (int i = 0; i < 20; i++) { + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + } + client().admin().indices().prepareFlush("source").get(); + ensureGreen(); + + client().admin() + .indices() + .prepareUpdateSettings("source") + 
.setSettings( + Settings.builder() + .put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), shrinkNode) + .put(IndexMetadata.SETTING_BLOCKS_WRITE, true) + ) + .get(); + ensureGreen(); + + // Cannot set max_shard_size and index.number_of_shards at the same time + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ) + .setMaxShardSize(new ByteSizeValue(1)) + .setResizeType(ResizeType.SHRINK) + .get() + ); + assertEquals(exc.getMessage(), "Cannot set max_shard_size and index.number_of_shards at the same time!"); + + // use max_shard_size to calculate the target index's shards number + // set max_shard_size to 1 so the target index's shards number will be the same as the source index's + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .putNull(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey()) + .build() + ) + .setMaxShardSize(new ByteSizeValue(1)) + .setResizeType(ResizeType.SHRINK) + .get() + ); + ensureGreen(); + + GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get(); + assertEquals(String.valueOf(shardCount), target.getIndexToSettings().get("target").get("index.number_of_shards")); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java new file mode 100644 index 0000000000000..f8629e2c88b07 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java @@ -0,0 +1,313 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.backpressure; + +import org.hamcrest.MatcherAssert; +import org.junit.After; +import org.junit.Before; +import org.opensearch.action.ActionListener; +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionResponse; +import org.opensearch.action.ActionType; +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugins.ActionPlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.search.backpressure.settings.NodeDuressSettings; +import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; +import org.opensearch.search.backpressure.settings.SearchShardTaskSettings; +import org.opensearch.search.backpressure.trackers.CpuUsageTracker; +import org.opensearch.search.backpressure.trackers.ElapsedTimeTracker; +import org.opensearch.search.backpressure.trackers.HeapUsageTracker; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskCancelledException; +import org.opensearch.tasks.TaskId; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) +public class SearchBackpressureIT extends OpenSearchIntegTestCase { + + private static final TimeValue TIMEOUT = new TimeValue(10, TimeUnit.SECONDS); + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + final List<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins()); + plugins.add(TestPlugin.class); + return plugins; + } + + @Before + public final void setupNodeSettings() { + Settings request = Settings.builder() + .put(NodeDuressSettings.SETTING_CPU_THRESHOLD.getKey(), 0.0) + .put(NodeDuressSettings.SETTING_HEAP_THRESHOLD.getKey(), 0.0) + .put(NodeDuressSettings.SETTING_NUM_SUCCESSIVE_BREACHES.getKey(), 1) + .put(SearchShardTaskSettings.SETTING_TOTAL_HEAP_PERCENT_THRESHOLD.getKey(), 0.0) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + } + + @After + public final void cleanupNodeSettings() { + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + ); + } + + public void testSearchShardTaskCancellationWithHighElapsedTime() throws InterruptedException { + Settings request = Settings.builder() + .put(SearchBackpressureSettings.SETTING_MODE.getKey(), "enforced") + .put(ElapsedTimeTracker.SETTING_ELAPSED_TIME_MILLIS_THRESHOLD.getKey(), 1000) + .build();
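+ // Enforced mode with a 1s elapsed-time threshold: the busy-waiting test action below should be cancelled well before the 10s TIMEOUT. + 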
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + + ExceptionCatchingListener listener = new ExceptionCatchingListener(); + client().execute(TestTransportAction.ACTION, new TestRequest(RequestType.HIGH_ELAPSED_TIME), listener); + assertTrue(listener.latch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS)); + + Exception caughtException = listener.getException(); + assertNotNull("SearchShardTask should have been cancelled with TaskCancelledException", caughtException); + MatcherAssert.assertThat(caughtException, instanceOf(TaskCancelledException.class)); + MatcherAssert.assertThat(caughtException.getMessage(), containsString("elapsed time exceeded")); + } + + public void testSearchShardTaskCancellationWithHighCpu() throws InterruptedException { + Settings request = Settings.builder() + .put(SearchBackpressureSettings.SETTING_MODE.getKey(), "enforced") + .put(CpuUsageTracker.SETTING_CPU_TIME_MILLIS_THRESHOLD.getKey(), 1000) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + + ExceptionCatchingListener listener = new ExceptionCatchingListener(); + client().execute(TestTransportAction.ACTION, new TestRequest(RequestType.HIGH_CPU), listener); + assertTrue(listener.latch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS)); + + Exception caughtException = listener.getException(); + assertNotNull("SearchShardTask should have been cancelled with TaskCancelledException", caughtException); + MatcherAssert.assertThat(caughtException, instanceOf(TaskCancelledException.class)); + MatcherAssert.assertThat(caughtException.getMessage(), containsString("cpu usage exceeded")); + } + + public void testSearchShardTaskCancellationWithHighHeapUsage() throws InterruptedException { + // Before SearchBackpressureService cancels a task based on its heap usage, we need to build up the heap moving average + // To build up the heap moving average, we need to hit the same node with multiple requests and then hit the same node with a + // request having higher heap usage + String node = randomFrom(internalCluster().getNodeNames()); + final int MOVING_AVERAGE_WINDOW_SIZE = 10; + Settings request = Settings.builder() + .put(SearchBackpressureSettings.SETTING_MODE.getKey(), "enforced") + .put(HeapUsageTracker.SETTING_HEAP_PERCENT_THRESHOLD.getKey(), 0.0) + .put(HeapUsageTracker.SETTING_HEAP_VARIANCE_THRESHOLD.getKey(), 1.0) + .put(HeapUsageTracker.SETTING_HEAP_MOVING_AVERAGE_WINDOW_SIZE.getKey(), MOVING_AVERAGE_WINDOW_SIZE) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + + ExceptionCatchingListener listener = new ExceptionCatchingListener(); + for (int i = 0; i < MOVING_AVERAGE_WINDOW_SIZE; i++) { + client(node).execute(TestTransportAction.ACTION, new TestRequest(RequestType.HIGH_HEAP), listener); + } + + listener = new ExceptionCatchingListener(); + client(node).execute(TestTransportAction.ACTION, new TestRequest(RequestType.HIGHER_HEAP), listener); + assertTrue(listener.latch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS)); + + Exception caughtException = listener.getException(); + assertNotNull("SearchShardTask should have been cancelled with TaskCancelledException", caughtException); + MatcherAssert.assertThat(caughtException, instanceOf(TaskCancelledException.class)); + MatcherAssert.assertThat(caughtException.getMessage(), containsString("heap usage exceeded")); + } + + public void testSearchCancellationWithBackpressureDisabled() throws 
InterruptedException { + Settings request = Settings.builder().put(SearchBackpressureSettings.SETTING_MODE.getKey(), "monitor_only").build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + + ExceptionCatchingListener listener = new ExceptionCatchingListener(); + client().execute(TestTransportAction.ACTION, new TestRequest(RequestType.HIGH_ELAPSED_TIME), listener); + // wait up to TIMEOUT * 3 for the request to complete and the latch to count down. + assertTrue( + "SearchShardTask should have been completed by now and counted down the latch", + listener.latch.await(TIMEOUT.getSeconds() * 3, TimeUnit.SECONDS) + ); + + Exception caughtException = listener.getException(); + assertNull("SearchShardTask shouldn't have been cancelled for monitor_only mode", caughtException); + } + + private static class ExceptionCatchingListener implements ActionListener<TestResponse> { + private final CountDownLatch latch; + private Exception exception = null; + + public ExceptionCatchingListener() { + this.latch = new CountDownLatch(1); + } + + @Override + public void onResponse(TestResponse r) { + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + this.exception = e; + latch.countDown(); + } + + private Exception getException() { + return exception; + } + } + + enum RequestType { + HIGH_CPU, + HIGH_HEAP, + HIGHER_HEAP, + HIGH_ELAPSED_TIME; + } + + public static class TestRequest extends ActionRequest { + private final RequestType type; + + public TestRequest(RequestType type) { + this.type = type; + } + + public TestRequest(StreamInput in) throws IOException { + super(in); + this.type = in.readEnum(RequestType.class); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) { + return new SearchShardTask(id, type, action, "", parentTaskId, headers); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeEnum(type); + } + + public RequestType getType() { + return this.type; + } + } + + public static class TestResponse extends ActionResponse { + public TestResponse() {} + + public TestResponse(StreamInput in) {} + + @Override + public void writeTo(StreamOutput out) throws IOException {} + } + + public static class TestTransportAction extends HandledTransportAction<TestRequest, TestResponse> { + public static final ActionType<TestResponse> ACTION = new ActionType<>("internal::test_action", TestResponse::new); + private final ThreadPool threadPool; + + @Inject + public TestTransportAction(TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters) { + super(ACTION.name(), transportService, actionFilters, TestRequest::new); + this.threadPool = threadPool; + } + + @Override + protected void doExecute(Task task, TestRequest request, ActionListener<TestResponse> listener) { + threadPool.executor(ThreadPool.Names.SEARCH).execute(() -> { + try { + SearchShardTask searchShardTask = (SearchShardTask) task; + long startTime = System.nanoTime(); + + // Doing a busy-wait until task cancellation or timeout. + // We are running HIGH_HEAP requests to build up the heap moving average and do not expect them to get cancelled. 
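+ // For HIGH_HEAP requests the loop body runs exactly once: the type check in the loop condition short-circuits, so the task completes normally.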
+ do { + doWork(request); + } while (request.type != RequestType.HIGH_HEAP + && searchShardTask.isCancelled() == false + && (System.nanoTime() - startTime) < TIMEOUT.getNanos()); + + if (searchShardTask.isCancelled()) { + throw new TaskCancelledException(searchShardTask.getReasonCancelled()); + } else { + listener.onResponse(new TestResponse()); + } + } catch (Exception e) { + listener.onFailure(e); + } + }); + } + + private void doWork(TestRequest request) throws InterruptedException { + switch (request.getType()) { + case HIGH_CPU: + long i = 0, j = 1, k = 1, iterations = 1000; + do { + j += i; + k *= j; + i++; + } while (i < iterations); + break; + case HIGH_HEAP: + Byte[] bytes = new Byte[100000]; + int[] ints = new int[1000]; + break; + case HIGHER_HEAP: + Byte[] more_bytes = new Byte[1000000]; + int[] more_ints = new int[10000]; + break; + case HIGH_ELAPSED_TIME: + Thread.sleep(100); + break; + } + } + } + + public static class TestPlugin extends Plugin implements ActionPlugin { + @Override + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { + return Collections.singletonList(new ActionHandler<>(TestTransportAction.ACTION, TestTransportAction.class)); + } + + @Override + public List<ActionType<? extends ActionResponse>> getClientActions() { + return Collections.singletonList(TestTransportAction.ACTION); + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java index 17c32bb407bc3..53b70aa915a37 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -9,6 +9,7 @@ import org.junit.BeforeClass; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; @@ -27,6 +28,7 @@ import org.opensearch.index.Index; import org.opensearch.index.IndexNotFoundException; import org.opensearch.monitor.fs.FsInfo; +import org.opensearch.repositories.fs.FsRepository; import java.nio.file.Files; import java.nio.file.Path; @@ -74,23 +76,30 @@ private Settings.Builder chunkedRepositorySettings() { * Ensures availability of sufficient data nodes and search capable nodes. 
*/ public void testCreateSearchableSnapshot() throws Exception { + final String snapshotName = "test-snap"; + final String repoName = "test-repo"; + final String indexName1 = "test-idx-1"; + final String restoredIndexName1 = indexName1 + "-copy"; + final String indexName2 = "test-idx-2"; + final String restoredIndexName2 = indexName2 + "-copy"; final int numReplicasIndex1 = randomIntBetween(1, 4); final int numReplicasIndex2 = randomIntBetween(0, 2); final Client client = client(); internalCluster().ensureAtLeastNumDataNodes(Math.max(numReplicasIndex1, numReplicasIndex2) + 1); - createIndexWithDocsAndEnsureGreen(numReplicasIndex1, 100, "test-idx-1"); - createIndexWithDocsAndEnsureGreen(numReplicasIndex2, 100, "test-idx-2"); + createIndexWithDocsAndEnsureGreen(numReplicasIndex1, 100, indexName1); + createIndexWithDocsAndEnsureGreen(numReplicasIndex2, 100, indexName2); - takeSnapshot(client, "test-idx-1", "test-idx-2"); - deleteIndicesAndEnsureGreen(client, "test-idx-1", "test-idx-2"); + createRepositoryWithSettings(null, repoName); + takeSnapshot(client, snapshotName, repoName, indexName1, indexName2); + deleteIndicesAndEnsureGreen(client, indexName1, indexName2); internalCluster().ensureAtLeastNumSearchNodes(Math.max(numReplicasIndex1, numReplicasIndex2) + 1); - restoreSnapshotAndEnsureGreen(client); + restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); - assertDocCount("test-idx-1-copy", 100L); - assertDocCount("test-idx-2-copy", 100L); - assertIndexDirectoryDoesNotExist("test-idx-1-copy", "test-idx-2-copy"); + assertDocCount(restoredIndexName1, 100L); + assertDocCount(restoredIndexName2, 100L); + assertIndexDirectoryDoesNotExist(restoredIndexName1, restoredIndexName2); } /** @@ -101,16 +110,19 @@ public void testCreateSearchableSnapshotWithChunks() throws Exception { final int numReplicasIndex = randomIntBetween(1, 4); final String indexName = "test-idx"; final String restoredIndexName = indexName + "-copy"; + final String repoName = "test-repo"; + final String snapshotName = "test-snap"; final Client client = client(); Settings.Builder repositorySettings = chunkedRepositorySettings(); internalCluster().ensureAtLeastNumSearchAndDataNodes(numReplicasIndex + 1); createIndexWithDocsAndEnsureGreen(numReplicasIndex, 1000, indexName); - takeSnapshot(client, repositorySettings, indexName); + createRepositoryWithSettings(repositorySettings, repoName); + takeSnapshot(client, snapshotName, repoName, indexName); deleteIndicesAndEnsureGreen(client, indexName); - restoreSnapshotAndEnsureGreen(client); + restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertDocCount(restoredIndexName, 1000L); } @@ -124,13 +136,16 @@ public void testSearchableSnapshotAllocationForLocalAndRemoteShardsOnSameNode() final int numReplicasIndex = randomIntBetween(1, 4); final String indexName = "test-idx"; final String restoredIndexName = indexName + "-copy"; + final String repoName = "test-repo"; + final String snapshotName = "test-snap"; final Client client = client(); internalCluster().ensureAtLeastNumSearchAndDataNodes(numReplicasIndex + 1); createIndexWithDocsAndEnsureGreen(numReplicasIndex, 100, indexName); - takeSnapshot(client, indexName); + createRepositoryWithSettings(null, repoName); + takeSnapshot(client, snapshotName, repoName, indexName); - restoreSnapshotAndEnsureGreen(client); + restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertDocCount(restoredIndexName, 100L); assertDocCount(indexName, 100L); @@ -145,16 +160,19 @@ public void 
testSearchableSnapshotAllocationForFailoverAndRecovery() throws Exce final int numReplicasIndex = 1; final String indexName = "test-idx"; final String restoredIndexName = indexName + "-copy"; + final String repoName = "test-repo"; + final String snapshotName = "test-snap"; final Client client = client(); internalCluster().ensureAtLeastNumDataNodes(numReplicasIndex + 1); createIndexWithDocsAndEnsureGreen(numReplicasIndex, 100, indexName); - takeSnapshot(client, indexName); + createRepositoryWithSettings(null, repoName); + takeSnapshot(client, snapshotName, repoName, indexName); deleteIndicesAndEnsureGreen(client, indexName); internalCluster().ensureAtLeastNumSearchNodes(numReplicasIndex + 1); - restoreSnapshotAndEnsureGreen(client); + restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertDocCount(restoredIndexName, 100L); logger.info("--> stop a random search node"); @@ -183,14 +201,17 @@ public void testSearchableSnapshotAllocationForFailoverAndRecovery() throws Exce public void testSearchableSnapshotIndexIsReadOnly() throws Exception { final String indexName = "test-index"; final String restoredIndexName = indexName + "-copy"; + final String repoName = "test-repo"; + final String snapshotName = "test-snap"; final Client client = client(); createIndexWithDocsAndEnsureGreen(0, 100, indexName); - takeSnapshot(client, indexName); + createRepositoryWithSettings(null, repoName); + takeSnapshot(client, snapshotName, repoName, indexName); deleteIndicesAndEnsureGreen(client, indexName); internalCluster().ensureAtLeastNumSearchNodes(1); - restoreSnapshotAndEnsureGreen(client); + restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); assertIndexingBlocked(restoredIndexName); assertIndexSettingChangeBlocked(restoredIndexName); @@ -202,6 +223,39 @@ public void testSearchableSnapshotIndexIsReadOnly() throws Exception { ); } + public void testDeleteSearchableSnapshotBackingIndexThrowsException() throws Exception { + final String indexName = "test-index"; + final Client client = client(); + final String repoName = "test-repo"; + final String snapshotName = "test-snap"; + createRepositoryWithSettings(null, repoName); + createIndexWithDocsAndEnsureGreen(0, 100, indexName); + takeSnapshot(client, snapshotName, repoName, indexName); + internalCluster().ensureAtLeastNumSearchNodes(1); + restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); + assertThrows( + SnapshotInUseDeletionException.class, + () -> client().admin().cluster().deleteSnapshot(new DeleteSnapshotRequest(repoName, snapshotName)).actionGet() + ); + } + + public void testDeleteSearchableSnapshotBackingIndex() throws Exception { + final String indexName1 = "test-index1"; + final String indexName2 = "test-index2"; + final Client client = client(); + final String repoName = "test-repo"; + final String snapshotName1 = "test-snapshot1"; + final String snapshotName2 = "test-snap"; + createRepositoryWithSettings(null, repoName); + createIndexWithDocsAndEnsureGreen(0, 100, indexName1); + createIndexWithDocsAndEnsureGreen(0, 100, indexName2); + takeSnapshot(client, snapshotName1, repoName, indexName1); + takeSnapshot(client, snapshotName2, repoName, indexName2); + internalCluster().ensureAtLeastNumSearchNodes(1); + restoreSnapshotAndEnsureGreen(client, snapshotName2, repoName); + client().admin().cluster().deleteSnapshot(new DeleteSnapshotRequest(repoName, snapshotName1)).actionGet(); + } + private void createIndexWithDocsAndEnsureGreen(int numReplicasIndex, int numOfDocs, String indexName) throws InterruptedException { 
createIndex( indexName, @@ -216,21 +270,11 @@ private void createIndexWithDocsAndEnsureGreen(int numReplicasIndex, int numOfDo ensureGreen(); } - private void takeSnapshot(Client client, String... indices) { - takeSnapshot(client, null, indices); - } - - private void takeSnapshot(Client client, Settings.Builder repositorySettings, String... indices) { - logger.info("--> Create a repository"); - if (repositorySettings == null) { - createRepository("test-repo", "fs"); - } else { - createRepository("test-repo", "fs", repositorySettings); - } + private void takeSnapshot(Client client, String snapshotName, String repoName, String... indices) { logger.info("--> Take a snapshot"); final CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(repoName, snapshotName) .setWaitForCompletion(true) .setIndices(indices) .get(); @@ -242,16 +286,25 @@ private void takeSnapshot(Client client, Settings.Builder repositorySettings, St ); } + private void createRepositoryWithSettings(Settings.Builder repositorySettings, String repoName) { + logger.info("--> Create a repository"); + if (repositorySettings == null) { + createRepository(repoName, FsRepository.TYPE); + } else { + createRepository(repoName, FsRepository.TYPE, repositorySettings); + } + } + private void deleteIndicesAndEnsureGreen(Client client, String... indices) { assertTrue(client.admin().indices().prepareDelete(indices).get().isAcknowledged()); ensureGreen(); } - private void restoreSnapshotAndEnsureGreen(Client client) { + private void restoreSnapshotAndEnsureGreen(Client client, String snapshotName, String repoName) { logger.info("--> restore indices as 'remote_snapshot'"); client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(repoName, snapshotName) .setRenamePattern("(.+)") .setRenameReplacement("$1-copy") .setStorageType(RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT) diff --git a/server/src/main/java/org/opensearch/ExceptionsHelper.java b/server/src/main/java/org/opensearch/ExceptionsHelper.java index fbfc9beaea468..35e9d23c3502b 100644 --- a/server/src/main/java/org/opensearch/ExceptionsHelper.java +++ b/server/src/main/java/org/opensearch/ExceptionsHelper.java @@ -40,6 +40,7 @@ import org.apache.lucene.index.IndexFormatTooOldException; import org.opensearch.action.ShardOperationFailedException; import org.opensearch.common.Nullable; +import org.opensearch.common.compress.NotXContentException; import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; import org.opensearch.index.Index; import org.opensearch.rest.RestStatus; @@ -94,6 +95,8 @@ public static RestStatus status(Throwable t) { return RestStatus.BAD_REQUEST; } else if (t instanceof OpenSearchRejectedExecutionException) { return RestStatus.TOO_MANY_REQUESTS; + } else if (t instanceof NotXContentException) { + return RestStatus.BAD_REQUEST; } } return RestStatus.INTERNAL_SERVER_ERROR; diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index 4e667d0a9f3a5..aef098403ec2b 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -51,6 +51,7 @@ import org.opensearch.index.shard.ShardId; import org.opensearch.rest.RestStatus; import org.opensearch.search.aggregations.MultiBucketConsumerService; +import 
org.opensearch.snapshots.SnapshotInUseDeletionException; import org.opensearch.transport.TcpTransport; import java.io.IOException; @@ -1611,6 +1612,12 @@ private enum OpenSearchExceptionHandle { ClusterManagerThrottlingException::new, 165, Version.V_2_4_0 + ), + SNAPSHOT_IN_USE_DELETION_EXCEPTION( + SnapshotInUseDeletionException.class, + SnapshotInUseDeletionException::new, + 166, + UNKNOWN_VERSION_ADDED ); final Class<? extends OpenSearchException> exceptionClass; diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index a5f181e0bfbf2..cef8ab1320342 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -67,27 +67,32 @@ public class Version implements Comparable<Version>, ToXContentFragment { * All listed versions MUST be released versions, except the last major, the last minor and the last revision. ONLY those are required * as unreleased versions. * - * Example: assume the last release is 7.3.0 - * The unreleased last major is the next major release, e.g. _8_.0.0 - * The unreleased last minor is the current major with a upped minor: 7._4_.0 - * The unreleased revision is the very release with a upped revision 7.3._1_ + * Example: assume the last release is 2.4.0 + * The unreleased last major is the next major release, e.g. _3_.0.0 + * The unreleased last minor is the current major with an upped minor: 2._5_.0 + * The unreleased revision is the very release with an upped revision 2.4._1_ */ public static final int V_EMPTY_ID = 0; public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST); + // RELEASED public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_0_1 = new Version(2000199, org.apache.lucene.util.Version.LUCENE_9_1_0); - public static final Version V_2_0_2 = new Version(2000299, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_2_0); - public static final Version V_2_1_1 = new Version(2010199, org.apache.lucene.util.Version.LUCENE_9_2_0); public static final Version V_2_2_0 = new Version(2020099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_2_1 = new Version(2020199, org.apache.lucene.util.Version.LUCENE_9_3_0); - public static final Version V_2_2_2 = new Version(2020299, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0); - public static final Version V_2_3_1 = new Version(2030199, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_4_0 = new Version(2040099, org.apache.lucene.util.Version.LUCENE_9_4_1); - public static final Version V_2_4_1 = new Version(2040199, org.apache.lucene.util.Version.LUCENE_9_4_1); - public static final Version V_2_5_0 = new Version(2050099, org.apache.lucene.util.Version.LUCENE_9_4_1); + + // UNRELEASED + public static final Version V_2_4_1 = new Version( + 2040199, + org.apache.lucene.util.Version.fromBits(9, 4, 2) /** needs updated 9.5.0 snapshots */ + ); + public static final Version V_2_5_0 = new Version( + 2050099, + org.apache.lucene.util.Version.fromBits(9, 4, 2) /** needs updated 9.5.0 snapshots */ + ); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_5_0); public static final Version CURRENT = V_3_0_0;
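// The numeric IDs above encode major * 1_000_000 + minor * 10_000 + revision * 100 + 99; e.g. V_2_4_1 is 2 * 1_000_000 + 4 * 10_000 + 1 * 100 + 99 == 2040199 and V_3_0_0 is 3 * 1_000_000 + 99 == 3000099.
diff --git 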
a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java index f5d9528422b58..f83431994a649 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java @@ -31,6 +31,7 @@ package org.opensearch.action.admin.indices.shrink; +import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.admin.indices.alias.Alias; @@ -46,6 +47,7 @@ import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.common.unit.ByteSizeValue; import java.io.IOException; import java.util.Objects; @@ -60,6 +62,8 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements IndicesRequest, ToXContentObject { public static final ObjectParser<ResizeRequest, Void> PARSER = new ObjectParser<>("resize_request"); + private static final ParseField MAX_SHARD_SIZE = new ParseField("max_shard_size"); + static { PARSER.declareField( (parser, request, context) -> request.getTargetIndexRequest().settings(parser.map()), @@ -71,12 +75,19 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements new ParseField("aliases"), ObjectParser.ValueType.OBJECT ); + PARSER.declareField( + ResizeRequest::setMaxShardSize, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_SHARD_SIZE.getPreferredName()), + MAX_SHARD_SIZE, + ObjectParser.ValueType.STRING + ); } private CreateIndexRequest targetIndexRequest; private String sourceIndex; private ResizeType type = ResizeType.SHRINK; private Boolean copySettings = true; + private ByteSizeValue maxShardSize; public ResizeRequest(StreamInput in) throws IOException { super(in); @@ -84,6 +95,9 @@ public ResizeRequest(StreamInput in) throws IOException { sourceIndex = in.readString(); type = in.readEnum(ResizeType.class); copySettings = in.readOptionalBoolean(); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + maxShardSize = in.readOptionalWriteable(ByteSizeValue::new); + } } ResizeRequest() {} @@ -108,6 +122,9 @@ public ActionRequestValidationException validate() { if (type == ResizeType.SPLIT && IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexRequest.settings()) == false) { validationException = addValidationError("index.number_of_shards is required for split operations", validationException); } + if (maxShardSize != null && maxShardSize.getBytes() <= 0) { + validationException = addValidationError("max_shard_size must be greater than 0", validationException); + } assert copySettings == null || copySettings; return validationException; } @@ -123,6 +140,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(sourceIndex); out.writeEnum(type); out.writeOptionalBoolean(copySettings); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalWriteable(maxShardSize); + } } @Override @@ -205,6 +225,24 @@ public Boolean getCopySettings() { return copySettings; } + /** + * Sets the maximum size of a primary shard in the new shrunken index. + * This parameter can be used to calculate the lowest factor of the source index's shards number + * which satisfies the maximum shard size requirement. 
+ * + * @param maxShardSize the maximum size of a primary shard in the new shrunken index + */ + public void setMaxShardSize(ByteSizeValue maxShardSize) { + this.maxShardSize = maxShardSize; + } + + /** + * Returns the maximum size of a primary shard in the new shrunken index. + */ + public ByteSizeValue getMaxShardSize() { + return maxShardSize; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -221,6 +259,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } builder.endObject(); + if (maxShardSize != null) { + builder.field(MAX_SHARD_SIZE.getPreferredName(), maxShardSize); + } } builder.endObject(); return builder; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java index 418e83a5431ec..eb05c0a69b78b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java @@ -37,6 +37,7 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ByteSizeValue; /** * Transport request builder for resizing an index @@ -95,4 +96,12 @@ public ResizeRequestBuilder setResizeType(ResizeType type) { this.request.setResizeType(type); return this; } + + /** + * Sets the maximum size of a primary shard in the new shrunken index. + */ + public ResizeRequestBuilder setMaxShardSize(ByteSizeValue maxShardSize) { + this.request.setMaxShardSize(maxShardSize); + return this; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java index ba079aeb03921..7f55e5efe801b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java @@ -57,6 +57,8 @@ import org.opensearch.index.shard.ShardId; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.index.store.StoreStats; import java.io.IOException; import java.util.Locale; @@ -141,11 +143,12 @@ protected void clusterManagerOperation( .prepareStats(sourceIndex) .clear() .setDocs(true) + .setStore(true) .execute(ActionListener.delegateFailure(listener, (delegatedListener, indicesStatsResponse) -> { CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(resizeRequest, state, i -> { IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i); return shard == null ? 
null : shard.getPrimary().getDocs(); - }, sourceIndex, targetIndex); + }, indicesStatsResponse.getPrimaries().store, sourceIndex, targetIndex); createIndexService.createIndex( updateRequest, ActionListener.map( @@ -162,6 +165,7 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest( final ResizeRequest resizeRequest, final ClusterState state, final IntFunction<DocsStats> perShardDocStats, + final StoreStats primaryShardsStoreStats, String sourceIndexName, String targetIndexName ) { @@ -176,12 +180,22 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest( targetIndexSettingsBuilder.remove(IndexMetadata.SETTING_HISTORY_UUID); final Settings targetIndexSettings = targetIndexSettingsBuilder.build(); final int numShards; + + // max_shard_size is only supported for shrink + ByteSizeValue maxShardSize = resizeRequest.getMaxShardSize(); + if (resizeRequest.getResizeType() != ResizeType.SHRINK && maxShardSize != null) { + throw new IllegalArgumentException("Unsupported parameter [max_shard_size]"); + } + if (IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) { numShards = IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings); + if (resizeRequest.getResizeType() == ResizeType.SHRINK && maxShardSize != null) { + throw new IllegalArgumentException("Cannot set max_shard_size and index.number_of_shards at the same time!"); + } } else { assert resizeRequest.getResizeType() != ResizeType.SPLIT : "split must specify the number of shards explicitly"; if (resizeRequest.getResizeType() == ResizeType.SHRINK) { - numShards = 1; + numShards = calculateTargetIndexShardsNum(maxShardSize, primaryShardsStoreStats, metadata); } else { assert resizeRequest.getResizeType() == ResizeType.CLONE; numShards = metadata.getNumberOfShards(); @@ -250,6 +264,46 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest( .copySettings(resizeRequest.getCopySettings() == null ? false : resizeRequest.getCopySettings()); } + + /** + * Calculates the target index's shards count according to max_shard_size and the source index's storage (only primary shards included) + * for shrink. The target index's shards count is the lowest factor of the source index's primary shards count which satisfies the + * maximum shard size requirement. If max_shard_size is less than the source index's single shard size, then the target index's shards count + * will be equal to the source index's shards count.
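+ * For example, shrinking a 6-shard source index whose primary shards total 25gb with max_shard_size of 10gb requires at least ceil(25 / 10) = 3 shards; 3 is a factor of 6, so the target index gets 3 primary shards.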
+     * @param maxShardSize the maximum size of a primary shard in the target index
+     * @param sourceIndexShardStoreStats primary shards' store stats of the source index
+     * @param sourceIndexMetaData source index's metadata
+     * @return target index's shards number
+     */
+    protected static int calculateTargetIndexShardsNum(
+        ByteSizeValue maxShardSize,
+        StoreStats sourceIndexShardStoreStats,
+        IndexMetadata sourceIndexMetaData
+    ) {
+        if (maxShardSize == null
+            || sourceIndexShardStoreStats == null
+            || maxShardSize.getBytes() == 0
+            || sourceIndexShardStoreStats.getSizeInBytes() == 0) {
+            return 1;
+        }
+
+        int sourceIndexShardsNum = sourceIndexMetaData.getNumberOfShards();
+        // calculate the minimum shards count according to the source index's storage; ceiling ensures that the minimum shards count
+        // is never less than 1
+        int minValue = (int) Math.ceil((double) sourceIndexShardStoreStats.getSizeInBytes() / maxShardSize.getBytes());
+        // if the minimum shards count is greater than or equal to the source index's shards count, the source index's shards count is returned
+        if (minValue >= sourceIndexShardsNum) {
+            return sourceIndexShardsNum;
+        }
+
+        // find the lowest factor of the source index's shards count here, because the minimum shards count may not be a factor
+        for (int i = minValue; i < sourceIndexShardsNum; i++) {
+            if (sourceIndexShardsNum % i == 0) {
+                return i;
+            }
+        }
+        return sourceIndexShardsNum;
+    }
+
     @Override
     protected String getClusterManagerActionName(DiscoveryNode node) {
         return super.getClusterManagerActionName(node);
diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java
index 212450515b57e..af0408453e652 100644
--- a/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java
+++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java
@@ -53,6 +53,7 @@
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Set;
 import java.util.function.Consumer;
 import java.util.function.Function;

@@ -78,6 +79,8 @@ public final class BulkRequestParser {
     private static final ParseField IF_PRIMARY_TERM = new ParseField("if_primary_term");
     private static final ParseField REQUIRE_ALIAS = new ParseField(DocWriteRequest.REQUIRE_ALIAS);

+    private static final Set<String> VALID_ACTIONS = Set.of("create", "delete", "index", "update");
+
     private static int findNextMarker(byte marker, int from, BytesReference data) {
         final int res = data.indexOf(marker, from);
         if (res != -1) {
@@ -177,6 +180,15 @@ public void parse(
                     );
                 }
                 String action = parser.currentName();
+                if (action == null || VALID_ACTIONS.contains(action) == false) {
+                    throw new IllegalArgumentException(
+                        "Malformed action/metadata line ["
+                            + line
+                            + "], expected one of [create, delete, index, update] but found ["
+                            + action
+                            + "]"
+                    );
+                }
                 String index = defaultIndex;
                 String id = null;
diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissioningFailedException.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissioningFailedException.java
index fe1b9368ac712..9d1325ccf4912 100644
--- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissioningFailedException.java
+++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissioningFailedException.java
@@ -11,6 +11,7 @@
 import org.opensearch.OpenSearchException;
 import org.opensearch.common.io.stream.StreamInput;
 import org.opensearch.common.io.stream.StreamOutput;
+import org.opensearch.rest.RestStatus;

 import java.io.IOException;

@@ -52,4 +53,9 @@ public void writeTo(StreamOutput out) throws IOException {
     public DecommissionAttribute decommissionAttribute() {
         return decommissionAttribute;
     }
+
+    @Override
+    public RestStatus status() {
+        return RestStatus.BAD_REQUEST;
+    }
 }
diff --git a/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamInput.java b/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamInput.java
index 5feb994171b65..f299da0c1ac1e 100644
--- a/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamInput.java
+++ b/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamInput.java
@@ -36,6 +36,7 @@
 import org.opensearch.common.io.stream.FilterStreamInput;
 import org.opensearch.common.io.stream.StreamInput;

+import java.io.EOFException;
 import java.io.IOException;
 import java.util.zip.CRC32;
 import java.util.zip.Checksum;
@@ -117,7 +118,11 @@ public void reset() throws IOException {

     @Override
     public int read() throws IOException {
-        return readByte() & 0xFF;
+        try {
+            return readByte() & 0xFF;
+        } catch (EOFException e) {
+            return -1;
+        }
     }

     @Override
diff --git a/server/src/main/java/org/opensearch/index/translog/Checkpoint.java b/server/src/main/java/org/opensearch/index/translog/Checkpoint.java
index ade28791b2e27..8df574ed8374f 100644
--- a/server/src/main/java/org/opensearch/index/translog/Checkpoint.java
+++ b/server/src/main/java/org/opensearch/index/translog/Checkpoint.java
@@ -59,7 +59,7 @@
  *
  * @opensearch.internal
  */
-final class Checkpoint {
+public final class Checkpoint {

     final long offset;
     final int numOps;
@@ -262,6 +262,14 @@ public synchronized byte[] toByteArray() {
         return byteOutputStream.toByteArray();
     }

+    public long getMinTranslogGeneration() {
+        return minTranslogGeneration;
+    }
+
+    public long getGeneration() {
+        return generation;
+    }
+
     @Override
     public boolean equals(Object o) {
         if (this == o) return true;
diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogReader.java b/server/src/main/java/org/opensearch/index/translog/TranslogReader.java
index 9d22fe0a498eb..205229949da77 100644
--- a/server/src/main/java/org/opensearch/index/translog/TranslogReader.java
+++ b/server/src/main/java/org/opensearch/index/translog/TranslogReader.java
@@ -138,7 +138,7 @@ public int totalOperations() {
     }

     @Override
-    final Checkpoint getCheckpoint() {
+    public final Checkpoint getCheckpoint() {
         return checkpoint;
     }

diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java
new file mode 100644
index 0000000000000..36d9d71217837
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java
@@ -0,0 +1,71 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.ActionRunnable;
+import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.common.blobstore.BlobStore;
+import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.concurrent.ExecutorService;
+
+/**
+ * Service that handles remote transfer of translog and checkpoint files
+ *
+ * @opensearch.internal
+ */
+public class BlobStoreTransferService implements TransferService {
+
+    private final BlobStore blobStore;
+    private final ExecutorService executorService;
+
+    private static final Logger logger = LogManager.getLogger(BlobStoreTransferService.class);
+
+    public BlobStoreTransferService(BlobStore blobStore, ExecutorService executorService) {
+        this.blobStore = blobStore;
+        this.executorService = executorService;
+    }
+
+    @Override
+    public void uploadBlobAsync(
+        final TransferFileSnapshot fileSnapshot,
+        Iterable<String> remoteTransferPath,
+        ActionListener<TransferFileSnapshot> listener
+    ) {
+        assert remoteTransferPath instanceof BlobPath;
+        BlobPath blobPath = (BlobPath) remoteTransferPath;
+        executorService.execute(ActionRunnable.wrap(listener, l -> {
+            try (InputStream inputStream = fileSnapshot.inputStream()) {
+                blobStore.blobContainer(blobPath)
+                    .writeBlobAtomic(fileSnapshot.getName(), inputStream, fileSnapshot.getContentLength(), true);
+                l.onResponse(fileSnapshot);
+            } catch (Exception e) {
+                logger.error(() -> new ParameterizedMessage("Failed to upload blob {}", fileSnapshot.getName()), e);
+                l.onFailure(new FileTransferException(fileSnapshot, e));
+            }
+        }));
+    }
+
+    @Override
+    public void uploadBlob(final TransferFileSnapshot fileSnapshot, Iterable<String> remoteTransferPath) throws IOException {
+        assert remoteTransferPath instanceof BlobPath;
+        BlobPath blobPath = (BlobPath) remoteTransferPath;
+        try (InputStream inputStream = fileSnapshot.inputStream()) {
+            blobStore.blobContainer(blobPath).writeBlobAtomic(fileSnapshot.getName(), inputStream, fileSnapshot.getContentLength(), true);
+        }
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java
new file mode 100644
index 0000000000000..e8c06e3d251c7
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/FileSnapshot.java
@@ -0,0 +1,223 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.opensearch.common.Nullable;
+import org.opensearch.common.io.stream.BytesStreamInput;
+import org.opensearch.common.io.stream.InputStreamStreamInput;
+import org.opensearch.core.internal.io.IOUtils;
+import org.opensearch.index.translog.BufferedChecksumStreamInput;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.channels.Channels;
+import java.nio.channels.FileChannel;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+import java.util.Objects;
+
+/**
+ * Snapshot of a single file that gets transferred
+ *
+ * @opensearch.internal
+ */
+public class FileSnapshot implements Closeable {
+
+    private final String name;
+    @Nullable
+    private final FileChannel fileChannel;
+    @Nullable
+    private Path path;
+    @Nullable
+    private byte[] content;
+
+    private FileSnapshot(Path path) throws IOException {
+        Objects.requireNonNull(path);
+        this.name = path.getFileName().toString();
+        this.path = path;
+        this.fileChannel = FileChannel.open(path, StandardOpenOption.READ);
+    }
+
+    private FileSnapshot(String name, byte[] content) {
+        Objects.requireNonNull(name);
+        this.name = name;
+        this.content = content;
+        this.fileChannel = null;
+    }
+
+    public Path getPath() {
+        return path;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public long getContentLength() throws IOException {
+        return fileChannel == null ? content.length : fileChannel.size();
+    }
+
+    public InputStream inputStream() throws IOException {
+        return fileChannel != null
+            ? new BufferedChecksumStreamInput(
+                new InputStreamStreamInput(Channels.newInputStream(fileChannel), fileChannel.size()),
+                path.toString()
+            )
+            : new BufferedChecksumStreamInput(new BytesStreamInput(content), name);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(name, content, path);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        FileSnapshot other = (FileSnapshot) o;
+        return Objects.equals(this.name, other.name)
+            && Objects.equals(this.content, other.content)
+            && Objects.equals(this.path, other.path);
+    }
+
+    @Override
+    public String toString() {
+        return new StringBuilder("FileInfo [").append(" name = ")
+            .append(name)
+            .append(", path = ")
+            .append(path.toUri())
+            .append("]")
+            .toString();
+    }
+
+    @Override
+    public void close() throws IOException {
+        IOUtils.close(fileChannel);
+    }
+
+    /**
+     * Snapshot of a single file with primary term that gets transferred
+     *
+     * @opensearch.internal
+     */
+    public static class TransferFileSnapshot extends FileSnapshot {
+
+        private final long primaryTerm;
+
+        public TransferFileSnapshot(Path path, long primaryTerm) throws IOException {
+            super(path);
+            this.primaryTerm = primaryTerm;
+        }
+
+        public TransferFileSnapshot(String name, byte[] content, long primaryTerm) throws IOException {
+            super(name, content);
+            this.primaryTerm = primaryTerm;
+        }
+
+        public long getPrimaryTerm() {
+            return primaryTerm;
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(primaryTerm, super.hashCode());
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (super.equals(o)) {
+                if (this == o) return true;
+                if (getClass() != o.getClass()) return false;
+                TransferFileSnapshot other = (TransferFileSnapshot) o;
+                return Objects.equals(this.primaryTerm, other.primaryTerm);
+            }
+            return false;
+        }
+    }
+
+    /**
+     * Snapshot of a single .tlog file that gets
transferred + * + * @opensearch.internal + */ + public static final class TranslogFileSnapshot extends TransferFileSnapshot { + + private final long generation; + + public TranslogFileSnapshot(long primaryTerm, long generation, Path path) throws IOException { + super(path, primaryTerm); + this.generation = generation; + } + + public long getGeneration() { + return generation; + } + + @Override + public int hashCode() { + return Objects.hash(generation, super.hashCode()); + } + + @Override + public boolean equals(Object o) { + if (super.equals(o)) { + if (this == o) return true; + if (getClass() != o.getClass()) return false; + TranslogFileSnapshot other = (TranslogFileSnapshot) o; + return Objects.equals(this.generation, other.generation); + } + return false; + } + } + + /** + * Snapshot of a single .ckp file that gets transferred + * + * @opensearch.internal + */ + public static final class CheckpointFileSnapshot extends TransferFileSnapshot { + + private final long generation; + + private final long minTranslogGeneration; + + public CheckpointFileSnapshot(long primaryTerm, long generation, long minTranslogGeneration, Path path) throws IOException { + super(path, primaryTerm); + this.minTranslogGeneration = minTranslogGeneration; + this.generation = generation; + } + + public long getGeneration() { + return generation; + } + + public long getMinTranslogGeneration() { + return minTranslogGeneration; + } + + @Override + public int hashCode() { + return Objects.hash(generation, minTranslogGeneration, super.hashCode()); + } + + @Override + public boolean equals(Object o) { + if (super.equals(o)) { + if (this == o) return true; + if (getClass() != o.getClass()) return false; + CheckpointFileSnapshot other = (CheckpointFileSnapshot) o; + return Objects.equals(this.minTranslogGeneration, other.minTranslogGeneration) + && Objects.equals(this.generation, other.generation); + } + return false; + } + } +} diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferException.java b/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferException.java new file mode 100644 index 0000000000000..89a4135d2409b --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferException.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot;
+
+/**
+ * Exception when a single file transfer encounters a failure
+ *
+ * @opensearch.internal
+ */
+public class FileTransferException extends RuntimeException {
+
+    private final TransferFileSnapshot fileSnapshot;
+
+    public FileTransferException(TransferFileSnapshot fileSnapshot, Throwable cause) {
+        super(cause);
+        this.fileSnapshot = fileSnapshot;
+    }
+
+    public TransferFileSnapshot getFileSnapshot() {
+        return fileSnapshot;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java
new file mode 100644
index 0000000000000..ed6c185352833
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java
@@ -0,0 +1,43 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.opensearch.action.ActionListener;
+import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot;
+
+import java.io.IOException;
+
+/**
+ * Interface for the translog transfer service responsible for interacting with a remote store
+ *
+ * @opensearch.internal
+ */
+public interface TransferService {
+
+    /**
+     * Uploads the {@link TransferFileSnapshot} async, once the upload is complete the callback is invoked
+     * @param fileSnapshot the file snapshot to upload
+     * @param remotePath the remote path where upload should be made
+     * @param listener the callback to be invoked once upload completes successfully/fails
+     */
+    void uploadBlobAsync(
+        final TransferFileSnapshot fileSnapshot,
+        Iterable<String> remotePath,
+        ActionListener<TransferFileSnapshot> listener
+    );
+
+    /**
+     * Uploads the {@link TransferFileSnapshot} blob
+     * @param fileSnapshot the file snapshot to upload
+     * @param remotePath the remote path where upload should be made
+     * @throws IOException the exception while transferring the data
+     */
+    void uploadBlob(final TransferFileSnapshot fileSnapshot, Iterable<String> remotePath) throws IOException;
+
+}
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java
new file mode 100644
index 0000000000000..b4c1c97f04a7d
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java
@@ -0,0 +1,42 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot;
+import org.opensearch.index.translog.transfer.FileSnapshot.CheckpointFileSnapshot;
+import org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot;
+
+import java.util.Set;
+
+/**
+ * The snapshot of the generational translog and checkpoint files and its corresponding metadata that is transferred
+ * to the {@link TransferService}
+ *
+ * @opensearch.internal
+ */
+public interface TransferSnapshot {
+
+    /**
+     * The snapshot of the checkpoint generational files
+     * @return the set of {@link CheckpointFileSnapshot}
+     */
+    Set<TransferFileSnapshot> getCheckpointFileSnapshots();
+
+    /**
+     * The snapshot of the translog generational files
+     * @return the set of {@link TranslogFileSnapshot}
+     */
+    Set<TransferFileSnapshot> getTranslogFileSnapshots();
+
+    /**
+     * The translog transfer metadata of this {@link TransferSnapshot}
+     * @return the translog transfer metadata
+     */
+    TranslogTransferMetadata getTranslogTransferMetadata();
+}
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java
new file mode 100644
index 0000000000000..30b81627614b7
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java
@@ -0,0 +1,148 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.opensearch.common.collect.Tuple;
+import org.opensearch.index.translog.TranslogReader;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.LongStream;
+
+import static org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot;
+import static org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot;
+import static org.opensearch.index.translog.transfer.FileSnapshot.CheckpointFileSnapshot;
+
+/**
+ * Implementation for a {@link TransferSnapshot} which builds the snapshot from the translog and checkpoint files present on the local disk
+ *
+ * @opensearch.internal
+ */
+public class TranslogCheckpointTransferSnapshot implements TransferSnapshot {
+
+    private final Set<Tuple<TranslogFileSnapshot, CheckpointFileSnapshot>> translogCheckpointFileInfoTupleSet;
+    private final int size;
+    private final long generation;
+    private final long primaryTerm;
+    private long minTranslogGeneration;
+
+    TranslogCheckpointTransferSnapshot(long primaryTerm, long generation, int size) {
+        translogCheckpointFileInfoTupleSet = new HashSet<>(size);
+        this.size = size;
+        this.generation = generation;
+        this.primaryTerm = primaryTerm;
+    }
+
+    private void add(TranslogFileSnapshot translogFileSnapshot, CheckpointFileSnapshot checkPointFileSnapshot) {
+        translogCheckpointFileInfoTupleSet.add(Tuple.tuple(translogFileSnapshot, checkPointFileSnapshot));
+        assert translogFileSnapshot.getGeneration() == checkPointFileSnapshot.getGeneration();
+    }
+
+    private void setMinTranslogGeneration(long minTranslogGeneration) {
+        this.minTranslogGeneration = minTranslogGeneration;
+    }
+
+    @Override
+    public Set<TransferFileSnapshot> getTranslogFileSnapshots() {
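+        // each tuple pairs one generation's translog file (v1) with its checkpoint file (v2); project out the translog side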
+        return translogCheckpointFileInfoTupleSet.stream().map(Tuple::v1).collect(Collectors.toSet());
+    }
+
+    @Override
+    public TranslogTransferMetadata getTranslogTransferMetadata() {
+        return new TranslogTransferMetadata(primaryTerm, generation, minTranslogGeneration, translogCheckpointFileInfoTupleSet.size() * 2);
+    }
+
+    @Override
+    public Set<TransferFileSnapshot> getCheckpointFileSnapshots() {
+        return translogCheckpointFileInfoTupleSet.stream().map(Tuple::v2).collect(Collectors.toSet());
+    }
+
+    @Override
+    public String toString() {
+        return new StringBuilder("TranslogTransferSnapshot [").append(" primary term = ")
+            .append(primaryTerm)
+            .append(", generation = ")
+            .append(generation)
+            .append(" ]")
+            .toString();
+    }
+
+    /**
+     * Builder for {@link TranslogCheckpointTransferSnapshot}
+     */
+    public static class Builder {
+        private final long primaryTerm;
+        private final long generation;
+        private final List<TranslogReader> readers;
+        private final Function<Long, String> checkpointGenFileNameMapper;
+        private final Path location;
+
+        public Builder(
+            long primaryTerm,
+            long generation,
+            Path location,
+            List<TranslogReader> readers,
+            Function<Long, String> checkpointGenFileNameMapper
+        ) {
+            this.primaryTerm = primaryTerm;
+            this.generation = generation;
+            this.readers = readers;
+            this.checkpointGenFileNameMapper = checkpointGenFileNameMapper;
+            this.location = location;
+        }
+
+        public TranslogCheckpointTransferSnapshot build() throws IOException {
+            final List<Long> generations = new LinkedList<>();
+            long highestGeneration = Long.MIN_VALUE;
+            long highestGenPrimaryTerm = Long.MIN_VALUE;
+            long lowestGeneration = Long.MAX_VALUE;
+            long highestGenMinTranslogGeneration = Long.MIN_VALUE;
+            TranslogCheckpointTransferSnapshot translogTransferSnapshot = new TranslogCheckpointTransferSnapshot(
+                primaryTerm,
+                generation,
+                readers.size()
+            );
+            for (TranslogReader reader : readers) {
+                final long readerGeneration = reader.getGeneration();
+                final long readerPrimaryTerm = reader.getPrimaryTerm();
+                final long minTranslogGeneration = reader.getCheckpoint().getMinTranslogGeneration();
+                final long checkpointGeneration = reader.getCheckpoint().getGeneration();
+                Path translogPath = reader.path();
+                Path checkpointPath = location.resolve(checkpointGenFileNameMapper.apply(readerGeneration));
+                generations.add(readerGeneration);
+                translogTransferSnapshot.add(
+                    new TranslogFileSnapshot(readerPrimaryTerm, readerGeneration, translogPath),
+                    new CheckpointFileSnapshot(readerPrimaryTerm, checkpointGeneration, minTranslogGeneration, checkpointPath)
+                );
+                if (readerGeneration > highestGeneration) {
+                    highestGeneration = readerGeneration;
+                    highestGenMinTranslogGeneration = minTranslogGeneration;
+                    highestGenPrimaryTerm = readerPrimaryTerm;
+                }
+                lowestGeneration = Math.min(lowestGeneration, readerGeneration);
+            }
+            translogTransferSnapshot.setMinTranslogGeneration(highestGenMinTranslogGeneration);
+
+            assert this.primaryTerm == highestGenPrimaryTerm : "inconsistent primary term";
+            assert this.generation == highestGeneration : "inconsistent generation";
+            // the collected generations must form a contiguous range [lowestGeneration, highestGeneration]
+            assert LongStream.iterate(lowestGeneration, i -> i + 1)
+                .limit(highestGeneration - lowestGeneration + 1)
+                .boxed()
+                .collect(Collectors.toList())
+                .equals(generations.stream().sorted().collect(Collectors.toList())) : "generation gaps found";
+            return translogTransferSnapshot;
+        }
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
new file mode 100644
index 0000000000000..02ebab8ed6826
---
/dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
@@ -0,0 +1,147 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.LatchedActionListener;
+import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.index.translog.transfer.listener.FileTransferListener;
+import org.opensearch.index.translog.transfer.listener.TranslogTransferListener;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.function.UnaryOperator;
+import java.util.stream.Collectors;
+
+import static org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot;
+import static org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot;
+
+/**
+ * The class responsible for orchestrating the transfer of a {@link TransferSnapshot} via a {@link TransferService}
+ *
+ * @opensearch.internal
+ */
+public class TranslogTransferManager {
+
+    private final TransferService transferService;
+    private final BlobPath remoteBaseTransferPath;
+    private final FileTransferListener fileTransferListener;
+    private final UnaryOperator<Set<TransferFileSnapshot>> exclusionFilter;
+
+    private static final long TRANSFER_TIMEOUT_IN_MILLIS = 30000;
+
+    private static final Logger logger = LogManager.getLogger(TranslogTransferManager.class);
+
+    public TranslogTransferManager(
+        TransferService transferService,
+        BlobPath remoteBaseTransferPath,
+        FileTransferListener fileTransferListener,
+        UnaryOperator<Set<TransferFileSnapshot>> exclusionFilter
+    ) {
+        this.transferService = transferService;
+        this.remoteBaseTransferPath = remoteBaseTransferPath;
+        this.fileTransferListener = fileTransferListener;
+        this.exclusionFilter = exclusionFilter;
+    }
+
+    public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTransferListener translogTransferListener)
+        throws IOException {
+        List<Exception> exceptionList = new ArrayList<>(transferSnapshot.getTranslogTransferMetadata().getCount());
+        Set<TransferFileSnapshot> toUpload = new HashSet<>(transferSnapshot.getTranslogTransferMetadata().getCount());
+        try {
+            toUpload.addAll(exclusionFilter.apply(transferSnapshot.getTranslogFileSnapshots()));
+            toUpload.addAll(exclusionFilter.apply(transferSnapshot.getCheckpointFileSnapshots()));
+            final CountDownLatch latch = new CountDownLatch(toUpload.size());
+            LatchedActionListener<TransferFileSnapshot> latchedActionListener = new LatchedActionListener<>(
+                ActionListener.wrap(fileTransferListener::onSuccess, ex -> {
+                    assert ex instanceof FileTransferException;
+                    logger.error(
+                        () -> new ParameterizedMessage(
+                            "Exception during transfer for file {}",
+                            ((FileTransferException) ex).getFileSnapshot().getName()
+                        ),
+                        ex
+                    );
+                    FileTransferException e = (FileTransferException) ex;
+                    fileTransferListener.onFailure(e.getFileSnapshot(), ex);
+                    exceptionList.add(ex);
+                }),
+                latch
+            );
+            toUpload.forEach(
+                fileSnapshot -> transferService.uploadBlobAsync(
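+                    // each file is uploaded under a remote path segment named for its primary term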
+                    fileSnapshot,
+                    remoteBaseTransferPath.add(String.valueOf(fileSnapshot.getPrimaryTerm())),
+                    latchedActionListener
+                )
+            );
+            try {
+                if (latch.await(TRANSFER_TIMEOUT_IN_MILLIS, TimeUnit.MILLISECONDS) == false) {
+                    Exception ex = new TimeoutException("Timed out waiting for transfer of snapshot " + transferSnapshot + " to complete");
+                    exceptionList.forEach(ex::addSuppressed);
+                    throw ex;
+                }
+            } catch (InterruptedException ex) {
+                exceptionList.forEach(ex::addSuppressed);
+                Thread.currentThread().interrupt();
+                throw ex;
+            }
+            if (exceptionList.isEmpty()) {
+                final TransferFileSnapshot transferFileSnapshot = prepareMetadata(transferSnapshot);
+                transferService.uploadBlob(
+                    transferFileSnapshot,
+                    remoteBaseTransferPath.add(String.valueOf(transferFileSnapshot.getPrimaryTerm()))
+                );
+                translogTransferListener.onUploadComplete(transferSnapshot);
+                return true;
+            } else {
+                Exception ex = new RuntimeException("Failed to upload some files during transfer");
+                exceptionList.forEach(ex::addSuppressed);
+                throw ex;
+            }
+        } catch (Exception ex) {
+            logger.error(() -> new ParameterizedMessage("Transfer failed for snapshot {}", transferSnapshot), ex);
+            translogTransferListener.onUploadFailed(transferSnapshot, ex);
+            return false;
+        }
+    }
+
+    private TransferFileSnapshot prepareMetadata(TransferSnapshot transferSnapshot) throws IOException {
+        Map<String, String> generationPrimaryTermMap = transferSnapshot.getTranslogFileSnapshots().stream().map(s -> {
+            assert s instanceof TranslogFileSnapshot;
+            return (TranslogFileSnapshot) s;
+        })
+            .collect(
+                Collectors.toMap(
+                    snapshot -> String.valueOf(snapshot.getGeneration()),
+                    snapshot -> String.valueOf(snapshot.getPrimaryTerm())
+                )
+            );
+        TranslogTransferMetadata translogTransferMetadata = transferSnapshot.getTranslogTransferMetadata();
+        translogTransferMetadata.setGenerationToPrimaryTermMapper(new HashMap<>(generationPrimaryTermMap));
+        TransferFileSnapshot fileSnapshot = new TransferFileSnapshot(
+            translogTransferMetadata.getFileName(),
+            translogTransferMetadata.createMetadataBytes(),
+            translogTransferMetadata.getPrimaryTerm()
+        );
+
+        return fileSnapshot;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java
new file mode 100644
index 0000000000000..0aae773f593fd
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java
@@ -0,0 +1,127 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.store.OutputStreamIndexOutput;
+import org.apache.lucene.util.SetOnce;
+import org.opensearch.common.bytes.BytesReference;
+import org.opensearch.common.io.stream.BytesStreamOutput;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * The metadata associated with every transfer {@link TransferSnapshot}. The metadata is uploaded at the end of the
+ * translog and generational checkpoint uploads to mark the latest generation and the translog/checkpoint files that are
+ * still referenced by the last checkpoint.
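+ * The metadata file name has the form {primaryTerm}__{generation}__{timestamp}; its body carries a codec header,
+ * the primary term, generation, minimum translog generation and timestamp as longs, the generation-to-primary-term
+ * map, and a codec footer (see getFileName(), write() and createMetadataBytes() below).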
+ *
+ * @opensearch.internal
+ */
+public class TranslogTransferMetadata {
+
+    private final long primaryTerm;
+
+    private final long generation;
+
+    private final long minTranslogGeneration;
+
+    private final long timeStamp;
+
+    private final int count;
+
+    private final SetOnce<Map<String, String>> generationToPrimaryTermMapper = new SetOnce<>();
+
+    private static final String METADATA_SEPARATOR = "__";
+
+    private static final int BUFFER_SIZE = 4096;
+
+    private static final int CURRENT_VERSION = 1;
+
+    private static final String METADATA_CODEC = "md";
+
+    public TranslogTransferMetadata(long primaryTerm, long generation, long minTranslogGeneration, int count) {
+        this.primaryTerm = primaryTerm;
+        this.generation = generation;
+        this.minTranslogGeneration = minTranslogGeneration;
+        this.timeStamp = System.currentTimeMillis();
+        this.count = count;
+    }
+
+    public long getPrimaryTerm() {
+        return primaryTerm;
+    }
+
+    public long getGeneration() {
+        return generation;
+    }
+
+    public long getMinTranslogGeneration() {
+        return minTranslogGeneration;
+    }
+
+    public int getCount() {
+        return count;
+    }
+
+    public void setGenerationToPrimaryTermMapper(Map<String, String> generationToPrimaryTermMap) {
+        generationToPrimaryTermMapper.set(generationToPrimaryTermMap);
+    }
+
+    public String getFileName() {
+        return String.join(
+            METADATA_SEPARATOR,
+            Arrays.asList(String.valueOf(primaryTerm), String.valueOf(generation), String.valueOf(timeStamp))
+        );
+    }
+
+    public byte[] createMetadataBytes() throws IOException {
+        try (BytesStreamOutput output = new BytesStreamOutput()) {
+            try (
+                OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput(
+                    "translog transfer metadata " + primaryTerm,
+                    getFileName(),
+                    output,
+                    BUFFER_SIZE
+                )
+            ) {
+                CodecUtil.writeHeader(indexOutput, METADATA_CODEC, CURRENT_VERSION);
+                write(indexOutput);
+                CodecUtil.writeFooter(indexOutput);
+            }
+            return BytesReference.toBytes(output.bytes());
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(primaryTerm, generation, timeStamp);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        TranslogTransferMetadata other = (TranslogTransferMetadata) o;
+        return Objects.equals(this.primaryTerm, other.primaryTerm)
+            && Objects.equals(this.generation, other.generation)
+            && Objects.equals(this.timeStamp, other.timeStamp);
+    }
+
+    private void write(DataOutput out) throws IOException {
+        out.writeLong(primaryTerm);
+        out.writeLong(generation);
+        out.writeLong(minTranslogGeneration);
+        out.writeLong(timeStamp);
+        out.writeMapOfStrings(generationToPrimaryTermMapper.get());
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/listener/FileTransferListener.java b/server/src/main/java/org/opensearch/index/translog/transfer/listener/FileTransferListener.java
new file mode 100644
index 0000000000000..939b56f109a36
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/listener/FileTransferListener.java
@@ -0,0 +1,32 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.index.translog.transfer.listener; + +import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; + +/** + * The listener to be invoked on the completion or failure of a {@link TransferFileSnapshot} + * + * @opensearch.internal + */ +public interface FileTransferListener { + + /** + * Invoked when the transfer of a single {@link TransferFileSnapshot} succeeds + * @param fileSnapshot the corresponding file snapshot + */ + void onSuccess(TransferFileSnapshot fileSnapshot); + + /** + * Invoked when the transfer of a single {@link TransferFileSnapshot} fails + * @param fileSnapshot the corresponding file snapshot + * @param e the exception while processing the {@link TransferFileSnapshot} + */ + void onFailure(TransferFileSnapshot fileSnapshot, Exception e); +} diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/listener/TranslogTransferListener.java b/server/src/main/java/org/opensearch/index/translog/transfer/listener/TranslogTransferListener.java new file mode 100644 index 0000000000000..c09fd8798e505 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/transfer/listener/TranslogTransferListener.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.translog.transfer.listener; + +import org.opensearch.index.translog.transfer.TransferSnapshot; + +import java.io.IOException; + +/** + * The listener to be invoked on the completion or failure of a {@link TransferSnapshot} + * + * @opensearch.internal + */ +public interface TranslogTransferListener { + + /** + * Invoked when the transfer of {@link TransferSnapshot} succeeds + * @param transferSnapshot the transfer snapshot + * @throws IOException the exception during the transfer of data + */ + void onUploadComplete(TransferSnapshot transferSnapshot) throws IOException; + + /** + * Invoked when the transfer of {@link TransferSnapshot} fails + * @param transferSnapshot the transfer snapshot + * @param ex the exception while processing the {@link TransferSnapshot} + * @throws IOException the exception during the transfer of data + */ + void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/listener/package-info.java b/server/src/main/java/org/opensearch/index/translog/transfer/listener/package-info.java new file mode 100644 index 0000000000000..edb7f453515b1 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/transfer/listener/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+/** Listeners invoked on the success or failure of translog transfer operations */
+package org.opensearch.index.translog.transfer.listener;
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/package-info.java b/server/src/main/java/org/opensearch/index/translog/transfer/package-info.java
new file mode 100644
index 0000000000000..2ac96b01b0673
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/package-info.java
@@ -0,0 +1,10 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/** Core classes responsible for handling all translog operations */
+package org.opensearch.index.translog.transfer;
diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotInUseDeletionException.java b/server/src/main/java/org/opensearch/snapshots/SnapshotInUseDeletionException.java
new file mode 100644
index 0000000000000..e93bf5ab0cd91
--- /dev/null
+++ b/server/src/main/java/org/opensearch/snapshots/SnapshotInUseDeletionException.java
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.snapshots;
+
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.rest.RestStatus;
+
+import java.io.IOException;
+
+/**
+ * Thrown if the requested snapshot(s) can't be deleted
+ *
+ * @opensearch.internal
+ */
+public class SnapshotInUseDeletionException extends SnapshotException {
+
+    public SnapshotInUseDeletionException(final String repositoryName, final String snapshotName, final String msg) {
+        super(repositoryName, snapshotName, msg);
+    }
+
+    public SnapshotInUseDeletionException(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    @Override
+    public RestStatus status() {
+        return RestStatus.CONFLICT;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotUtils.java b/server/src/main/java/org/opensearch/snapshots/SnapshotUtils.java
index 073e4f7723077..3ef3523961df8 100644
--- a/server/src/main/java/org/opensearch/snapshots/SnapshotUtils.java
+++ b/server/src/main/java/org/opensearch/snapshots/SnapshotUtils.java
@@ -31,10 +31,15 @@

 package org.opensearch.snapshots;

+import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.opensearch.action.support.IndicesOptions;
+import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
+import org.opensearch.common.collect.ImmutableOpenMap;
 import org.opensearch.common.regex.Regex;
+import org.opensearch.index.IndexModule;
 import org.opensearch.index.IndexNotFoundException;
+import org.opensearch.index.IndexSettings;

 import java.util.ArrayList;
 import java.util.Arrays;
@@ -42,6 +47,8 @@
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
+import java.util.HashMap;
+import java.util.Map;

 /**
  * Snapshot utilities
@@ -135,4 +142,39 @@ public static List<String> filterIndices(List<String> availableIndices, String[]
         }
         return Collections.unmodifiableList(new ArrayList<>(result));
     }
+
+    /**
+     * Validates if there are any remote snapshots backing an index
+     * @param metadata index metadata from cluster state
+     * @param snapshotIds list of snapshot Ids to be verified
+     * @param repoName repo name for which the verification is being done
+     */
+    public static void
validateSnapshotsBackingAnyIndex(
+        ImmutableOpenMap<String, IndexMetadata> metadata,
+        List<SnapshotId> snapshotIds,
+        String repoName
+    ) {
+        final Map<String, SnapshotId> uuidToSnapshotId = new HashMap<>();
+        final Set<String> snapshotsToBeNotDeleted = new HashSet<>();
+        snapshotIds.forEach(snapshotId -> uuidToSnapshotId.put(snapshotId.getUUID(), snapshotId));
+
+        for (ObjectCursor<IndexMetadata> cursor : metadata.values()) {
+            IndexMetadata indexMetadata = cursor.value;
+            String storeType = indexMetadata.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey());
+            if (IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey().equals(storeType)) {
+                String snapshotId = indexMetadata.getSettings().get(IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID.getKey());
+                if (uuidToSnapshotId.get(snapshotId) != null) {
+                    snapshotsToBeNotDeleted.add(uuidToSnapshotId.get(snapshotId).getName());
+                }
+            }
+        }
+
+        if (!snapshotsToBeNotDeleted.isEmpty()) {
+            throw new SnapshotInUseDeletionException(
+                repoName,
+                snapshotsToBeNotDeleted.toString(),
+                "These remote snapshots are backing some indices and hence can't be deleted! No snapshots were deleted."
+            );
+        }
+    }
 }
diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java
index 48b90af98022f..645775c3ec09c 100644
--- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java
+++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java
@@ -132,6 +132,7 @@
 import static java.util.Collections.emptySet;
 import static java.util.Collections.unmodifiableList;
 import static org.opensearch.cluster.SnapshotsInProgress.completed;
+import static org.opensearch.snapshots.SnapshotUtils.validateSnapshotsBackingAnyIndex;

 /**
  * Service responsible for creating snapshots. This service runs all the steps executed on the cluster-manager node during snapshot creation and
@@ -1769,6 +1770,7 @@ public ClusterState execute(ClusterState currentState) throws Exception {
                     snapshotNames,
                     repoName
                 );
+                validateSnapshotsBackingAnyIndex(currentState.getMetadata().getIndices(), snapshotIds, repoName);
                 deleteFromRepoTask = createDeleteStateUpdate(snapshotIds, repoName, repositoryData, Priority.NORMAL, listener);
                 return deleteFromRepoTask.execute(currentState);
             }
diff --git a/server/src/main/java/org/opensearch/tasks/TaskResultsService.java b/server/src/main/java/org/opensearch/tasks/TaskResultsService.java
index 66d3aeb748cf7..accc02624f71c 100644
--- a/server/src/main/java/org/opensearch/tasks/TaskResultsService.java
+++ b/server/src/main/java/org/opensearch/tasks/TaskResultsService.java
@@ -86,7 +86,7 @@ public class TaskResultsService {

     public static final String TASK_RESULT_MAPPING_VERSION_META_FIELD = "version";

-    public static final int TASK_RESULT_MAPPING_VERSION = 3; // must match version in task-index-mapping.json
+    public static final int TASK_RESULT_MAPPING_VERSION = 4; // must match version in task-index-mapping.json

     /**
      * The backoff policy to use when saving a task result fails. The total wait
The total wait diff --git a/server/src/main/resources/org/opensearch/tasks/task-index-mapping.json b/server/src/main/resources/org/opensearch/tasks/task-index-mapping.json index 54e9d39902f03..58b6b2d3bc873 100644 --- a/server/src/main/resources/org/opensearch/tasks/task-index-mapping.json +++ b/server/src/main/resources/org/opensearch/tasks/task-index-mapping.json @@ -1,7 +1,7 @@ { "_doc" : { "_meta": { - "version": 3 + "version": 4 }, "dynamic" : "strict", "properties" : { diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index 616ad0a57bf93..559963b0e0b68 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -106,6 +106,7 @@ import org.opensearch.snapshots.SnapshotException; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInProgressException; +import org.opensearch.snapshots.SnapshotInUseDeletionException; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.transport.ActionNotFoundTransportException; @@ -862,6 +863,7 @@ public void testIds() { ids.put(163, DecommissioningFailedException.class); ids.put(164, NodeDecommissionedException.class); ids.put(165, ClusterManagerThrottlingException.class); + ids.put(166, SnapshotInUseDeletionException.class); Map, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) { diff --git a/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java index e4b79ac54f8fd..5705362cc73f4 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java @@ -38,8 +38,8 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.EmptyClusterInfoService; +import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; @@ -52,7 +52,9 @@ import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.index.shard.DocsStats; +import org.opensearch.index.store.StoreStats; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.gateway.TestGatewayAllocator; @@ -107,6 +109,7 @@ public void testErrorCondition() { new ResizeRequest("target", "source"), state, (i) -> new DocsStats(Integer.MAX_VALUE, between(1, 1000), between(1, 100)), + new StoreStats(between(1, 10000), between(1, 10000)), "source", "target" ) @@ -121,6 +124,7 @@ public void testErrorCondition() { req, clusterState, (i) -> i == 2 || i == 3 ? 
new DocsStats(Integer.MAX_VALUE / 2, between(1, 1000), between(1, 10000)) : null, + new StoreStats(between(1, 10000), between(1, 10000)), "source", "target" ); @@ -139,6 +143,7 @@ public void testErrorCondition() { req, clusterState, (i) -> new DocsStats(between(10, 1000), between(1, 10), between(1, 10000)), + new StoreStats(between(1, 10000), between(1, 10000)), "source", "target" ); @@ -167,6 +172,7 @@ public void testErrorCondition() { new ResizeRequest("target", "source"), clusterState, (i) -> new DocsStats(between(1, 1000), between(1, 1000), between(0, 10000)), + new StoreStats(between(1, 10000), between(1, 10000)), "source", "target" ); @@ -193,13 +199,27 @@ public void testPassNumRoutingShards() { ResizeRequest resizeRequest = new ResizeRequest("target", "source"); resizeRequest.setResizeType(ResizeType.SPLIT); resizeRequest.getTargetIndexRequest().settings(Settings.builder().put("index.number_of_shards", 2).build()); - TransportResizeAction.prepareCreateIndexRequest(resizeRequest, clusterState, null, "source", "target"); + TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + null, + new StoreStats(between(1, 10000), between(1, 10000)), + "source", + "target" + ); resizeRequest.getTargetIndexRequest() .settings( Settings.builder().put("index.number_of_routing_shards", randomIntBetween(2, 10)).put("index.number_of_shards", 2).build() ); - TransportResizeAction.prepareCreateIndexRequest(resizeRequest, clusterState, null, "source", "target"); + TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + null, + new StoreStats(between(1, 10000), between(1, 10000)), + "source", + "target" + ); } public void testPassNumRoutingShardsAndFail() { @@ -224,7 +244,14 @@ public void testPassNumRoutingShardsAndFail() { ResizeRequest resizeRequest = new ResizeRequest("target", "source"); resizeRequest.setResizeType(ResizeType.SPLIT); resizeRequest.getTargetIndexRequest().settings(Settings.builder().put("index.number_of_shards", numShards * 2).build()); - TransportResizeAction.prepareCreateIndexRequest(resizeRequest, clusterState, null, "source", "target"); + TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + null, + new StoreStats(between(1, 10000), between(1, 10000)), + "source", + "target" + ); resizeRequest.getTargetIndexRequest() .settings( @@ -233,7 +260,14 @@ public void testPassNumRoutingShardsAndFail() { ClusterState finalState = clusterState; IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> TransportResizeAction.prepareCreateIndexRequest(resizeRequest, finalState, null, "source", "target") + () -> TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + finalState, + null, + new StoreStats(between(1, 10000), between(1, 10000)), + "source", + "target" + ) ); assertEquals("cannot provide index.number_of_routing_shards on resize", iae.getMessage()); } @@ -266,6 +300,7 @@ public void testShrinkIndexSettings() { target, clusterState, (i) -> stats, + new StoreStats(between(1, 10000), between(1, 10000)), indexName, "target" ); @@ -276,6 +311,206 @@ public void testShrinkIndexSettings() { assertEquals(request.waitForActiveShards(), activeShardCount); } + public void testShrinkWithMaxShardSize() { + String indexName = randomAlphaOfLength(10); + // create one that won't fail + ClusterState clusterState = ClusterState.builder( + createClusterState(indexName, 10, 0, Settings.builder().put("index.blocks.write", true).build()) + 
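                // a 10-shard, write-blocked source index; its factor-based shrink targets are 1, 2, 5 or 10 shards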
).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); + + // Cannot set max_shard_size when split or clone + ResizeRequest resizeRequestForFailure = new ResizeRequest("target", indexName); + ResizeType resizeType = ResizeType.SPLIT; + if (randomBoolean()) { + resizeType = ResizeType.CLONE; + } + resizeRequestForFailure.setResizeType(resizeType); + resizeRequestForFailure.setMaxShardSize(new ByteSizeValue(100)); + resizeRequestForFailure.getTargetIndexRequest() + .settings(Settings.builder().put("index.number_of_shards", randomIntBetween(1, 100)).build()); + ClusterState finalState = clusterState; + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> TransportResizeAction.prepareCreateIndexRequest( + resizeRequestForFailure, + finalState, + null, + new StoreStats(between(1, 10000), between(1, 10000)), + indexName, + "target" + ) + ); + assertEquals("Unsupported parameter [max_shard_size]", iae.getMessage()); + + // Cannot set max_shard_size and index.number_of_shards at the same time + ResizeRequest resizeRequest = new ResizeRequest("target", indexName); + resizeRequest.setResizeType(ResizeType.SHRINK); + resizeRequest.setMaxShardSize(new ByteSizeValue(100)); + resizeRequest.getTargetIndexRequest().settings(Settings.builder().put("index.number_of_shards", randomIntBetween(1, 100)).build()); + iae = expectThrows( + IllegalArgumentException.class, + () -> TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + finalState, + null, + new StoreStats(between(1, 10000), between(1, 10000)), + indexName, + "target" + ) + ); + assertEquals("Cannot set max_shard_size and index.number_of_shards at the same time!", iae.getMessage()); + + AllocationService service = new AllocationService( + new AllocationDeciders(Collections.singleton(new MaxRetryAllocationDecider())), + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), + EmptyClusterInfoService.INSTANCE, + EmptySnapshotsInfoService.INSTANCE + ); + RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); + clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); + // now we start the shard + routingTable = OpenSearchAllocationTestCase.startInitializingShardsAndReroute(service, clusterState, indexName).routingTable(); + clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); + int numSourceShards = clusterState.metadata().index(indexName).getNumberOfShards(); + DocsStats stats = new DocsStats(between(0, (IndexWriter.MAX_DOCS) / numSourceShards), between(1, 1000), between(1, 10000)); + + // target index's shards number must be the lowest factor of the source index's shards number + int expectedShardsNum = 5; + resizeRequest.setMaxShardSize(new ByteSizeValue(25)); + // clear index settings + resizeRequest.getTargetIndexRequest().settings(Settings.builder().build()); + resizeRequest.setWaitForActiveShards(expectedShardsNum); + CreateIndexClusterStateUpdateRequest request = TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + (i) -> stats, + new StoreStats(100, between(1, 10000)), + indexName, + "target" + ); + assertNotNull(request.recoverFrom()); + assertEquals(indexName, request.recoverFrom().getName()); + assertEquals(String.valueOf(expectedShardsNum), request.settings().get("index.number_of_shards")); + assertEquals("shrink_index", request.cause()); + assertEquals(request.waitForActiveShards(), ActiveShardCount.from(expectedShardsNum)); + + // if 
max_shard_size is greater than whole of the source primary shards' storage, + // then the target index will only have one primary shard. + expectedShardsNum = 1; + resizeRequest.setMaxShardSize(new ByteSizeValue(1000)); + // clear index settings + resizeRequest.getTargetIndexRequest().settings(Settings.builder().build()); + resizeRequest.setWaitForActiveShards(expectedShardsNum); + request = TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + (i) -> stats, + new StoreStats(100, between(1, 10000)), + indexName, + "target" + ); + assertNotNull(request.recoverFrom()); + assertEquals(indexName, request.recoverFrom().getName()); + assertEquals(String.valueOf(expectedShardsNum), request.settings().get("index.number_of_shards")); + assertEquals("shrink_index", request.cause()); + assertEquals(request.waitForActiveShards(), ActiveShardCount.from(expectedShardsNum)); + + // if max_shard_size is less than the primary shard's storage of the source index, + // then the target index's shards number will be equal to the source index's. + expectedShardsNum = numSourceShards; + resizeRequest.setMaxShardSize(new ByteSizeValue(1)); + // clear index settings + resizeRequest.getTargetIndexRequest().settings(Settings.builder().build()); + resizeRequest.setWaitForActiveShards(expectedShardsNum); + request = TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + (i) -> stats, + new StoreStats(100, between(1, 10000)), + indexName, + "target" + ); + assertNotNull(request.recoverFrom()); + assertEquals(indexName, request.recoverFrom().getName()); + assertEquals(String.valueOf(expectedShardsNum), request.settings().get("index.number_of_shards")); + assertEquals("shrink_index", request.cause()); + assertEquals(request.waitForActiveShards(), ActiveShardCount.from(expectedShardsNum)); + } + + public void testCalculateTargetIndexShardsNum() { + String indexName = randomAlphaOfLength(10); + ClusterState clusterState = ClusterState.builder( + createClusterState(indexName, randomIntBetween(2, 10), 0, Settings.builder().put("index.blocks.write", true).build()) + ).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); + IndexMetadata indexMetadata = clusterState.metadata().index(indexName); + + assertEquals(TransportResizeAction.calculateTargetIndexShardsNum(null, new StoreStats(100, between(1, 10000)), indexMetadata), 1); + assertEquals( + TransportResizeAction.calculateTargetIndexShardsNum( + new ByteSizeValue(0), + new StoreStats(100, between(1, 10000)), + indexMetadata + ), + 1 + ); + assertEquals(TransportResizeAction.calculateTargetIndexShardsNum(new ByteSizeValue(1), null, indexMetadata), 1); + assertEquals(TransportResizeAction.calculateTargetIndexShardsNum(new ByteSizeValue(1), new StoreStats(0, 0), indexMetadata), 1); + assertEquals( + TransportResizeAction.calculateTargetIndexShardsNum( + new ByteSizeValue(1000), + new StoreStats(100, between(1, 10000)), + indexMetadata + ), + 1 + ); + assertEquals( + TransportResizeAction.calculateTargetIndexShardsNum( + new ByteSizeValue(1), + new StoreStats(100, between(1, 10000)), + indexMetadata + ), + indexMetadata.getNumberOfShards() + ); + + clusterState = ClusterState.builder( + createClusterState(indexName, 10, 0, Settings.builder().put("index.blocks.write", true).build()) + ).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); + indexMetadata = clusterState.metadata().index(indexName); + assertEquals( + TransportResizeAction.calculateTargetIndexShardsNum( + new ByteSizeValue(10), + new 
diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java
index d3da77112408b..32a0b3723f7ae 100644
--- a/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java
@@ -234,4 +234,30 @@ public void testParseDeduplicatesParameterStrings() throws IOException {
         assertSame(first.getPipeline(), second.getPipeline());
         assertSame(first.routing(), second.routing());
     }
+
+    public void testFailOnUnsupportedAction() {
+        BytesArray request = new BytesArray("{ \"baz\":{ \"_id\": \"bar\" } }\n{}\n");
+        BulkRequestParser parser = new BulkRequestParser();
+
+        IllegalArgumentException ex = expectThrows(
+            IllegalArgumentException.class,
+            () -> parser.parse(
+                request,
+                "foo",
+                null,
+                null,
+                null,
+                true,
+                false,
+                XContentType.JSON,
+                req -> fail(),
+                req -> fail(),
+                req -> fail()
+            )
+        );
+        assertEquals(
+            "Malformed action/metadata line [1], expected one of [create, delete, index, update] but found [baz]",
+            ex.getMessage()
+        );
+    }
 }
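For contrast with testFailOnUnsupportedAction, a hypothetical companion test using one of the four supported action names parses cleanly under the same call shape (the no-op consumers stand in for the fail() callbacks; this snippet is an illustration, not part of the change):

    public void testAcceptsSupportedAction() throws IOException {
        // "index" is a supported action, so parse() completes without throwing
        // and hands the parsed request to the corresponding consumer.
        BytesArray request = new BytesArray("{ \"index\": { \"_id\": \"bar\" } }\n{ \"field\": \"value\" }\n");
        new BulkRequestParser().parse(request, "foo", null, null, null, true, false, XContentType.JSON, req -> {}, req -> {}, req -> {});
    }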
diff --git a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java
index 87cab4a006a63..014f2d237a306 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java
@@ -901,7 +901,10 @@ public void testWeightedOperationRoutingWeightUndefinedForOneZone() throws Excep
         try {
             ClusterState state = clusterStateForWeightedRouting(indexNames, numShards, numReplicas);
 
-            Settings setting = Settings.builder().put("cluster.routing.allocation.awareness.attributes", "zone").build();
+            Settings setting = Settings.builder()
+                .put("cluster.routing.allocation.awareness.attributes", "zone")
+                .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c")
+                .build();
 
             threadPool = new TestThreadPool("testThatOnlyNodesSupport");
             clusterService = ClusterServiceUtils.createClusterService(threadPool);
@@ -932,8 +935,9 @@ public void testWeightedOperationRoutingWeightUndefinedForOneZone() throws Excep
             );
 
             for (ShardIterator it : groupIterator) {
-                List<ShardRouting> shardRoutings = Collections.singletonList(it.nextOrNull());
-                for (ShardRouting shardRouting : shardRoutings) {
+                while (it.remaining() > 0) {
+                    ShardRouting shardRouting = it.nextOrNull();
+                    assertNotNull(shardRouting);
                     selectedNodes.add(shardRouting.currentNodeId());
                 }
             }
@@ -950,9 +954,8 @@ public void testWeightedOperationRoutingWeightUndefinedForOneZone() throws Excep
             assertFalse(weighAwayNodesInUndefinedZone);
 
             selectedNodes = new HashSet<>();
-            setting = Settings.builder().put("cluster.routing.allocation.awareness.attributes", "zone").build();
 
-            // Updating weighted round robin weights in cluster state
+            // Updating weighted round-robin weights in cluster state
             weights = Map.of("a", 0.0, "b", 1.0);
 
             state = setWeightedRoutingWeights(state, weights);
@@ -964,11 +967,13 @@ public void testWeightedOperationRoutingWeightUndefinedForOneZone() throws Excep
             groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
 
             for (ShardIterator it : groupIterator) {
-                List<ShardRouting> shardRoutings = Collections.singletonList(it.nextOrNull());
-                for (ShardRouting shardRouting : shardRoutings) {
+                while (it.remaining() > 0) {
+                    ShardRouting shardRouting = it.nextOrNull();
+                    assertNotNull(shardRouting);
                     selectedNodes.add(shardRouting.currentNodeId());
                 }
             }
+
             // tests that no shards are assigned to zone with weight zero
             // tests shards are assigned to nodes in zone c
             weighAwayNodesInUndefinedZone = true;
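The two loop rewrites above are the substance of this change: `Collections.singletonList(it.nextOrNull())` inspected only the first shard of each iterator, while draining via `remaining()` verifies every shard copy. The pattern, factored into a hypothetical helper for clarity (not part of the change):

    // Hypothetical helper showing the draining pattern the test now uses:
    // visit every ShardRouting the iterator will serve, not just the first.
    private static Set<String> selectedNodeIds(Iterable<ShardIterator> groupIterator) {
        Set<String> nodeIds = new HashSet<>();
        for (ShardIterator it : groupIterator) {
            while (it.remaining() > 0) {
                ShardRouting shardRouting = it.nextOrNull(); // null only once exhausted
                if (shardRouting != null) {
                    nodeIds.add(shardRouting.currentNodeId());
                }
            }
        }
        return nodeIds;
    }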
diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java
index 72b77bb706065..892ed5a23fb9d 100644
--- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java
+++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java
@@ -2764,8 +2764,21 @@ public void testRestoreShardFromRemoteStore() throws IOException {
 
         // Delete files in store directory to restore from remote directory
         Directory storeDirectory = target.store().directory();
+
         for (String file : storeDirectory.listAll()) {
             storeDirectory.deleteFile(file);
+            // Windows has buggy file-delete logic: an AccessDeniedException is thrown when there is an open
+            // file handle on a particular file. FSDirectory attempts to work around this by swallowing the
+            // exception and moving the file to a pending-delete state, to be retried in the future while
+            // being filtered from listAll invocations.
+            // However, this logic is also buggy: after the first delete attempt we are left in a state where
+            // the file is still on disk and not pending delete.
+            // A second delete attempt properly moves the file to pending deletion, where it is filtered from listAll.
+            if (Arrays.asList(storeDirectory.listAll()).contains(file) && storeDirectory.getPendingDeletions().contains(file) == false) {
+                logger.info("File {} was not deleted and is not pending delete, attempting delete again...", file);
+                storeDirectory.deleteFile(file);
+                assertTrue(storeDirectory.getPendingDeletions().contains(file));
+            }
         }
 
         assertEquals(0, storeDirectory.listAll().length);
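The workaround in testRestoreShardFromRemoteStore is self-contained, but since other Windows-sensitive tests may need the same double-delete dance, it could be factored out; a sketch under that assumption (hypothetical helper, not part of this change):

    // Hypothetical extraction of the retry described in the comment above: the
    // first deleteFile() on Windows can leave the file on disk without marking
    // it pending-delete; a second call reliably moves it to the pending set.
    private static void deleteAllWithWindowsRetry(Directory directory) throws IOException {
        for (String file : directory.listAll()) {
            directory.deleteFile(file);
            if (Arrays.asList(directory.listAll()).contains(file)
                && directory.getPendingDeletions().contains(file) == false) {
                directory.deleteFile(file); // second attempt lands in pending deletions
            }
        }
    }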
diff --git a/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java b/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java
index 65969cc65359e..6f3387a935c03 100644
--- a/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java
+++ b/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java
@@ -16,6 +16,7 @@
 import org.apache.lucene.store.MMapDirectory;
 import org.apache.lucene.store.SimpleFSLockFactory;
 import org.apache.lucene.tests.util.LuceneTestCase;
+import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.Version;
 import org.junit.After;
 import org.junit.Before;
@@ -52,6 +53,7 @@ public class OnDemandBlockSnapshotIndexInputTests extends OpenSearchTestCase {
 
     @Before
     public void init() {
+        assumeFalse("Awaiting Windows fix https://github.com/opensearch-project/OpenSearch/issues/5396", Constants.WINDOWS);
         transferManager = mock(TransferManager.class);
         lockFactory = SimpleFSLockFactory.INSTANCE;
         path = LuceneTestCase.createTempDir("OnDemandBlockSnapshotIndexInputTests");
diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java
new file mode 100644
index 0000000000000..adca47bf64c64
--- /dev/null
+++ b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java
@@ -0,0 +1,123 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.LatchedActionListener;
+import org.opensearch.cluster.metadata.RepositoryMetadata;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.env.Environment;
+import org.opensearch.env.TestEnvironment;
+import org.opensearch.indices.recovery.RecoverySettings;
+import org.opensearch.repositories.Repository;
+import org.opensearch.repositories.blobstore.BlobStoreRepository;
+import org.opensearch.repositories.blobstore.BlobStoreTestUtil;
+import org.opensearch.repositories.fs.FsRepository;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+public class BlobStoreTransferServiceTests extends OpenSearchTestCase {
+
+    private ExecutorService executorService;
+
+    private BlobStoreRepository repository;
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        repository = createRepository();
+        executorService = Executors.newFixedThreadPool(1);
+    }
+
+    public void testUploadBlob() throws IOException {
+        Path testFile = createTempFile();
+        Files.write(testFile, randomByteArrayOfLength(128), StandardOpenOption.APPEND);
+        FileSnapshot.TransferFileSnapshot transferFileSnapshot = new FileSnapshot.TransferFileSnapshot(testFile, randomNonNegativeLong());
+        TransferService transferService = new BlobStoreTransferService(repository.blobStore(), executorService);
+        transferService.uploadBlob(transferFileSnapshot, repository.basePath());
+    }
+
+    public void testUploadBlobAsync() throws IOException, InterruptedException {
+        Path testFile = createTempFile();
+        Files.write(testFile, randomByteArrayOfLength(128), StandardOpenOption.APPEND);
+        AtomicBoolean succeeded = new AtomicBoolean(false);
+        FileSnapshot.TransferFileSnapshot transferFileSnapshot = new FileSnapshot.TransferFileSnapshot(testFile, randomNonNegativeLong());
+        CountDownLatch latch = new CountDownLatch(1);
+        TransferService transferService = new BlobStoreTransferService(repository.blobStore(), executorService);
+        transferService.uploadBlobAsync(transferFileSnapshot, repository.basePath(), new LatchedActionListener<>(new ActionListener<>() {
+            @Override
+            public void onResponse(FileSnapshot.TransferFileSnapshot fileSnapshot) {
+                assert succeeded.compareAndSet(false, true);
+                assertEquals(transferFileSnapshot.getPrimaryTerm(), fileSnapshot.getPrimaryTerm());
+                assertEquals(transferFileSnapshot.getName(), fileSnapshot.getName());
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                throw new AssertionError("Failed to perform uploadBlobAsync", e);
+            }
+        }, latch));
+        assertTrue(latch.await(1000, TimeUnit.MILLISECONDS));
+        assertTrue(succeeded.get());
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        super.tearDown();
+        repository.stop();
+        executorService.shutdown();
+        executorService.awaitTermination(1000, TimeUnit.MILLISECONDS);
+    }
+
+    /** Create a {@link Repository} with a random name. */
+    private BlobStoreRepository createRepository() {
+        Settings settings = Settings.builder().put("location", randomAlphaOfLength(10)).build();
+        RepositoryMetadata repositoryMetadata = new RepositoryMetadata(randomAlphaOfLength(10), FsRepository.TYPE, settings);
+        final ClusterService clusterService = BlobStoreTestUtil.mockClusterService(repositoryMetadata);
+        final FsRepository repository = new FsRepository(
+            repositoryMetadata,
+            createEnvironment(),
+            xContentRegistry(),
+            clusterService,
+            new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))
+        ) {
+            @Override
+            protected void assertSnapshotOrGenericThread() {
+                // eliminate the thread-name check, as we create the repo manually
+            }
+        };
+        clusterService.addStateApplier(event -> repository.updateState(event.state()));
+        // Apply state once to initialize the repo properly, as RepositoriesService would
+        repository.updateState(clusterService.state());
+        repository.start();
+        return repository;
+    }
+
+    /** Create an {@link Environment} with random path.home and path.repo. */
+    private Environment createEnvironment() {
+        Path home = createTempDir();
+        return TestEnvironment.newEnvironment(
+            Settings.builder()
+                .put(Environment.PATH_HOME_SETTING.getKey(), home.toAbsolutePath())
+                .put(Environment.PATH_REPO_SETTING.getKey(), home.resolve("repo").toAbsolutePath())
+                .build()
+        );
+    }
+}
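testUploadBlobAsync's synchronization idiom generalizes beyond this suite: wrap the real listener in a LatchedActionListener so the test thread can await the callback. A minimal sketch with the element type narrowed to String for illustration (not part of the change):

    // The latch counts down after onResponse/onFailure runs on the worker
    // thread, so await() doubles as both a timeout and a completion barrier.
    CountDownLatch latch = new CountDownLatch(1);
    ActionListener<String> listener = new LatchedActionListener<>(
        ActionListener.wrap(response -> assertEquals("expected", response), e -> { throw new AssertionError(e); }),
        latch
    );
    // hand `listener` to any async API under test, then:
    assertTrue(latch.await(1, TimeUnit.SECONDS));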
diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java
new file mode 100644
index 0000000000000..60b7029f18fa6
--- /dev/null
+++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java
@@ -0,0 +1,150 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.translog.transfer;
+
+import org.apache.lucene.tests.util.LuceneTestCase;
+import org.mockito.Mockito;
+import org.opensearch.action.ActionListener;
+import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.index.translog.Translog;
+import org.opensearch.index.translog.transfer.listener.FileTransferListener;
+import org.opensearch.index.translog.transfer.listener.TranslogTransferListener;
+import org.opensearch.test.OpenSearchTestCase;
+import org.opensearch.index.translog.transfer.FileSnapshot.CheckpointFileSnapshot;
+import org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot;
+import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+
+@LuceneTestCase.SuppressFileSystems("*")
+public class TranslogTransferManagerTests extends OpenSearchTestCase {
+
+    private TransferService transferService;
+    private BlobPath remoteBaseTransferPath;
+    private long primaryTerm;
+    private long generation;
+    private long minTranslogGeneration;
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        primaryTerm = randomNonNegativeLong();
+        generation = randomNonNegativeLong();
+        minTranslogGeneration = randomLongBetween(0, generation);
+        remoteBaseTransferPath = new BlobPath().add("base_path");
+        transferService = mock(TransferService.class);
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testTransferSnapshot() throws IOException {
+        AtomicInteger fileTransferSucceeded = new AtomicInteger();
+        AtomicInteger fileTransferFailed = new AtomicInteger();
+        AtomicInteger translogTransferSucceeded = new AtomicInteger();
+        AtomicInteger translogTransferFailed = new AtomicInteger();
+
+        doNothing().when(transferService)
+            .uploadBlob(any(TransferFileSnapshot.class), Mockito.eq(remoteBaseTransferPath.add(String.valueOf(primaryTerm))));
+        doAnswer(invocationOnMock -> {
+            ActionListener<TransferFileSnapshot> listener = (ActionListener<TransferFileSnapshot>) invocationOnMock.getArguments()[2];
+            listener.onResponse((TransferFileSnapshot) invocationOnMock.getArguments()[0]);
+            return null;
+        }).when(transferService).uploadBlobAsync(any(TransferFileSnapshot.class), any(BlobPath.class), any(ActionListener.class));
+
+        TranslogTransferManager translogTransferManager = new TranslogTransferManager(
+            transferService,
+            remoteBaseTransferPath,
+            new FileTransferListener() {
+                @Override
+                public void onSuccess(TransferFileSnapshot fileSnapshot) {
+                    fileTransferSucceeded.incrementAndGet();
+                }
+
+                @Override
+                public void onFailure(TransferFileSnapshot fileSnapshot, Exception e) {
+                    fileTransferFailed.incrementAndGet();
+                }
+            },
+            r -> r
+        );
+
+        assertTrue(translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() {
+            @Override
+            public void onUploadComplete(TransferSnapshot transferSnapshot) {
+                translogTransferSucceeded.incrementAndGet();
+            }
+
+            @Override
+            public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) {
+                translogTransferFailed.incrementAndGet();
+            }
+        }));
+        // createTransferSnapshot() below yields two checkpoint files and two translog files
+        assertEquals(4, fileTransferSucceeded.get());
+        assertEquals(0, fileTransferFailed.get());
+        assertEquals(1, translogTransferSucceeded.get());
+        assertEquals(0, translogTransferFailed.get());
+    }
+
+    private TransferSnapshot createTransferSnapshot() {
+        return new TransferSnapshot() {
+            @Override
+            public Set<TransferFileSnapshot> getCheckpointFileSnapshots() {
+                try {
+                    return Set.of(
+                        new CheckpointFileSnapshot(
+                            primaryTerm,
+                            generation,
+                            minTranslogGeneration,
+                            createTempFile(Translog.TRANSLOG_FILE_PREFIX + generation, Translog.CHECKPOINT_SUFFIX)
+                        ),
+                        new CheckpointFileSnapshot(
+                            primaryTerm,
+                            generation,
+                            minTranslogGeneration,
+                            createTempFile(Translog.TRANSLOG_FILE_PREFIX + (generation - 1), Translog.CHECKPOINT_SUFFIX)
+                        )
+                    );
+                } catch (IOException e) {
+                    throw new AssertionError("Failed to create temp file", e);
+                }
+            }
+
+            @Override
+            public Set<TransferFileSnapshot> getTranslogFileSnapshots() {
+                try {
+                    return Set.of(
+                        new TranslogFileSnapshot(
+                            primaryTerm,
+                            generation,
+                            createTempFile(Translog.TRANSLOG_FILE_PREFIX + generation, Translog.TRANSLOG_FILE_SUFFIX)
+                        ),
+                        new TranslogFileSnapshot(
+                            primaryTerm,
+                            generation - 1,
+                            createTempFile(Translog.TRANSLOG_FILE_PREFIX + (generation - 1), Translog.TRANSLOG_FILE_SUFFIX)
+                        )
+                    );
+                } catch (IOException e) {
+                    throw new AssertionError("Failed to create temp file", e);
+                }
+            }
+
+            @Override
+            public TranslogTransferMetadata getTranslogTransferMetadata() {
+                return new TranslogTransferMetadata(primaryTerm, generation, minTranslogGeneration, randomInt(5));
+            }
+        };
+    }
+}
diff --git a/server/src/test/java/org/opensearch/rest/RestControllerTests.java b/server/src/test/java/org/opensearch/rest/RestControllerTests.java
index d8cfd87a11b50..6787d6e641337 100644
--- a/server/src/test/java/org/opensearch/rest/RestControllerTests.java
+++ b/server/src/test/java/org/opensearch/rest/RestControllerTests.java
@@ -57,6 +57,7 @@
 import org.opensearch.identity.Identity;
 import org.opensearch.authn.internal.InternalAuthenticationManager;
 import org.opensearch.indices.breaker.HierarchyCircuitBreakerService;
+import org.opensearch.rest.action.admin.indices.RestCreateIndexAction;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.client.NoOpNodeClient;
 import org.opensearch.test.rest.FakeRestRequest;
@@ -567,6 +568,20 @@ public void testHandleBadRequestWithHtmlSpecialCharsInUri() {
         assertThat(channel.getRestResponse().content().utf8ToString(), containsString("invalid uri has been requested"));
     }
 
+    public void testHandleBadInputWithCreateIndex() {
+        final FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withPath("/foo")
+            .withMethod(RestRequest.Method.PUT)
+            .withContent(new BytesArray("ddd"), XContentType.JSON)
+            .build();
+        final AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.BAD_REQUEST);
+        restController.registerHandler(RestRequest.Method.PUT, "/foo", new RestCreateIndexAction());
+        restController.dispatchRequest(fakeRestRequest, channel, client.threadPool().getThreadContext());
+        assertEquals(
+            "{\"error\":{\"root_cause\":[{\"type\":\"not_x_content_exception\",\"reason\":\"Compressor detection can only be called on some xcontent bytes or compressed xcontent bytes\"}],\"type\":\"not_x_content_exception\",\"reason\":\"Compressor detection can only be called on some xcontent bytes or compressed xcontent bytes\"},\"status\":400}",
+            channel.getRestResponse().content().utf8ToString()
+        );
+    }
+
     public void testDispatchUnsupportedHttpMethod() {
         final boolean hasContent = randomBoolean();
         final RestRequest request = RestRequest.request(xContentRegistry(), new HttpRequest() {
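The "ddd" body in testHandleBadInputWithCreateIndex fails XContent auto-detection before the handler ever runs, which is what produces the not_x_content_exception above. For comparison, a request that would actually reach RestCreateIndexAction needs a body that is recognizable JSON; a hypothetical companion snippet, not part of the change:

    // Well-formed JSON passes content detection, so dispatch proceeds into the
    // handler instead of returning 400 from the XContent/compressor sniffing.
    final FakeRestRequest okRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withPath("/foo")
        .withMethod(RestRequest.Method.PUT)
        .withContent(new BytesArray("{\"settings\":{\"index\":{\"number_of_shards\":1}}}"), XContentType.JSON)
        .build();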
diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotUtilsTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotUtilsTests.java
index 489294fd53bd4..8dae5026a18bc 100644
--- a/server/src/test/java/org/opensearch/snapshots/SnapshotUtilsTests.java
+++ b/server/src/test/java/org/opensearch/snapshots/SnapshotUtilsTests.java
@@ -31,13 +31,22 @@
 package org.opensearch.snapshots;
 
+import org.opensearch.Version;
 import org.opensearch.action.support.IndicesOptions;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.cluster.metadata.Metadata;
+import org.opensearch.common.collect.ImmutableOpenMap;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.index.Index;
+import org.opensearch.index.IndexModule;
+import org.opensearch.index.IndexSettings;
 import org.opensearch.test.OpenSearchTestCase;
 
 import java.util.Arrays;
 import java.util.List;
 
 import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED;
 
 public class SnapshotUtilsTests extends OpenSearchTestCase {
     public void testIndexNameFiltering() {
@@ -85,4 +94,39 @@ private void assertIndexNameFiltering(String[] indices, String[] filter, Indices
         List<String> actual = SnapshotUtils.filterIndices(indicesList, filter, indicesOptions);
         assertThat(actual, containsInAnyOrder(expected));
     }
+
+    public void testValidateSnapshotsBackingAnyIndex() {
+        final String repoName = "test-repo";
+        final SnapshotId snapshotId1 = new SnapshotId("testSnapshot1", "uuid1");
+        final SnapshotId snapshotId2 = new SnapshotId("testSnapshot2", "uuid2");
+        SnapshotUtils.validateSnapshotsBackingAnyIndex(getIndexMetadata(snapshotId1, repoName), List.of(snapshotId2), repoName);
+    }
+
+    public void testValidateSnapshotsBackingAnyIndexThrowsException() {
+        final String repoName = "test-repo";
+        final SnapshotId snapshotId1 = new SnapshotId("testSnapshot1", "uuid1");
+        expectThrows(
+            SnapshotInUseDeletionException.class,
+            () -> SnapshotUtils.validateSnapshotsBackingAnyIndex(getIndexMetadata(snapshotId1, repoName), List.of(snapshotId1), repoName)
+        );
+    }
+
+    private static ImmutableOpenMap<String, IndexMetadata> getIndexMetadata(SnapshotId snapshotId, String repoName) {
+        final String index = "test-index";
+        Snapshot snapshot = new Snapshot(repoName, snapshotId);
+        final Metadata.Builder builder = Metadata.builder();
+        builder.put(createIndexMetadata(new Index(index, "uuid"), snapshot), true);
+        return builder.build().getIndices();
+    }
+
+    private static IndexMetadata createIndexMetadata(final Index index, Snapshot snapshot) {
+        final Settings settings = Settings.builder()
+            .put(SETTING_VERSION_CREATED, Version.CURRENT.id)
+            .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey())
+            .put(IndexSettings.SEARCHABLE_SNAPSHOT_REPOSITORY.getKey(), snapshot.getRepository())
+            .put(IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID.getKey(), snapshot.getSnapshotId().getUUID())
+            .put(IndexSettings.SEARCHABLE_SNAPSHOT_ID_NAME.getKey(), snapshot.getSnapshotId().getName())
+            .build();
+        return IndexMetadata.builder(index.getName()).settings(settings).numberOfShards(1).numberOfReplicas(0).build();
+    }
 }
"commons-codec:commons-codec:${versions.commonscodec}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" api "io.netty:netty-all:${versions.netty}" - api 'com.google.code.gson:gson:2.9.1' + api 'com.google.code.gson:gson:2.10' api "org.bouncycastle:bcpkix-jdk15on:${versions.bouncycastle}" api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" diff --git a/test/fixtures/minio-fixture/Dockerfile b/test/fixtures/minio-fixture/Dockerfile index b56440c0d44a9..81655aa545afd 100644 --- a/test/fixtures/minio-fixture/Dockerfile +++ b/test/fixtures/minio-fixture/Dockerfile @@ -1,4 +1,4 @@ -FROM minio/minio:RELEASE.2022-06-25T15-50-16Z +FROM minio/minio:RELEASE.2022-11-17T23-20-09Z ARG bucket ARG accessKey diff --git a/test/fixtures/minio-fixture/docker-compose.yml b/test/fixtures/minio-fixture/docker-compose.yml index 4c0245772ed4c..e4d2faab9a657 100644 --- a/test/fixtures/minio-fixture/docker-compose.yml +++ b/test/fixtures/minio-fixture/docker-compose.yml @@ -14,6 +14,14 @@ services: soft: 4096 ports: - "9000" + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] + interval: 30s + timeout: 20s + retries: 3 + volumes: + - type: tmpfs + target: /minio/data command: ["server", "--console-address", ":9001", "/minio/data"] minio-fixture-other: build: @@ -29,6 +37,14 @@ services: soft: 4096 ports: - "9000" + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] + interval: 30s + timeout: 20s + retries: 3 + volumes: + - type: tmpfs + target: /minio/data command: ["server", "--console-address", ":9001", "/minio/data"] minio-fixture-for-snapshot-tool: build: @@ -44,4 +60,12 @@ services: soft: 4096 ports: - "9000" + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] + interval: 30s + timeout: 20s + retries: 3 + volumes: + - type: tmpfs + target: /minio/data command: ["server", "--console-address", ":9001", "/minio/data"] diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index f874ab44d9d3b..11b3ce1dd05d4 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -1301,8 +1301,9 @@ public void onReplicationFailure( } ); ids.add(target); - countDownLatch.await(1, TimeUnit.SECONDS); } + countDownLatch.await(30, TimeUnit.SECONDS); + assertEquals("Replication should complete successfully", 0, countDownLatch.getCount()); return ids; }